From 03e258c839d7b15d2a64f0f5abb86bb0cbaf0387 Mon Sep 17 00:00:00 2001
From: github-actions
Date: Fri, 4 Aug 2023 02:41:23 +0000
Subject: [PATCH] Build

---
 310 files changed, 67477 insertions(+), 8642 deletions(-)

diff --git a/2010/canadian-astronomical-computing-data-and-network-facilities-a-white-paper-for-the-2010-long-range-plan/index.html b/2010/canadian-astronomical-computing-data-and-network-facilities-a-white-paper-for-the-2010-long-range-plan/index.html
new file mode 100644
index 0000000..c164569
--- /dev/null
+++ b/2010/canadian-astronomical-computing-data-and-network-facilities-a-white-paper-for-the-2010-long-range-plan/index.html
@@ -0,0 +1,162 @@

Canadian Astronomical Computing, Data And Network Facilities- A White Paper for the 2010 Long Range Plan - hpc.social - Aggregated Personal Blog
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.

Canadian Astronomical Computing, Data And Network Facilities- A White Paper for the 2010 Long Range Plan

In this whitepaper for the CASCA 2010 Long Range Plan, I and the rest of the Computing, Data, and Network committee of CASCA lay out the state of the ecosystem for computation in support of Canadian astronomy, and suggest a path forward for the period of the 2010-2020 long range plan.

Abstract

Significant investment in new large, expensive astronomical observing facilities spanning a substantial portion of the electromagnetic spectrum was a dominant theme of LRP2000 and continues to be necessary for Canadian astronomy to maintain its world position. These developments are generating increasingly large volumes of data. Such investments only make sense if they are balanced by strong infrastructure support to ensure that data acquired with these facilities can be readily accessed and analyzed by observers, and that theoreticians have the tools available to simulate and understand their context. This will require continuing investment in computational facilities to store and analyze the data, networks to ensure useful access to the data and products by Canadian researchers, and personnel to help Canadian researchers make use of these tools.

In addition, large parallel simulations have become an essential tool for astrophysical theory, and Canadian astronomy has world-leading simulators and developers who rely on world-class high-performance computing facilities being maintained in Canada to do their research effectively.

We recommend that Compute Canada be funded at $72M/yr to bring HPC funding per capita in line with G8 norms; that part of every Compute Canada technology renewal include a Top-20-class computing facility; that NSERC and other funding agencies begin supporting software development as an integral component of scientific research; that the staff funding for consortia be tripled, including local access to technical analyst staff; and that the last-mile bottleneck of campus networking below 10 Gb/s be addressed where it is impacting researchers, with particular urgency for the current 1 Gb/s connection at the CADC.
diff --git a/2010/codes-as-instruments-community-applications-and-simulation-software-for-the-hardware-architectures-of-the-next-decade/index.html b/2010/codes-as-instruments-community-applications-and-simulation-software-for-the-hardware-architectures-of-the-next-decade/index.html
new file mode 100644
index 0000000..267b6d6
--- /dev/null
+++ b/2010/codes-as-instruments-community-applications-and-simulation-software-for-the-hardware-architectures-of-the-next-decade/index.html
@@ -0,0 +1,172 @@

Codes as Instruments- Community Applications and Simulation Software for the Hardware Architectures of the Next Decade - hpc.social - Aggregated Personal Blog
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.

Codes as Instruments- Community Applications and Simulation Software for the Hardware Architectures of the Next Decade

It is becoming increasingly problematic that, even as computing and data become more and more fundamental to research, and as the complexity and diversity of computing technologies out there grows, getting stable funding for developing high-quality research software remains so difficult.

In this whitepaper for the CASCA 2010 Long Range Plan, my colleague Falk Herwig and I lay out the case for increased funding of R&D software development by professional research software developers. We make a couple of points which I genuinely believe to be strong:

First, increased benefits. A successful community code can support an enormous body of research. By the (admittedly somewhat crude) count we use in this paper, the top six research codes in astronomy accounted for approximately 50% of the computational astronomy publications over the period of study, and the top three - Cloudy, Gadget, and FLASH, which I was part of - accounted for nearly 40%. That is an enormous amount of R&D effort enabled by those projects.

Second, reduced costs. We draw on the growing research software development literature to demonstrate the high (and growing) challenges of engineering these codes in a scientist's spare time, and the high cost of software defects. With a small cadre of professional research software development personnel, better-quality software can be developed more efficiently.

Finally, a word about the title - this is an analogy due to Falk, and while it's been controversial, I think there's a lot of truth to it. Astronomy has always relied heavily on, for instance, telescopes - but a telescope is only part of an observational facility. A big photon-gathering dish is only as useful as the scientific instrument that's placed at its focus to make sense of those photons. Similarly, a huge computer by itself has no scientific value without software to run on it. Unless our community invests in computational instruments with the same level of seriousness as observational instruments, our ability to make use of these facilities is going to be needlessly limited.

Abstract

Modern astronomical research requires increasingly sophisticated computing facilities and software tools. Computational tools have become the fundamental means of turning raw observational data into scientific insight. Complex multi-physics simulation codes have developed into tools for numerical experiments that provide scientific insight beyond classical theory. Canadian researchers need an environment for the development and maintenance of these critical tools. In particular, the drastically enhanced complexity of deeply heterogeneous hardware architectures poses a real challenge to using present and future HPC facilities.

Without a national program in astrophysical simulation science and astronomy application code development, we are becoming vulnerable with respect to our ability to maximise the scientific return from existing and planned investments in astronomy. In addition, there are significant industrial/commercial HQP needs that a simulation and application code program could start to address, if it is properly aligned with academic training opportunities.

We outline the framework and requirements for such a program for developing Canadian astronomical application and simulation codes — and code builders. In the US decadal plan process, voices are calling for similar emphasis on developing infrastructure and incentives for open community codes (Weiner et al. 2009). We propose funding several small interdisciplinary teams of postdocs, graduate students, and staff, housed in departments at universities that have made or are about to make a commitment in a relevant area (e.g. applied math, computational physics, modeling science). These teams can, while training astronomical and computational HQP, focus on building tools that have been deemed high priorities by the astronomical and astrophysical communities, in order to make the best scientific use of our new computational facilities.
diff --git a/2011/testing-roundoff/index.html b/2011/testing-roundoff/index.html
new file mode 100644
index 0000000..ff9166b
--- /dev/null
+++ b/2011/testing-roundoff/index.html
@@ -0,0 +1,237 @@

Testing Roundoff - hpc.social - Aggregated Personal Blog
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.

Testing Roundoff

A talk has been circulating (HT: Hacker News) from a conference celebrating 50 years of scientific computing at Stanford, in which the speaker, William Kahan, discusses an old and sadly disused trick for testing the numerical stability of the implementation of an algorithm; it should work with any C99 or Fortran 2003 compiler, without changing the underlying code. It's definitely a tool that's worth having in your toolbox, so it's worth mentioning here.

We'll consider a simple numerical problem; imagine a projectile launched from height $h = 0$ with velocity $v_0 = 5000 \mathrm{m s}^{-1}$, and subject to the Earth's gravitational acceleration, $g = 9.81 \mathrm{m s}^{-2}$. We're going to ask at what time the projectile first reaches a given height $h$.

This is going to be an application of our friend the quadratic equation:

\[ r = \frac{-b \pm \sqrt{b^2 - 4 a c}}{2 a} \]

For our projectile, solving $h = v_0 t - \frac{1}{2} g t^2$ for $t$ means taking $a = g/2$, $b = -v_0$, and $c = h$, so that the discriminant is $v_0^2 - 2 g h$.

Now, because of the repeated subtraction, a naive implementation of this equation is known to undergo catastrophic cancellation near $b^2 = 4 a c$, or where $|4 a c|$ is much smaller than $b^2$ — in our case, near the peak and near the ends of the projectile's trajectory, respectively.

Now, before we show that such sensitivity can happen, we should ask — why would we care? If we test our code and know it gives "good enough" answers under the conditions that matter to us, does it really matter what could happen in other circumstances? The answer, of course, is yes. There are a lot of things we could want to do — increase the aggressiveness of compiler optimizations when compiling our code, for instance — which will have the effect of numerically perturbing our computation; and we need to know if those small perturbations will have small, or large, effects on our answers.

It turns out that IEEE 754, the standard for floating point numbers, can give us some help with this. (Everyone who does numerical work should know at least a little bit about the floating point standard, or at least the issues involved with floating point numbers; What every computer scientist should know about floating point, particularly the first few sections, is an essential guide.) The floating point standard - which almost all widely-used computing hardware should support - allows you to set certain properties of the mathematics "on the fly". One particularly useful feature is the ability to set how the last digit of every floating point operation is rounded: to nearest (the default), to zero (i.e., always truncate), to positive infinity (always round up), or to negative infinity (always round down). In the C99 standard, this is implemented in the fenv.h header and the math library; in Fortran 2003, it is part of the intrinsic IEEE_ARITHMETIC module, where you can call IEEE_SET_ROUNDING_MODE.

By changing the rounding, you are perturbing every floating point operation in your calculation. If this perturbation results in significant changes in your result, then your calculation is very fragile, and you may have to look into rewriting the calculation, using another algorithm, or resorting to higher precision for that calculation (which will push the perturbations to less significant decimal places). If not, then you have some evidence that your calculation is robust to perturbations, at least in the last bit.

Below we have an example of how you'd do this in C. We have a simple routine which uses the obvious implementation of the quadratic equation to calculate the time at which the projectile is at a height of one meter, and we perform this calculation with all available rounding modes:
#include <stdio.h>
#include <math.h>
#include <fenv.h>

const int NOSOLN=-1;
const int SOLN  = 0;

/* First time at which a projectile launched upwards at speed vo,
 * decelerating at g, reaches height ho; returns NOSOLN if it never does. */
int time(const float vo, const float g, const float ho, float *time) {
    float disc  = (vo*vo - 2.*g*ho);

    if (disc < 0) return NOSOLN;

    disc = sqrt(disc);
    float root1 = (vo + disc)/g;
    float root2 = (vo - disc)/g;

    if ((root2 >= 0.) && (root2 < root1))
        *time = root2;
    else
        *time = root1;

    return SOLN;
}

int main(int argc, char **argv) {

    const float g  = 9.81;
    const float vo = 5000.;
    const float ho = 1.;

    int nroundings = 4;
    int roundings[] = {FE_TONEAREST, FE_UPWARD, FE_DOWNWARD, FE_TOWARDZERO};
    char *names[]   = {"To nearest", "To +inf", "To -inf", "To zero"};

    for (int r=0; r<nroundings; r++) {
        int status = fesetround(roundings[r]);
        if (status) {
            fprintf(stderr,"Could not set rounding to '%s'.\n", names[r]);
        } else {
            float soln;
            time(vo, g, ho, &soln);
            printf("%s: %f\n", names[r], soln);
        }
    }

    return 0;
}

We compile the code with gcc (any C99 compiler should work):

$ gcc -O0 -Wall -std=c99 quadratic.c -o quadratic -lm

Note that we need to explicitly link in the math library, and to turn off optimization (so that the compiler doesn't replace the repeated calls to time() with a single call). Running this, we find:

$ ./quadratic
To nearest: 0.000199
To +inf: 0.000149
To -inf: 0.000249
To zero: 0.000249

Changing the rounding modes changes the result by 50%! This shows that our current implementation - which is not giving obviously wrong answers - is extremely fragile in the presence of numerical noise, and we should exercise extreme caution with compiler flags, etc. (How to rewrite the expression to be more robust to small changes is a topic for another day.)
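For the curious, the standard rewrite avoids the subtraction entirely: choose the sign of the square root so that the numerator is an addition, and recover the other root from the product of the roots, $c/a = r_1 r_2$. A minimal sketch in C (the helper name stable_roots is ours; checking that $a \neq 0$ and that the discriminant is non-negative is left to the caller):

#include <math.h>

/* Real roots of a*x*x + b*x + c = 0, computed without the catastrophic
   cancellation above; assumes a != 0 and a non-negative discriminant. */
void stable_roots(float a, float b, float c, float *r1, float *r2) {
    float disc = sqrtf(b*b - 4.f*a*c);
    /* b and copysignf(disc, b) share a sign, so this sum never cancels */
    float q = -0.5f * (b + copysignf(disc, b));
    *r1 = q / a;    /* the larger-magnitude root */
    *r2 = c / q;    /* the other root, from r1*r2 = c/a */
}

This removes the cancellation between $b$ and the square root, though the square root itself remains sensitive right at $b^2 \approx 4 a c$.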
diff --git a/2012/present-and-future-computing-data-and-networks-committee-of-the-canadian-astronomical-society-casca/index.html b/2012/present-and-future-computing-data-and-networks-committee-of-the-canadian-astronomical-society-casca/index.html
new file mode 100644
index 0000000..10bade0
--- /dev/null
+++ b/2012/present-and-future-computing-data-and-networks-committee-of-the-canadian-astronomical-society-casca/index.html
@@ -0,0 +1,168 @@

Present and Future Computing, Data, and Networks Committee of the Canadian Astronomical Society (CASCA) - hpc.social - Aggregated Personal Blog
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.

Present and Future Computing, Data, and Networks Committee of the Canadian Astronomical Society (CASCA)

This document is a whitepaper I wrote for the CASCA Computing and Data committee outlining the computing needs of the Canadian astronomy community for the next several years. It does a fairly decent job of laying out the diverse range of large-scale R&D computing needs of the national community.

Executive Summary

Advanced research computing resources have never been so essential to the Canadian astronomy and astrophysics research community. In the past few years, astronomical researchers have benefited greatly from modern large-scale computing systems; from a diverse range of resources, which are a good match to the diverse computing needs of our scientists; and from good working relationships with existing providers, allowing flexibility and collaboration between these centres and research groups.

However, CASCA has concerns about the near future of the advanced research computing available to its researchers. Here the Computing, Data, and Networks Committee of CASCA presents, on behalf of the Society, a summary of the current computing needs, successes, and concerns of our researchers, taken from previous consultative summaries and their updates. This is the first step of a process that will continue through the first half of 2013, which will include a comprehensive survey of the research computing needs of the Canadian astronomy and astrophysics community, and will investigate a variety of strategies for meeting those needs.

Early systems funded by the CFI NPF are already showing their age; in many cases they are out of their maintenance contracts and are already starting to fail. With no clear signs of new investment on the horizon, even if existing systems were to continue operating perfectly, our researchers would be left using stagnant computing hardware while other nations continue to invest in new research computing platforms; as data volumes continue to increase, they will not only fall behind our international competitors but also be unable to make full use of prior investments.

When new funding does become available, the Canadian astronomy community would like to see changes in emphasis, reflecting lessons learned from the CFI NPF procurement. Previous investment focused largely on computing hardware. While this addressed a real and pressing need resulting from years of underinvestment, the research endeavor requires a more holistic approach: computing hardware investments must be balanced with similar investments in storage, highly qualified personnel, software development, and networking to maximize results.

In this report, we recommend an urgent search for new and sustainable sources of advanced research computing funding; an increased focus on personnel, software development, and storage; maintaining a diverse range of systems; enabling major longer-term projects by committing resources for longer than the current one-year allocation window of the RAC process; and continuing to enable close working relationships between research groups and computing providers, preferably as close to the researchers as possible. In addition, we recommend that CCI's board, through the proposed Researcher Advisory Committee or otherwise, establish a direct relationship with CASCA (and similar professional groups), via persons charged with representing the needs of these research communities in planning for Compute Canada.
diff --git a/2012/stopping-your-program-at-the-first-nan/index.html b/2012/stopping-your-program-at-the-first-nan/index.html
new file mode 100644
index 0000000..8bcf6c7
--- /dev/null
+++ b/2012/stopping-your-program-at-the-first-nan/index.html
@@ -0,0 +1,246 @@

Stopping your program at the first NaN - hpc.social - Aggregated Personal Blog
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.

Stopping your program at the first NaN

If you know that somewhere in your program there lurks a catastrophic numerical bug that puts NaNs or Infs into your results, and you want to know where it first happens, the search can be a little frustrating. However, as before, the IEEE standard can help you; these illegal events (divide by zero, underflow or overflow, or invalid operations which cause NaNs) can be made to trigger exceptions, which will stop your code right at the point where the problem first occurs; then, if you run your code through a debugger, you can find the offending line.

We'll discuss using the GNU compilers here; other compiler suites have similar options.

Let's take a look at the following Fortran code:
program nantest
+    real :: a, b, c
+
+    a = 1.
+    b = 2.
+
+    c = a/b
+    print *, c,a,b
+
+    a = 0.
+    b = 0.
+
+    c = a/b
+    print *, c,a,b
+
+    a = 2.
+    b = 1.
+
+    c = a/b
+    print *,c,a,b
+end program nantest
+
+ +

If we compile this code with -ffpe-trap=invalid (I usually add ,zero,overflow, and even underflow if I think that’s causing me a problem in intermediate results), then the debugger can tell us the line where it all goes wrong:

+ +
$ gfortran -o nantest nantest.f90 -ffpe-trap=invalid,zero,overflow -g -static
+$ gdb nantest
+[...]
+(gdb) run
+Starting program: /scratch/ljdursi/Testing/fortran/nantest
+  0.50000000       1.0000000       2.0000000    
+
+Program received signal SIGFPE, Arithmetic exception.
+0x0000000000400384 in nantest () at nantest.f90:13
+13          c = a/b
+Current language:  auto; currently fortran
+
+ +

With the Intel Fortran compiler (ifort), using the option -fpe0 will do the same thing.
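
As a minimal sketch (assuming the same nantest.f90 as above), the equivalent ifort session would start out as:

$ ifort -o nantest nantest.f90 -fpe0 -g
$ gdb ./nantest

with the debugger then reporting the offending line just as in the gfortran run.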

+ +

It’s a little trickier with C code; we have to actually insert a call to feenableexcept(), which enables floating-point exceptions and is defined in fenv.h:

+ +
#define _GNU_SOURCE   /* feenableexcept() is a GNU extension declared in fenv.h */
#include <stdio.h>
+#include <fenv.h>
+
+int main(int argc, char **argv) {
+    float a, b, c;
+    feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
+
+    a = 1.;
+    b = 2.;
+
+    c = a/b;
+    printf("%f %f %f\n", a, b, c);
+
+    a = 0.;
+    b = 0.;
+
+    c = a/b;
+    printf("%f %f %f\n", a, b, c);
+
+    a = 2.;
+    b = 1.;
+
+    c = a/b;
+    printf("%f %f %f\n", a, b, c);
+
+    return 0;
+}
+
+

but the effect is the same:

+ +
$ gcc -o nantest nantest.c -lm -g
+$ gdb ./nantest
+[...]
+(gdb) run
+Starting program: /scratch/s/scinet/ljdursi/Testing/exception/nantest
+1.000000 2.000000 0.500000
+
+Program received signal SIGFPE, Arithmetic exception.
+0x00000000004005d0 in main (argc=1, argv=0x7fffffffe4b8) at nantest.c:17
+17	    c = a/b;
+
+ +

Either way, you have a much better handle on where the errors are occurring.

+ +
+
+ +
+ + + + + + + + + + +
+ + + + + diff --git a/2013/ibm-platform-hpc-4-1-1-creating-a-network-bridge-on-compute-nodes/index.html b/2013/ibm-platform-hpc-4-1-1-creating-a-network-bridge-on-compute-nodes/index.html new file mode 100644 index 0000000..72cd45b --- /dev/null +++ b/2013/ibm-platform-hpc-4-1-1-creating-a-network-bridge-on-compute-nodes/index.html @@ -0,0 +1,331 @@ + + + + + + + IBM Platform HPC 4.1.1- Creating a network bridge on compute nodes - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
+ +
+

IBM Platform HPC 4.1.1- Creating a network bridge on compute nodes

+

Applies to

+ +
  • IBM Platform HPC V4.1.1
  • IBM Platform Cluster Manager V4.1.1

Introduction

+ +

IBM Platform HPC provides the ability to customise the network configuration +of compute nodes via Network Profiles. Network Profiles support a custom NIC +script for each defined interface.

+ +

This provides the ability to configure network bonding and bridging. Here we +provide a detailed example on how to configure a network bridge in a cluster +managed by IBM Platform HPC.

+ +

IBM Platform HPC includes xCAT technology for cluster provisioning. xCAT +includes a script (/install/postscripts/xHRM) which may be used to +configure network bridging. This script is leveraged as a custom network +script in the example below.
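
(As a purely hypothetical aside: on a node that has already been provisioned, the same script could be exercised directly through xCAT’s postscript mechanism, e.g.

# updatenode compute001 -P "xHRM bridgeprereq eth0:br0"

but in this example we attach it to a Network Profile instead.)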

+ +

Example

+ +

The configuration of the network provision may be viewed in the IBM Platform HPC Web console at: Resources > Node Provisioning > Networks.

+ +
+
+ +

The configuration of network provision may also be viewed using the lsdef CLI.

+ +
# lsdef -t network provision
+Object name: provision
+    domain=private.dns.zone
+    dynamicrange=192.0.2.201-192.0.2.254
+    gateway=<xcatmaster>
+    mask=255.255.255.0
+    mgtifname=eth0
+    net=192.0.2.0
+    staticrange=192.0.2.15-192.0.2.49
+    staticrangeincrement=1
+    tftpserver=192.0.2.50
+ +

The Network Profile default_network_profile which includes the network +provision may be viewed in the IBM Platform HPC Web console at: Resources > +Node Provisioning > Provisioning Templates > Network Profiles.

+ +

+ +

The Network Profile default_network_profile configuration may also be viewed +using the lsdef CLI.

+ +
# lsdef -t group __NetworkProfile_default_network_profile
+Object name: __NetworkProfile_default_network_profile
+    grouptype=static
+    installnic=eth0
+    members=
+    netboot=xnba
+    nichostnamesuffixes.eth0=-eth0
+    nichostnamesuffixes.bmc=-bmc
+    nicnetworks.eth0=provision
+    nicnetworks.bmc=provision
+    nictypes.eth0=Ethernet
+    nictypes.bmc=BMC
+    primarynic=eth0
+ +

Here, we configure a network bridge br0 against eth0 for compute nodes +using a new Network Profile.

+ +
  1. Add a new Network Profile with the name default_network_profile_bridge via the IBM Platform HPC Web console. As an Administrator user, browse to Resources > Node Provisioning > Provisioning Templates > Network Profiles and select the Add button.
+
+ +

A total of three devices are required to be added:

+ +
  • eth0 (Type: Ethernet, Network: provision)
  • bmc (Type: BMC, Network: provision)
  • br0 (Type: Customized, Network: provision, Configuration Command: xHRM bridgeprereq eth0:br0, which creates the network bridge br0 against eth0)

The new Network Profile default_network_profile_bridge is shown below.

+ +
+
+ +
  2. Now we are ready to provision the nodes using the new Network Profile default_network_profile_bridge. To begin the process of adding nodes, navigate in the IBM Platform HPC Web console to Resources > Devices > Nodes and select the Add button. Within the Add Nodes window, optionally select Node Group compute and select Specify Properties for the provisioning template. This will allow you to select the newly created network profile default_network_profile_bridge. Here the hardware profile IPMI and stateful provisioning are used.
+
+
+ +

Nodes are added using Auto discovery by PXE boot. Nodes may also be added +using a node information file.

+ +

The nodes are powered on, detected by IBM Platform HPC and provisioned. In +this example, two nodes compute000, compute001 are detected and +subsequently provisioned.

+ +
  3. Once the nodes have been provisioned and have completed their initial boot, they appear in the IBM Platform HPC Web console (Resources > Devices > Nodes) with Status booted and Workload Agent OK.
+
+
+ +

The network bridge is configured on the nodes as expected. We may see this +via the IBM Platform HPC Web console by browsing to Resources > Devices > +Nodes and selecting the Summary tab and scrolling to Other Key Properties.

+ +
+
+ +

Finally, using the CLI xdsh, we remotely execute ifconfig on node compute001 to check the configuration of interface br0.

+ +
# xdsh compute001 ifconfig br0
+compute001: br0       Link encap:Ethernet  HWaddr 00:1E:67:49:CC:E5   
+compute001:           inet addr:192.0.2.20  Bcast:192.0.2.255  Mask:255.255.255.0
+compute001:           inet6 addr: fe80::b03b:7cff:fe61:c1d4/64 Scope:Link
+compute001:           UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
+compute001:           RX packets:26273 errors:0 dropped:0 overruns:0 frame:0
+compute001:           TX packets:42490 errors:0 dropped:0 overruns:0 carrier:0
+compute001:           collisions:0 txqueuelen:0  
+compute001:           RX bytes:11947435 (11.3 MiB)  TX bytes:7827365 (7.4 MiB)
+compute001:
+ +

As expected, the compute nodes have been provisioned with a network bridge br0 configured.
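
As one further spot check - hypothetical, and assuming the bridge-utils package is installed on the compute nodes - the bridge membership could also be listed remotely:

# xdsh compute001 brctl show

which should report br0 with eth0 as its attached interface.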

+ +
+
+ +
+ + + + + + + + + + +
+ + + + + diff --git a/_posts/gaborsamu/2013-4-24-hpc32_cuda.md b/2013/ibm-platform-hpc-v3-2-gpu-management-with-nvidia-cuda-5/index.html similarity index 58% rename from _posts/gaborsamu/2013-4-24-hpc32_cuda.md rename to 2013/ibm-platform-hpc-v3-2-gpu-management-with-nvidia-cuda-5/index.html index 145e719..e70c11a 100644 --- a/_posts/gaborsamu/2013-4-24-hpc32_cuda.md +++ b/2013/ibm-platform-hpc-v3-2-gpu-management-with-nvidia-cuda-5/index.html @@ -1,18 +1,87 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2013-04-24 18:32:15' -layout: post -original_url: https://www.gaborsamu.com/blog/hpc32_cuda/ -slug: ibm-platform-hpc-v3-2-gpu-management-with-nvidia-cuda-5 -title: IBM Platform HPC V3.2- GPU Management with NVIDIA CUDA 5 ---- - -


This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
+ +
+

IBM Platform HPC V3.2- GPU Management with NVIDIA CUDA 5

+

IBM Platform HPC V3.2 is easy-to-use, yet comprehensive technical computing cluster management software. It includes as standard GPU scheduling, management and monitoring capabilities for systems equipped with NVIDIA Tesla GPUs.

IBM Platform HPC V3.2 has support out of the box for NVIDIA CUDA 4.1,

@@ -240,4 +309,76 @@

  • Host List View (GPU Tab)
    \ No newline at end of file + + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/_posts/gaborsamu/2013-12-20-hpc411_nvidia.md b/2013/ibm-platform-hpc-v4-1-1-1-best-practices-for-managing-nvidia-gpu-devices/index.html similarity index 90% rename from _posts/gaborsamu/2013-12-20-hpc411_nvidia.md rename to 2013/ibm-platform-hpc-v4-1-1-1-best-practices-for-managing-nvidia-gpu-devices/index.html index 8c5b46f..2ad182a 100644 --- a/_posts/gaborsamu/2013-12-20-hpc411_nvidia.md +++ b/2013/ibm-platform-hpc-v4-1-1-1-best-practices-for-managing-nvidia-gpu-devices/index.html @@ -1,18 +1,87 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2013-12-20 18:35:22' -layout: post -original_url: https://www.gaborsamu.com/blog/hpc411_nvidia/ -slug: ibm-platform-hpc-v4-1-1-1-best-practices-for-managing-nvidia-gpu-devices -title: IBM Platform HPC V4.1.1.1- Best Practices for Managing NVIDIA GPU devices ---- - -

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    + +
    +

    IBM Platform HPC V4.1.1.1- Best Practices for Managing NVIDIA GPU devices

    +

    Summary

IBM Platform HPC V4.1.1.1 is easy-to-use, yet comprehensive technical computing infrastructure management software. It includes as standard GPU

@@ -1030,4 +1099,76 @@
 /etc/init.d/lsf stop
 /etc/init.d/lsf start
fi
-exit 0

    \ No newline at end of file +exit 0
    + + +
    + +
    + + + + + + + + + + + + + + + + diff --git a/2013/isc-2013-wrapup-ibm-platform-hpc-and-intel-xeon-phi/index.html b/2013/isc-2013-wrapup-ibm-platform-hpc-and-intel-xeon-phi/index.html new file mode 100644 index 0000000..0d2bf94 --- /dev/null +++ b/2013/isc-2013-wrapup-ibm-platform-hpc-and-intel-xeon-phi/index.html @@ -0,0 +1,184 @@ + + + + + + + ISC 2013 wrapup- IBM Platform HPC and Intel Xeon Phi - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    + +
    +

    ISC 2013 wrapup- IBM Platform HPC and Intel Xeon Phi

    +

    This past June at ISC 2013 the IBM booth featured live demonstration of IBM +Platform HPC V3.2 managing an IBM iDataplex cluster equipped with Intel Xeon +Phi coprocessors.

    + +

    As part of the demonstration, the potential performance gains running an +application on Intel Xeon Phi coprocessors was shown by running the visually +stunning Intel Embree crown rendering on Intel Xeon and Intel Xeon +Phi simultaneously.

    + +

IBM Platform HPC provides a unified web-based interface for deployment and management of the cluster. Additionally, it includes application submission templates to give administrators the flexibility to create templates that greatly simplify the submission of jobs for their users. A number of templates for well-known ISV and open source applications are also included as standard. For ISC, a template was created to allow Intel Embree to be easily launched through the built-in workload manager for execution on Intel Xeon or Intel Xeon Phi coprocessors.

    + +

    Finally, when the processor intensive Intel Embree application was running, +the monitoring and reporting capabilities of IBM Platform HPC provided both +real time and historical reporting on the health of each node in the cluster, +including metrics specific to the Intel Xeon Phi coprocessor such as +temperature, power consumption and utilization - all through a consistent +web-based interface.

    + +

    Enjoy the short video of the demo here.

    + +
    + +
    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2013/supercomputing-2013-sc13/index.html b/2013/supercomputing-2013-sc13/index.html new file mode 100644 index 0000000..c0d18ac --- /dev/null +++ b/2013/supercomputing-2013-sc13/index.html @@ -0,0 +1,174 @@ + + + + + + + Supercomputing 2013 (SC13) - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    + +
    +

    Supercomputing 2013 (SC13)

    +

Supercomputing 2013 has now come to a close. For those of you who were in Denver, we hope that you had the opportunity to visit the IBM booth. Among the many live demonstrations running at the IBM booth, there was a demo of IBM Platform HPC for System x.

    + +

    In addition to the demo running live on IBM NeXtScale, there was also a static +IBM NeXtScale system on display for people to touch and see.

    + +
    +
    + +

    The IBM Platform HPC demo featured IBM NeXtScale and the Weather Research +and Forecasting Model (WRF) application.

    + +
    +
    + +

Even though SC13 has just wrapped up, I’m already looking forward to next year’s events.

    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2014/armed-and-ready-with-ibm-platform-lsf/index.html b/2014/armed-and-ready-with-ibm-platform-lsf/index.html new file mode 100644 index 0000000..b5b7f89 --- /dev/null +++ b/2014/armed-and-ready-with-ibm-platform-lsf/index.html @@ -0,0 +1,257 @@ + + + + + + + Armed and ready with IBM Platform LSF - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    + +
    +

    Armed and ready with IBM Platform LSF

    +

These days it’s not uncommon to hear about CPUs based upon ARM cores. They can be found in mobile phones, embedded systems, laptops and even servers. Indeed, recently there have been a number of major announcements from vendors building processors based on ARM cores. This includes the AMD Opteron A1100, NVIDIA Tegra K1 and even the Apple A7, which is used in the iPhone 5s. What these all have in common is that they are 64-bit and based on the ARM v8 ISA. At the same time, the ARM-server chip startup Calxeda announced it was shutting down. Surging power requirements, as well as the announcement of 64-bit chips, have led to renewed interest in energy-efficient ARM-based processors for high performance computing.

    + +

When building out an infrastructure for Technical Computing, a workload manager is typically used to control access to the computing resources. As it turns out, the leading workload manager IBM Platform LSF (formerly Platform Computing) has supported Linux on ARM for about 10 years. In fact, today there are IBM clients using Platform LSF on Linux ARM-based clusters as part of mobile device design and testing.

    + +

The current release of IBM Platform LSF 9.1.2 supports Linux on ARM v7 with upcoming support for ARM v8. Given that Platform LSF provides the ability to build out heterogeneous clusters, creating a compute cluster containing ARM, Power and x86 based nodes is a snap. Jobs may be targeted to a specific processor type, and the optional portal IBM Platform Application Centre provides an easy-to-use, highly configurable, application-centric web-based interface for job management.

    + +

    Hello. How do you “doo”?

    + +

I’ve recently had the opportunity to test IBM Platform LSF on a two-node, ARM-based cluster. The IBM Platform LSF master node was a Udoo Quad system running Debian Wheezy ARMv7 EABI hard-float. The second node was running Fedora on an ARM v8 simulator. Installation and operation of the software were identical to other platforms. Using the Platform LSF ELIM (External LIM) facility for adding external load indices, I was able to quickly create a script to report the processor temperature on the Udoo Quad system.
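
A minimal sketch of what such an ELIM script could look like is shown below. The script name, thermal zone path, and reporting interval are illustrative assumptions; the output format, however, follows the standard ELIM convention of periodically writing "<number of indices> <name> <value>" records to stdout:

#!/bin/sh
# elim.cputemp (hypothetical sketch): report one external load index, cputemp.
while true; do
    # SoC temperature in millidegrees Celsius; this path varies by board.
    millic=$(cat /sys/class/thermal/thermal_zone0/temp)
    echo "1 cputemp $((millic / 1000))"
    sleep 15
done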

    + +

Now, putting Platform LSF through its paces, we see that the type, model and other physical characteristics of the nodes are detected.

    + +
    $ lshosts -w
    +HOST_NAME type model cpuf ncpus maxmem maxswp server RESOURCES
    +udoo LINUX_ARM ARM7l 60.0 4 875M - Yes (mg)
    +ma1arms4 LINUX_ARM   ARM8  60.0     1   1.8G   1.9G    Yes ()
    + +

    Looking at the load information on the system, we see the built-in load +indices, in addition to the cputemp metric which I introduced to report the +CPU temperature (Celsius). At this point the system is essentially idle.

    + +
    $ lsload -l
    +HOST_NAME status r15s r1m r15m ut pg io ls it tmp swp mem cputemp
    +udoo ok 0.5 0.6 1.5 4% 0.0 311 1 0 1297M 0M 701M 45.0
    +ma1arms4   busy   3.6  *7.7   6.2  52%   0.0   50 3   0  954M  1.9G  1.6G 0.0
    + +

    Next, we submit a job for execution to Platform LSF. Rather than the requisite +sleep job, we submit something a bit more interesting, the HPC Challenge +Benchmark (HPCC). Debian Wheezy happens to include a pre-compiled binary which +is compiled against OpenMPI.

    + +

    As the Udoo Quad is a 4 core system (as the name implies), hpcc is submitted +requesting 4 cores.

    + +
    $ bsub -n 4 mpiexec -n 4 /usr/bin/hpcc
    +Job <2> is submitted to default queue <normal>.
    + +

    With HPCC running, we quickly see the utilization as well as the CPU +temperature increase to 60C.

    + +
    $ lsload -l
    +HOST_NAME status r15s r1m r15m ut pg io ls it tmp swp mem cputemp
    +udoo ok 5.1 5.1 2.4 94% 0.0 49 1 0 1376M 0M 497M 60.0
    +ma1arms4   ok   0.5  1.1   1.2  40%   0.0   50 3   0  954M  1.9G  1.6G 0.0
    + +

During the life of the job, the resource utilization may be easily viewed using the Platform LSF user commands. This includes details such as the PIDs that make up the job.

    + +
    $ bjobs -l
    + 
    +Job <2>, User <debian>, Project <default>, Status <RUN>, Queue <normal>, 
    +                    Command <mpiexec -n 4 /usr/bin/hpcc>, Share group charged </debian>
    +Sun Feb 2 23:49:48: Submitted from host <udoo>, CWD </opt/ibm/lsf/conf>, 
    +                    4 Processors Requested;
    +Sun Feb 2 23:49:48: Started on 4 Hosts/Processors <udoo> <udoo> <udoo> <udoo>,
    +Execution Home </home/debian>, Execution CWD </opt/ibm/lsf/conf>;
    +Sun Feb 2 23:51:05: Resource usage collected.
    +The CPU time used is 227 seconds.
    +MEM: 140 Mbytes; SWAP: 455 Mbytes; NTHREAD: 8
    +PGID: 15678; PIDs: 15678 15679 15681 15682 15683 15684
    +15685
    +....
    +....
    + +

    New Roads?

    + +

Here we could speak of GFlops and other such measures of performance, but that was not my objective. The key point is that there is a growing interest in non-x86 solutions for Technical Computing. IBM Platform LSF software has supported and continues to support a wide variety of operating systems and processor architectures, from ARM to IBM Power to IBM System z.

    + +

As for ARM-based development boards such as the Udoo Quad, Parallela Board, etc., they are inexpensive as well as energy efficient. This makes them of interest to HPC scientists exploring approaches to energy efficiency for HPC workloads. Let us know your thoughts about the suitability of ARM for HPC workloads.

    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2014/docker-for-hpc/index.html b/2014/docker-for-hpc/index.html new file mode 100644 index 0000000..fdaabc4 --- /dev/null +++ b/2014/docker-for-hpc/index.html @@ -0,0 +1,174 @@ + + + + + + + Docker for HPC - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    + +
    +

    Docker for HPC

    +

With the recent release of Docker 1.0 and the broad industry backing from organizations such as Red Hat and IBM, it’s no wonder that interest in the use and application of this Linux container technology continues to grow. Docker is shipped with Red Hat Enterprise Linux 7, and there exists a growing registry of Docker images for a wide variety of applications.

    + +

For those who are unfamiliar with Docker, it’s essentially a container technology for the Linux platform which leverages existing and well-proven technologies such as control groups (cgroups) and LinuX Containers (LXC). Docker brings these technologies together and provides ease of setup and use, and compelling efficiency.

    + +

The IBM Platform Computing team has recently announced the availability of the IBM Platform LSF and Docker integration, which is available as an open beta on Service Management Connect. Supplementing the release of the integration is a white paper which is focused on the suitability of Docker for high performance computing (HPC) and includes an easy-to-follow, real-world example of how to run a Docker image under Platform LSF.
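
Purely as an illustrative sketch - this is not the mechanism of the integration itself, which is described in the white paper - one could wrap docker run in an ordinary job submission:

$ bsub -o %J.out docker run --rm busybox echo "hello from LSF"

The real integration goes well beyond this, but the sketch conveys the basic idea: the workload manager treats the container launch like any other job.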

    + +

    Happy tinkering!

    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2014/exascale-in-perspective-rsc-s-1-2-petaflop-rack/index.html b/2014/exascale-in-perspective-rsc-s-1-2-petaflop-rack/index.html new file mode 100644 index 0000000..b5874f6 --- /dev/null +++ b/2014/exascale-in-perspective-rsc-s-1-2-petaflop-rack/index.html @@ -0,0 +1,159 @@ + + + + + + + Exascale in perspective- RSC's 1.2 petaflop rack - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Glenn K. Lockwood, personal thoughts and opinions of a supercomputing enthusiast. See the original post here.
    + +
    +

    Exascale in perspective- RSC's 1.2 petaflop rack

    +
    Russian supercomputing manufacturer RSC generated some buzz at ISC'14 last week when they showed their 1.2 PF-per-rack Xeon Phi-based platform.  I was aware of this system from when they first announced it a few months prior, and I referenced it in a piece of a blog post I was writing about the scarier aspects of exascale computing.  Given my impending career change though, it is unclear that I will have the time to ever finish that post before it becomes outdated.  Since RSC is back in the spotlight though, I thought I'd post the piece I wrote up to illustrate how wacky this 1.2 PF rack really is in terms of power consumption.  Power consumption, of course, is the limiting factor standing between today and the era of exascale computing.

    So, to put a 400 kW, 1.2 PF rack into perspective, here is that piece:

    +

    +

    The Importance of Energy Efficiency

Up through the petascale era in which we currently live, raw performance of high-performance components--processors, RAM, and interconnect--was what limited the ultimate performance of a given high-end machine.  The first petaflop machine, Los Alamos' Roadrunner, derived most of its FLOPs from high-speed PowerXCell 8i processors pushing 3.2 GHz per core.  Similarly, the first 10 PF supercomputer, RIKEN's K computer, derived its performance from its sheer size of 864 cabinets.  Although I don't mean to diminish the work done by the engineers that actually got these systems to deliver this performance, the petascale era really was made possible by making really big systems out of really fast processors.

    By contrast, Exascale represents the first milestone where the limitation does not lie in making these high-performance components faster; rather, performance is limited by the amount of electricity that can be physically delivered to a processor and the amount of heat that can be extracted from it.  This limitation is what has given rise to these massively parallel processors that eschew a few fast cores for a larger number of low-powered ones.  By keeping clock speeds low and densely packing many (dozens or hundreds) of compute cores on a single silicon die, these massively parallel processors are now realizing power efficiencies (flops per watt) that are an order of magnitude higher than what traditional CPUs can deliver.

The closest technology on the market that will probably resemble the future's exaflop machines is based on accelerators--either NVIDIA GPUs or Intel's MICs.  The goal will be to jam as many of these massively parallel processors into as small a space and with as tight of an integration as possible.  Recognizing this trend, NERSC has opted to build what I would call the first "pre-exascale" machine in its NERSC-8 procurement which will feature a homogeneous system of manycore processors.

    However, such pre-exascale hardware doesn't actually exist yet, and NERSC-8 won't appear until 2016.  What does exist, though, is a product by Russia's RSC Group called PetaStream: a rack packed with 1024 current-generation Xeon Phi (Knight's Corner) coprocessors that has a peak performance of 1.2 PF/rack.  While this sounds impressive, it also highlights the principal challenge of exascale computing: power consumption.  One rack of RSC PetaStream is rated for 400 kW, delivering 3 GFLOPs/watt peak.  Let's put this into perspective.

    Kilowatts, megawatts, and gigawatts in perspective

    During a recent upgrade to our data center infrastructure, three MQ DCA220SS-series diesel generators were brought in for the critical systems.  Each is capable of producing 220 kVA according to the spec sheets.
    +
    Three 220 kVA diesel generators plugged in during a PM at SDSC
    It would take three of these diesel generators to power a single rack of RSC's PetaStream.  Of course, these backup diesel generators aren't a very efficient way of generating commercial power, so this example is a bit skewed.

    Let's look at something that is used to generate large quantities of commercial power instead.  A GE 1.5-77 wind turbine, which is GE's most popular model, is advertised as delivering 1.5 megawatts at wind speeds above 15 miles per hour.

    GE 1.5 MW wind turbine.   Source: NREL
    Doing the math, this means that the above pictured turbine would be able to power only three racks of RSC PetaStream on a breezy day.
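
To spell out the back-of-envelope arithmetic behind these figures, using the ratings quoted above: $1.5\ \mathrm{MW} \div 0.4\ \mathrm{MW/rack} = 3.75$, so one turbine covers three racks with a little headroom; likewise $1\ \mathrm{EF} \div 1.2\ \mathrm{PF/rack} \approx 834$ racks, drawing $834 \times 400\ \mathrm{kW} \approx 333\ \mathrm{MW}$, which is where the exaflop numbers below come from.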

To create a supercomputer with a peak capability of an exaflop using RSC's platform, you'd need over 800 racks of PetaStream and over 300 MW of power to turn it all on.  That's over 200 of the above GE wind turbines and enough electricity to power about 290,000 homes in the U.S.  Wind farms of this size do exist; for example,

    300 MW Stateline Wind Farm.  Source: Wikimedia Commons
    the Stateline Wind Farm, which was built on the border between Oregon and Washington, has a capacity of about 300 MW.  Of course, wind farms of this capacity cannot be built in any old place.

    Commercial nuclear power plants can be built in a variety of places though, and they typically generate on the order of 1 gigawatt (GW) of power per reactor.  In my home state of New Jersey, the Hope Creek Nuclear Generating Station has a single reactor that was built to deliver about 1.2 GW of power:

    1.2 GW Hope Creek nuclear power station.  The actual reactor is housed in the concrete cylinder to the bottom left.  Courtesy of the Nuclear Regulatory Commission.

    This is enough to power almost 4 exaflops of PetaStream.  Of course, building a nuclear reactor for every exaflop supercomputer would be extremely costly, given the multi-billion dollar cost of building reactors like this.  Clearly, the energy efficiency (flops/watt) of computing technology needs to improve substantially before we can arrive at the exascale era.
    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/_posts/dursi/2014-10-1-scientific-data-shouldnt-be-written-in-texthtml.md b/2014/floating-point-data-shouldn-t-be-serialized-as-text/index.html similarity index 56% rename from _posts/dursi/2014-10-1-scientific-data-shouldnt-be-written-in-texthtml.md rename to 2014/floating-point-data-shouldn-t-be-serialized-as-text/index.html index 2a6fbc7..456b6d7 100644 --- a/_posts/dursi/2014-10-1-scientific-data-shouldnt-be-written-in-texthtml.md +++ b/2014/floating-point-data-shouldn-t-be-serialized-as-text/index.html @@ -1,23 +1,90 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2014-10-01 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/scientific-data-shouldnt-be-written-in-text.html -slug: floating-point-data-shouldn-t-be-serialized-as-text -title: Floating-Point Data Shouldn't Be Serialized As Text ---- - -


    - + + + + + + + Floating-Point Data Shouldn't Be Serialized As Text - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.
    + +
    +

    Floating-Point Data Shouldn't Be Serialized As Text

    +

Write data files in a binary format, unless you’re going to actually be reading the output - and you’re not going to be reading millions of data points.

    The reasons for using binary are threefold, in decreasing importance:

    -
    • Accuracy
    • Performance
    • @@ -26,10 +93,8 @@

      Accuracy concerns may be the most obvious. When you are converting a (binary) floating point number to a string representation of the decimal number, you are inevitably going to truncate at some point. That’s ok if you are sure that when you read the text value back into a floating point value, you are certainly going to get the same value; but that is actually a subtle question and requires choosing your format carefully. Using default formatting, various compilers perform this task with varying degrees of quality. This blog post, written from the point of view of a games programmer, does a good job of covering the issues; but note that for technical computing, we generally must be much more demanding about accuracy.

      -

      Let’s consider a little program which, for a variety of formats, writes a single-precision real number out to a string, and then reads it back in again, keeping track of the maximum error it encounters. We’ll just go from 0 to 1, in units of machine epsilon. The code follows:

      -
      #include <stdio.h>
       #include <math.h>
       #include <float.h>
      @@ -74,7 +139,6 @@
       
       

      and when we run it, we get:

      -
      $ ./accuracy
       Maximum errors:
             %11.4f	      %13.6f	      %15.8f	     %17.10f	          %f
      @@ -83,10 +147,8 @@
       
       

      Note that even using a format with 8 digits after the decimal place - which we might think would be plenty, given that single precision reals are only accurate to 6-7 decimal places - when the data makes a round trip through string-formatting we don’t get exact copies back, off by approximately $10^{-8}$. And this compiler’s default format does not give us accurate round-trip floating point values; some error is introduced! If you’re a video-game programmer, that level of accuracy may well be enough. For scientific/technical work, however, that might absolutely not be ok, particularly if there’s some bias to where the error is introduced, or if the error occurs in what is supposed to be a conserved quantity.

      -

Note that if you try running this code, you’ll notice that it takes a surprisingly long time to finish. That’s because, maybe surprisingly, performance is another real issue with text output of floating point numbers. Consider the following simple program, which just writes out a 2d array of 5000 × 5000 floats as text (using fprintf()) and as unformatted binary (using fwrite()). The code will follow, but to start, here are the timing outputs:

      -
      $ ./io-performance 5000
       Text      : time = 20.229191
       Raw Binary: time = 0.042213
      @@ -94,19 +156,14 @@
       
       

      Note that when writing to disk, the binary output is 479 times as fast as text output. There are two reasons for this - one is that you can write out data all at once, rather than having to loop; the other is that generating the string decimal representation of a floating point number is a surprisingly subtle operation which requires a significant amount of computing for each value.

      -

Finally, there is data size; the text file in the above example comes out (on my system - it depends on the compiler’s default floating string representation, etc.) to about 4 times the size of the binary file.

      -

      Now, there are real problems with binary output. In particular, raw binary output is very brittle. If you change platforms, or your data size changes, your output may no longer be any good. Adding new variables to the output will break the file format unless you always add new data at the end of the file, and you have no way of knowing ahead of time what variables are in a binary blob you get from your collaborator (who might be you, three months ago).

      -

Most of the downsides of binary output are avoided by using libraries which use binary output to serialize, but include enough metadata to describe the data. For output of large scientific arrays, NetCDF – which writes self-describing binary files that are much more “future proof” than raw binary – is a good choice. Better still, since it’s a standard, many tools read NetCDF files. In other contexts, formats like BSON make a lot of sense.

      -

      There are many NetCDF tutorials on the internet; one I wrote is is here. A simple example using NetCDF gives IO performance results much closer to raw binary than to text:

      -
      $ ./io-performance
       Text      : time = 20.504855
       Raw Binary: time = 0.049945
      @@ -115,7 +172,6 @@
       
       

      but gives you a nice self-describing file:

      -
      $ ncdump -h test.nc
       netcdf test {
       dimensions:
      @@ -129,7 +185,6 @@
       
       

      and file sizes about the same as raw binary:

      -
      $ du -sh test.*
       96M	test.dat
       96M	test.nc
      @@ -138,7 +193,6 @@
       
       

      the code follows:

      -
      #include <stdio.h>
       #include <stdlib.h>
       #include <sys/time.h>
      @@ -253,4 +307,76 @@
       }
       
      -


      \ No newline at end of file +

      (This post is crosslisted from a StackOverflow Answer.)

      + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2014/hadoop-for-hpcers/index.html b/2014/hadoop-for-hpcers/index.html new file mode 100644 index 0000000..6586669 --- /dev/null +++ b/2014/hadoop-for-hpcers/index.html @@ -0,0 +1,160 @@ + + + + + + + Hadoop For HPCers - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.
    + +
    +

    Hadoop For HPCers

    +

My colleague Mike Nolta and I have put together a half-day tutorial on Hadoop - briefly covering HDFS, MapReduce, Pig, and Spark - for an HPC audience, and put the materials on GitHub.

    + +

    The Hadoop ecosystem of tools continues to rapidly grow, and now includes tools like Spark and Flink that are very good for iterative numerical computation - either simulation or data analysis. These tools, and the underlying technologies, are (or should be) of real interest to the HPC community, but most materials are written for audiences with web application or maybe machine-learning backgrounds, which makes it harder for an HPC audience to see how they can be useful to them and how they might be applied.

    + +

Most of the source code is Python. Included on GitHub are all sources for the examples, a Vagrantfile for a VM to run the software on your laptop, and the presentation in Markdown and PDF. Feel free to fork, send pull requests, or use the materials as you see fit.

    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/_posts/glennklockwood/2014-5-17-tagbloggercom1999blog-4307061427721284246post-1233543740366021889.md b/2014/hadoop-s-uncomfortable-fit-in-hpc/index.html similarity index 59% rename from _posts/glennklockwood/2014-5-17-tagbloggercom1999blog-4307061427721284246post-1233543740366021889.md rename to 2014/hadoop-s-uncomfortable-fit-in-hpc/index.html index baed1c9..cb36993 100644 --- a/_posts/glennklockwood/2014-5-17-tagbloggercom1999blog-4307061427721284246post-1233543740366021889.md +++ b/2014/hadoop-s-uncomfortable-fit-in-hpc/index.html @@ -1,33 +1,176 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2014-05-17 06:28:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2014/05/hadoops-uncomfortable-fit-in-hpc.html -slug: hadoop-s-uncomfortable-fit-in-hpc -title: Hadoop's Uncomfortable Fit in HPC ---- - -Hadoop has come up in a few conversations I've had in the last few days, and it's occurred to me that the supercomputing community continues having a difficult time fully understanding how Hadoop currently fits (and should fit) into scientific computing.  HPCwire was kind enough to run a piece that let me voice my perspective of the realities of Hadoop use in HPC a few months ago--that is, scientists are still getting a feel for Hadoop and what it can do, and it just isn't seeing widespread adoption in scientific computing yet.  This contrasts with the tremendous buzz surrounding the "Hadoop" brand and ultimately gives way to strange dialogue, originating from the HPC side of the fence, like this:

    -
    -I'm not sure if this original comment was facetious and dismissive of the Hadoop buzz or if it was a genuinely interested observation.  Regardless of the intent, both interpretations reveal an important fact: Hadoop is being taken seriously only at a subset of supercomputing facilities in the US, and at a finer granularity, only by a subset of professionals within the HPC community.  Hadoop is in a very weird place within HPC as a result, and I thought it might benefit the greater discussion of its ultimate role in research computing if I laid out some of the factors contributing to Hadoop's current awkward fit.  The rest of this post will strive to answer two questions: Why does Hadoop remain at the fringe of high-performance computing, and what will it take for it to be a serious solution in HPC?

    #1. Hadoop is an invader

    I think what makes Hadoop uncomfortable to the HPC community is that, unlike virtually every other technology that has found successful adoption within research computing, Hadoop was not designed by HPC people.  Compare this to a few other technologies that are core to modern supercomputing:

    By contrast, Hadoop was developed by Yahoo, and the original MapReduce was developed by Google.  They were not created to solve problems in fundamental science or national defense; they were created to provide a service for the masses.  They weren't meant to interface with traditional supercomputers or domain scientists; Hadoop is very much an interloper in the world of supercomputing.
This is a crosspost from Glenn K. Lockwood, personal thoughts and opinions of a supercomputing enthusiast. See the original post here.
    + +
    +

    Hadoop's Uncomfortable Fit in HPC

    +

    Hadoop has come up in a few conversations I’ve had in the last few days, and it’s occurred to me that the supercomputing community continues having a difficult time fully understanding how Hadoop currently fits (and should fit) into scientific computing.  HPCwire was kind enough to run a piece that let me voice my perspective of the realities of Hadoop use in HPC a few months ago–that is, scientists are still getting a feel for Hadoop and what it can do, and it just isn’t seeing widespread adoption in scientific computing yet.  This contrasts with the tremendous buzz surrounding the “Hadoop” brand and ultimately gives way to strange dialogue, originating from the HPC side of the fence, like this:

    <div class="separator" style="clear: both; text-align: center;"></div> +
    <div class="separator" style="clear: both; text-align: center;"></div> +I’m not sure if this original comment was facetious and dismissive of the Hadoop buzz or if it was a genuinely interested observation.  Regardless of the intent, both interpretations reveal an important fact: Hadoop is being taken seriously only at a subset of supercomputing facilities in the US, and at a finer granularity, only by a subset of professionals within the HPC community.  Hadoop is in a very weird place within HPC as a result, and I thought it might benefit the greater discussion of its ultimate role in research computing if I laid out some of the factors contributing to Hadoop’s current awkward fit.  The rest of this post will strive to answer two questions: Why does Hadoop remain at the fringe of high-performance computing, and what will it take for it to be a serious solution in HPC?

#1. Hadoop is an invader

I think what makes Hadoop uncomfortable to the HPC community is that, unlike virtually every other technology that has found successful adoption within research computing, Hadoop was not designed by HPC people.  Compare this to a few other technologies that are core to modern supercomputing:

  • MPI was literally born at the world’s largest supercomputing conference, and the reference implementation was developed by computer scientists at major universities and national labs.  It was developed by scientists for scientists.
  • OpenMP was developed by an industrial consortium comprised of vendors of high-performance computing hardware and software.  Like MPI, this standard emerged as a result of vendor-specific threading APIs causing compatibility nightmares across different high-end computing platforms.
  • CUDA was developed out of Brook, which was developed by computer scientists at Stanford.  Again, CUDA now is largely targeted at high-performance computing (although this is changing - and it’ll be interesting to see if adoption outside of HPC really happens).

By contrast, Hadoop was developed by Yahoo, and the original MapReduce was developed by Google.  They were not created to solve problems in fundamental science or national defense; they were created to provide a service for the masses.  They weren’t meant to interface with traditional supercomputers or domain scientists; Hadoop is very much an interloper in the world of supercomputing.


    The notion that Hadoop's commercial origins make it contentious for stodgy people in the traditional supercomputing arena may sound silly without context, but the fact is, developing a framework for a commercial application rather than a scientific application leaves it with an interesting amount of baggage.

    -

    #2. Hadoop looks funny

    The most obvious baggage that Hadoop brings with it to HPC is the fact that it is written in Java.  One of the core design features of the Java language was to allow its programmers to write code once and be able to run it on any hardware platform--a concept that is diametrically opposite to the foundations of high-performance computing, where code should be compiled and optimized for the specific hardware on which it will run.  Java made sense for Hadoop due to its origins in the world of web services, but Java maintains a perception of being slow and inefficient.  Slow and inefficient codes are, frankly, offensive to most HPC professionals, and I'd wager than a majority of researchers in traditional HPC scientific domains simply don't know the Java language at all.  I sure don't.
    +

    #2. Hadoop looks funny

    +
    The most obvious baggage that Hadoop brings with it to HPC is the fact that it is written in Java.  One of the core design features of the Java language was to allow its programmers to write code once and be able to run it on any hardware platform--a concept that is diametrically opposite to the foundations of high-performance computing, where code should be compiled and optimized for the specific hardware on which it will run.  Java made sense for Hadoop due to its origins in the world of web services, but Java maintains a perception of being slow and inefficient.  Slow and inefficient codes are, frankly, offensive to most HPC professionals, and I'd wager than a majority of researchers in traditional HPC scientific domains simply don't know the Java language at all.  I sure don't.

    The idea of running Java applications on supercomputers is beginning to look less funny nowadays with the explosion of cheap genome sequencing.  Some of the most popular foundational applications in bioinformatics (e.g., GATK and Picard) are written in Java, and although considered an "emerging community" within the field of supercomputing, bioinformatics is rapidly outgrowing the capabilities of lab-scale computing.  Perhaps most telling is Intel's recent contributions to the Java-based GATK which facilitate much richer use of AVX operations for variant calling.

    With that being said though, Java is still a very strange way to interact with a supercomputer.  Java applications don't compile, look, or feel like normal applications in UNIX as a result of their cross-platform compatibility.  Its runtime environment exposes a lot of very strange things to the user for no particularly good reason (-Xmx1g?  I'm still not sure why I need to specify this to see the version of Java I'm running, much less do anything else**) and it doesn't support shared-memory parallelism in an HPC-oriented way (manual thread management, thread pools...yuck).  For the vast majority of HPC users coming from traditional domain sciences and the professionals who support their infrastructure, Java applications remain unconventional and foreign.

    -
    ** A few readers have pointed out that this isn't necessary, and on regular desktops or servers, they would be correct.  However, this remark is true on multi-user, shared resources like supercomputer login nodes where ulimits exist to prevent one user from rendering the node unusable for everyone else.  For example, we only allow up to 4 GB of RAM per user on our larger machine's login nodes, and this is not sufficient to run java -version.  Yes, there are ways to work around this, but that's the whole point I was trying to make--this is an aspect of Java that is weird when compared to plain old C and Fortran applications.
    +
    ** A few readers have pointed out that this isn't necessary, and on regular desktops or servers, they would be correct.  However, this remark is true on multi-user, shared resources like supercomputer login nodes where ulimits exist to prevent one user from rendering the node unusable for everyone else.  For example, we only allow up to 4 GB of RAM per user on our larger machine's login nodes, and this is not sufficient to run java -version.  Yes, there are ways to work around this, but that's the whole point I was trying to make--this is an aspect of Java that is weird when compared to plain old C and Fortran applications.

    -

    #3. Hadoop reinvents HPC technologies poorly

    For those who have taken a serious look at the performance characteristics of Hadoop, the honest truth is that it re-invents a lot of functionality that has existed in HPC for decades, and it does so very poorly.  Consider the following examples:
    +

    #3. Hadoop reinvents HPC technologies poorly

    +
    For those who have taken a serious look at the performance characteristics of Hadoop, the honest truth is that it re-invents a lot of functionality that has existed in HPC for decades, and it does so very poorly.  Consider the following examples:
    1. Hadoop uses TCP with a combination of REST and RPC for inter-process communication.  HPC has been using lossless DMA-based communication, which provides better performance in all respects, for years now.
    2. Hadoop doesn't really handle multi-tenancy and its schedulers are terrible.  The architecture of Hadoop is such that, with a 3x replication factor, a single cluster can only support three concurrent jobs at a time with optimal performance.  Its current scheduler options have very little in the way of intelligent, locality-aware job placement.
    3. Hadoop doesn't support scalable interconnect topologies.  The rack-aware capabilities of Hadoop, while powerful for their intended purpose, do not support scalable network topologies like multidimensional meshes and toruses.  They handle Clos-style network topologies, period.
    4. HDFS is very slow and very obtuse.  Parallel file systems like Lustre and GPFS have been an integral part of HPC for years, and HDFS is just very slow and difficult to use by comparison.  The lack of a POSIX interface means getting data in and out is tedious, and its vertical integration of everything from replication and striping to centralized metadata in Java makes it rather unresponsive.
    However, these poor reinventions are not the result of ignorance; rather, Hadoop's reinvention of a lot of HPC technologies arises from reason #1 above: Hadoop was not designed to run on supercomputers and it was not designed to fit into the existing matrix of technologies available to traditional HPC.  Rather, it was created to interoperate with web-oriented infrastructure.  Specifically addressing the above four points,
    1. Hadoop uses TCP/IP and Ethernet because virtually all data center infrastructure is centered around these technologies, not high-speed RDMA.  Similarly, REST and RPC are used across enterprise-oriented services because they are simple protocols.
    2. Multi-tenancy arises when many people want to use a scarce resource such as a supercomputer; in the corporate world, resources should never be a limiting factor because waiting in line is what makes consumers look elsewhere.  This principle and the need for elasticity is what has made the cloud so attractive to service providers.  It follows that Hadoop is designed to provide a service for a single client such as a single search service or data warehouse.
    3. Hadoop's support for Clos-style (leaf/spine) topologies models most data center networks.  Meshes, toruses, and more exotic topologies are exclusive to supercomputing and had no relevance to Hadoop's intended infrastructure.
    4. HDFS implements everything in software to allow it to run on the cheapest and simplest hardware possible--JBODs full of spinning disk.  The lack of a POSIX interface is a direct result of Hadoop's optimization for large block reads and data warehousing.  By making HDFS write-once, a lot of complex distributed locking can go out the window because MapReduce doesn't need it.
    This loops back around to item #1 above: Hadoop came from outside of HPC, and it carries this baggage with it.

    #4. Hadoop evolution is backwards

    A tiny anecdote

    I gave two MapReduce-related consultations this past month which really highlighted how this evolutionary path of Hadoop (and MapReduce in general) is not serving HPC very well.

My first meeting was with a few folks from a large clinical testing lab that was beginning to incorporate genetic testing into their service lineup. They were having a difficult time keeping up with the volume of genetic data being brought in by their customers and were exploring Hadoop BLAST as an alternative to their current BLAST-centric workflow. The problem, though, is that Hadoop BLAST was developed as an academic project when Hadoop 0.20 (which has evolved into Hadoop 1.x) was the latest and greatest technology. Industry has largely moved beyond Hadoop version 1 onto Hadoop 2 and YARN, and this lab was having significant difficulties in getting Hadoop BLAST to run on their brand new Hadoop cluster because its documentation hasn't been updated in three years.

The other meeting was with a colleague who works for a multinational credit scoring company.  They were deploying Spark on their Cloudera cluster and, much like the aforementioned clinical testing company, their data collection processes were outgrowing their computational capabilities, so they were exploring better alternatives for data exploration.  The problem they encountered was not one caused by their applications being frozen in time after someone finished their Ph.D.; rather, their IT department had botched the Spark installation.

    This disparity is pervasive throughout the Hadoop application ecosystem.  Tools created for scientific research seem to be abandoned just as quickly as they were created, so looking for existing Hadoop-based tools for research can be a frustrating game of chasing 404s.
Generally speaking, the development of all technologies at the core of HPC has followed a similar evolutionary path into broad adoption.  Both software and hardware technologies arise as disparities between available and necessary solutions widen.  Researchers often hack together non-standard solutions to these problems until a critical mass is achieved, and a standard technology emerges to unify these varying solutions and fill the gap.  OpenMP is a great example--before it became standard, there were a number of vendor-specific pragma-based multithreading APIs; Cray, Sun, and SGI all had their own implementations that did the same thing but made porting codes between systems very unpleasant.  These vendors ultimately all adopted a standard interface which became OpenMP, and that technology has been embraced because it provided a portable way of solving the original motivating problem.

    The evolution of Hadoop has very much been a backwards one; it entered HPC as a solution to a problem which, by and large, did not yet exist.  As a result, it followed a common, but backwards, pattern by which computer scientists, not domain scientists, get excited by a new toy and invest a lot of effort into creating proof-of-concept codes and use cases.  Unfortunately, this sort of development is fundamentally unsustainable because of its nucleation in a vacuum, and in the case of Hadoop, researchers moved on to the next big thing and largely abandoned their model applications as the shine of Hadoop faded (see sidebar).  This has left a graveyard of software, documentation, and ideas that are frozen in time and rapidly losing relevance as Hadoop moves on.

    Consider this evolutionary path of Hadoop compared to OpenMP: there were no OpenMP proofs-of-concept.  There didn't need to be any; the problems had already been defined by the people who needed OpenMP, so by the time OpenMP was standardized and implemented in compilers, application developers already knew where it would be needed.

    Not surprisingly, innovation in the Hadoop software ecosystem remains in the sphere for which it was developed: data warehousing and data analytics.  Applications and libraries like Impala, Parquet, and Spark are at the cutting edge of applied analytics in the Hadoop/MapReduce ecosystem and represent useful, usable implementations of some really novel ideas.

    How can Hadoop fit into HPC?

All of this explains why Hadoop is in such an awkward position, but does it mean Hadoop (and MapReduce) will never be welcome in the world of HPC?  Alternatively, what would it take for Hadoop to become a universally recognized core technology in HPC?

    I'll say up front that there are no easy answers--if there were, I wouldn't be delivering this monologue.  However, solutions are being developed and attempted to address a few of the four major barriers I outlined above.

    Reimplement MapReduce in an HPC-oriented way

    This idea has been tried in a number of different ways (see MPI MapReduce and Phoenix), but none have really gained traction.  I suspect this is largely the result of one particular roadblock: there just aren't that many problems which are so onerous in the traditional HPC space that reimplementing a solution in a relatively obscure implementation of MapReduce becomes worth the effort.  As I mentioned in point #4 above, HPC vendors haven't been creating their own MapReduce APIs to address the demands of their customers as they did for OpenMP and MPI's predecessors, so Hadoop's role in HPC is not clearly addressing a problem that needs an immediate solution.

    This is not to say that the data-oriented problems at which Hadoop excels do not exist within the domain sciences.  Rather, there are two key roles that Hadoop/MapReduce will play in scientific computations:
    • Solving existing problems:  The most activity I've seen involving Hadoop in domain sciences comes out of bioinformatics and observational sciences.  Bioinformatics, as a consumer of HPC cycles, is still in its infancy, but the data sets being generated by next-generation sequencers are enormous--the data to describe a single human genome, even when compressed, takes up about 120 GB.  Similarly, advances in imaging and storage technology have allowed astronomy and radiology to generate extremely large collections of data.
    • Enabling new problems: One of Hadoop's more long-term promises is not solving the problems of today, but giving us a solution to problems we previously thought to be intractable.  Although I can't disclose too much detail, an example of this lies in statistical mechanics: many problems involving large ensembles of particles have relied on data sampling or averaging to reduce the sheer volume of numerical information into a usable state.  Hadoop and MapReduce allow us to start considering what deeper, more subtle patterns may emerge if a massive trajectory through phase space could be dumped and analyzed with, say, machine learning methods.

    Unfortunately, reimplementing MapReduce inside the context of existing HPC paradigms represents a large amount of work for a relatively small subset of problems.  Some sort of catalyzing scientific problem will need to emerge to give vendors and application developers a strong reason to start re-thinking their problems in terms of MapReduce.
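
To make "re-thinking their problems in terms of MapReduce" concrete, here is a rough sketch of what a map and a reduce step look like when expressed with MPI.  This is my own illustration using the mpi4py bindings rather than any of the projects mentioned above, and input.txt is a hypothetical input file:

from collections import Counter
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# The "input split" step: rank 0 hands every rank a chunk of records
if rank == 0:
    records = open("input.txt").read().splitlines()
    chunks = [records[i::size] for i in range(size)]
else:
    chunks = None
my_records = comm.scatter(chunks, root=0)

# Map (with a local combine): count the words in this rank's records
local_counts = Counter(w for line in my_records for w in line.split())

# Reduce: Counter objects support "+", so MPI.SUM merges them onto rank 0
totals = comm.reduce(local_counts, op=MPI.SUM, root=0)
if rank == 0:
    print(totals.most_common(10))

Run under mpiexec, this does the same thing as the canonical Hadoop word count--but notice how much of what makes Hadoop attractive (fault tolerance, data locality, the automatic sort and shuffle) has been given up or would have to be rebuilt by hand.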

    Incorporate HPC technologies in Hadoop

    Rather than reimplementing Hadoop/MapReduce as an HPC technology, I think a more viable approach forward is to build upon the Hadoop framework and correct some of its poorly reinvented features I described in item #3 above.  This will allow HPC to continuously fold in new innovations being developed in Hadoop's traditional competencies--data warehousing and analytics--as they become relevant to scientific problems.  Some serious effort is being made to this end:

    In addition to incorporating these software technologies from HPC into Hadoop, there are some really clever things you can do with hardware technologies that make Hadoop much more appealing to traditional HPC.  I am working on some exciting and innovative (if I may say so) architecture designs that will further lower the barrier between Hadoop and HPC at my day job, and with any luck, we'll get to see some of these ideas go into production in the next few years.

    Make MapReduce Less Weird

The very nature of MapReduce is strange to supercomputing--it solves a class of problems that the world's fastest supercomputers just weren't designed to solve.  Rather than making raw compute performance the most important capability, MapReduce treats I/O scalability as the most important capability, with CPU performance secondary.  As such, it will always be weird until such a day comes when science faces an equal balance of compute-limited and data-limited problems.  Fundamentally, I'm not sure that such a day will ever come.  Throwing data against a wall to see what sticks is good, but deriving analytical insight is better.

With all that being said, there's room for improvement in making Hadoop less weird.  Spark is an exciting project because it sits at a nice point between academia and industry; developed at Berkeley but targeted directly at Hadoop, it feels like it was developed for scientists, and it treats high performance as a first-class citizen by providing the ability to utilize memory a lot more efficiently than Hadoop does.  It also doesn't have such a heavy-handed Java-ness to it and provides a reasonably rich interface for Python (and R support is on the way!).  There are still a lot of rough edges (this is where the academic origins shine through, I think), but I'm hopeful that it cleans up under the Apache project.

    Perhaps more than (or inclusive of) the first two paths forward in increasing MapReduce adoption in research science, Spark holds the most promise in that it feels less like Hadoop and more normal from the HPC perspective.  It doesn't force you to cast your problem in terms of a map and a reduce step; the way in which you interact with your data (your resilient distributed dataset, or RDD, in Spark parlance) is much more versatile and is more likely to directly translate to the logical operation you want to perform.  It also supports the basic things Hadoop lacks such as iterative operations.
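
As a hedged illustration of that versatility--my own sketch against the PySpark API, with trajectory.log as a hypothetical input of comma-separated records--note how naturally filtering, reduction, and iteration compose on an RDD:

from pyspark import SparkContext

sc = SparkContext("local[*]", "rdd-example")
lines = sc.textFile("trajectory.log")

# No forced map-then-reduce: chain whatever transformations make sense
events = (lines.map(lambda l: l.split(","))
               .filter(lambda f: len(f) == 3)
               .map(lambda f: (f[0], float(f[2])))
               .cache())

# A reduction when a reduction is what you want...
print(events.reduceByKey(lambda a, b: a + b).take(5))

# ...and ordinary iteration over the cached data when it isn't--the
# pattern that vanilla Hadoop MapReduce handles so poorly
for cutoff in (1.0, 10.0, 100.0):
    print(cutoff, events.filter(lambda kv: kv[1] > cutoff).count())

sc.stop()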

    Moving Forward

I think I have a pretty good idea about why Hadoop has received a lukewarm, and sometimes cold, reception in HPC circles, and many of the underlying reasons are wholly justified.  Hadoop's from the wrong side of the tracks from the purists' perspective, and it's not really changing the way the world will do its high-performance computing.  There is a disproportionate amount of hype surrounding it as a result of its revolutionary successes in the commercial data sector.

However, Hadoop and MapReduce aren't to be dismissed outright either.  A growing subset of scientific problems is pushing against a scalability limit in terms of data movement, and at some point, attacking these problems with conventional, CPU-oriented parallelism will amount to using the wrong tool for the job.  The key, as is always the case in this business, is to understand the job and realize that there are more tools in the toolbox than just a hammer.

As these data-intensive and data-bound problems gain a growing presence in traditional HPC domains, I hope the progress being made on making Hadoop and MapReduce more relevant to research science continues.  I mentioned above that great strides are being made to truly bridge the utility gap and make MapReduce a serious go-to solution for scientific problems, and although Hadoop remains on the fringe of HPC today, it won't pay to dismiss it for too much longer.

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

ISC 2014 - Auf Wiedersehen Leipzig


I’ve just returned from International Supercomputing 2014, which took place in Leipzig, Germany. As was the case in 2013, I greatly enjoyed my time at the conference, and the hospitality in Leipzig. It’s a wonderful city to visit.


You will have read in my previous blogs about my experiences with ARM based developer systems, and running IBM Platform LSF. For me, ISC 2014 was a very interesting event for one big reason - variety! Variety is the spice of life, as they say. And the variety in this case came from the displays at OpenPOWER Foundation members Mellanox and NVIDIA, as well as servers based on the newly unveiled Applied Micro X-Gene 64-bit ARM processors.


Although small in size, the Tyan POWER8 motherboard with an NVIDIA Tesla K40 installed made a strong statement. Even though OpenPOWER was only founded in 2013, we are already seeing the benefits of the foundation, with a varied member base - including education, interconnect, and accelerator vendors - all with an HPC pedigree. With a rich and growing membership, these look to be exciting times for the IBM POWER8 processor and the OpenPOWER Foundation.


For those of you who did not attend, the IBM booth had a number of live demos including the IBM Platform Computing Cloud Service, which is built on top of IBM SoftLayer infrastructure. This service can provide both hybrid and stand-alone clouds and is ideally suited for HPC workloads - as it’s non-virtualized.


So we say Auf Wiedersehen to Leipzig for now and look forward to the spice that New Orleans will provide this autumn, where there will surely be more exciting things emerging from the OpenPOWER Foundation!

This is a crosspost from Glenn K. Lockwood, personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

Linux perf, libquadmath, and GFortran's Insane Behavior

Executive Summary: libquadmath was introduced in GFortran 4.6, which fundamentally changed what the -fdefault-real-8 switch does.  Rather than promoting all floating point arithmetic to double precision, it doubles the width of all floating point types, so explicitly typed double precision is converted to quad precision.  This quad precision is orders of magnitude slower since it must be done in software, causing binaries built with -fdefault-real-8 to grind to a halt when built with GFortran 4.6 and newer.  The solution is to add -fdefault-double-8 to undo this implicit doubling of explicit real*8.

    What follows is a case study of sorts in how I discovered this.  Maybe my methodology will be useful for others who are tasked with debugging performance problems.

    The Problem

    A colleague from my past in research science sent me an e-mail this morning with a very typical problem that people run into whenever they try transferring their applications from one machine to another.  He wrote,
    "I've been having a problem with compiling on a new workstation that is an HP with the newer gcc/gfortran 4.6.3.  The executable for the code runs very slow.  If I compile the exact same on the cluster or one of the Dell workstations (both have gfortran 4.4.3) it runs very fast on both.  Also, if I transfer the compiled binary from the cluster to the new HP workstation, it runs fast."
That is to say,

                                   Run on New Workstation   Run on Old Workstation
    Compiled on New Workstation    SLOW                     ?
    Compiled on Old Workstation    FAST                     FAST

    The fact that the old binary ran fast on the new machine ruled out some odd hardware or kernel problem and suggested that the issue was somewhere in userland.  Userland issues are always fixable issues, so this alone suggests that there will be a solution to this issue if we dig deep enough.

    A Little Logic, A Little Arcane Knowledge

    The difference in performance was probably related to the upgrade from GFortran 4.4 to GFortran 4.6, and just to make sure this was a well-defined problem, I re-built the application and ran the test case on a local machine to ensure that the problem was reproducible on hardware and an OS with which I was familiar.  I built with
    • The GFortran 4.4 that ships with Red Hat 6.  My colleague said that his build with GFortran 4.4 ran fine, and I was able to confirm that GFortran 4.4 produced a reliably fast executable.
• The GFortran 4.6 that ships with Ubuntu 12.04 (my colleague's machine).  He said that this one ran very slowly, and I could confirm that GFortran 4.6 did, indeed, produce an unusably slow binary.
    • The GFortran 4.8 that I built as a "latest-and-greatest" version on my test system.  I wanted to verify that there wasn't some bug in 4.6 that was patched out of subsequent releases.  Unfortunately this was not the case, as GFortran 4.8 also produced a very slow binary.
    The good news was that the problem is reproducible and we have a baseline case where the application does behave as intended.  This meant that, in the worst-case scenario, we can do line-by-line comparisons of the assembly code for the working and non-working binaries to see where the problem lies.  Thus, we know the problem has a solution.

Of course, the bad news was that some change made between GFortran 4.4 and GFortran 4.6 broke this code, and we have to figure out exactly what this change was.  Two things stood out to me as potential causes:
    1. GFortran has been known to throw backwards compatibility to the wind and make wild changes default behavior.  For example, g77 and GFortran 4.1 used 8-byte record marker lengths by default, but then switched over to 4-byte markers in GFortran 4.2 to be in line with what every other Fortran compiler does.  This meant that data generated by GFortran 4.1 was not compatible with anything else.  It wouldn't have surprised me if they did this sort of thing again.
    2. GCC introduced libquadmath in version 4.6 which made all GFortran objects built with 4.6 or later pull in libquadmath.  This used to cause me problems because Red Hat 5 did not ship with libquadmath, making all binaries dynamically linked against GFortran 4.6 not portable* to RHEL5.  Thus, this issue might have something to do with the addition of libquadmath.
    * I acknowledge that trying to move binaries between machines is pretty crazy in its own right.  Explaining why this was an actual issue for me is both uninteresting and beyond the scope of this post.

    Examining Baseline Performance

    All modern Linux kernels ship with the perf subsystem which makes diagnosing performance problems significantly easier than it has been in the past.  If you haven't familiarized yourself with them yet, you really need to--all it took for me was a 2-minute demo by +Peter Kjellström at SC'13 last year to realize Linux perf is serious business.  We will simply use it as an alternative to gprof in this case so that we don't have to re-build all this code with instrumentation, but perf can also do a lot of things that used to be the exclusive domain of special-purpose libraries like PAPI and IPM.

    Running the "good" build of this application through perf establishes our baseline expected behavior:

    $ perf record -o fast.report -g ./mdvgg.x
    WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,
    check /proc/sys/kernel/kptr_restrict.

    Samples in kernel functions may not be resolved if a suitable vmlinux
    file is not found in the buildid cache or in the vmlinux path.

    Samples in kernel modules won't be resolved at all.

    If some relocation was applied (e.g. kexec) symbols may be misresolved
    even with a suitable vmlinux or kallsyms file.

    Pressure list found
    taux,y,z: 0.000000 0.000000 0.000000
    txx0,tyy0,tzz0: 1013250. 1013250. 1013250.
    Wolf beta value: 4.4600E-008

    ***Lennard-Jones parameters, epsilons in ergs ***
    0.00000D+00 0.00000D+00 0.00000D+00 0.00000D+00 0.00000D+00
    ...
    average energy per atom: -0.39932E-11 -57.4739kcal/mole
    average energy with selfterm: -0.51864E-11 -74.6481kcal/mole

    [ perf record: Woken up 3 times to write data ]
    [ perf record: Captured and wrote 0.895 MB fast.report (~39121 samples) ]
    where
    • -o fast.report dumps the recorded data to a file called fast.report
    • -g generates call graphs in addition to the flat profile (this isn't always necessary)
    • ./mdvgg.x is the application binary we are profiling

    The scary warnings about kernel functions are harmless and a result of this entire debugging process being run as an unprivileged user.  Once the job finishes running, viewing the report reveals (with some extraneous data removed for brevity):

    $ perf report -i fast.report --stdio --sort dso -g flat
    ...
    # Overhead  Command         Shared Object
    # ........ ....... ....................
    #
    72.13% mdvgg.x mdvgg.x
    61.12%
    pairs_
    move1_

    7.00%
    listwater_
    bulk_
    MAIN__
    0x400efd
    ...
    20.99% mdvgg.x libc-2.12.so
    14.09%
    __memset_sse2
    bulk_
    MAIN__
    0x400efd

    0.97%
    __memset_sse2
    MAIN__
    0x400efd
    where
    • -i fast.report is the file containing our recorded data
    • --stdio prevents perf from using the interactive text user interface (I only added this because I can't paste interactions into a blog)
    • --sort dso presents the output in a relatively compact way sorted by the shared object in which time was being spent
    • -g flat presents a relatively flat profile (we don't need the full call graph)

    Thus, the majority of our runtime is taken up in a subroutine called pairs, called from move1 when this application is working normally.  A surprising fraction of runtime was also consumed by memset(3) in this case, but this was the result of my test input being so small that most of the actual runtime was spent doing initialization.  Even though this is generally not a great way to test application performance, it is acceptable in this case because even initialization takes 20x longer with the "bad" binary built against GFortran 4.6 (which in itself is a very insightful behavior that suggests that there is something systematically wrong with the bad binary).  The simplest and shortest possible run required to reproduce the issue should elucidate where the problem lies.

    Now, profiling the "bad" binary built with GFortran 4.6 should give us a definite place to start looking:

    $ perf record -o slow.report -g ./mdvgg.x
    WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,
    check /proc/sys/kernel/kptr_restrict.
    ...

    $ perf report -i slow.report --stdio --sort dso -g flat
    ...
    # Overhead Shared Object
    # ........ ....................
    #
    93.59% libgcc_s.so.1
    48.69%
    __sfp_handle_exceptions

    13.54%
    __multf3

    6.89%
    __addtf3

    6.16%
    __subtf3

    ...
    3.02% libquadmath.so.0.0.0
    1.62%
    __sfp_handle_exceptions

    2.67% mdvgg.x
    1.91%
    hcristo_
    fcc100_

    ...

    Well there's our problem!  Only 2.67% of the application runtime is actually being spent running the mdvgg.x application, and a huge amount of time is being spent in some __sfp_handle_exceptions call.  What gives?

    Now I'm not ashamed to say that I routinely turn to Google to figure out what most of this sort of computer nonsense means.  Unfortunately, searching for "__sfp_handle_exceptions" doesn't turn up anything useful, so the only hint we have is that the name of the call suggests that this "bad" build is generating a lot of floating point exceptions (FPEs).

    The logical next step is to rebuild the application with a lot of FPE trapping (FCFLAGS+=-ffpe-trap=invalid,zero,overflow,underflow,denormal).  This will determine if the code had been generating a ton of floating point exceptions all along but GFortran had just gotten stricter in 4.6.  Unfortunately, doing this just leads to more disappointment--the application does not generate any of the common floating point exceptions, meaning that this mysterious __sfp_handle_exceptions is, in fact, not handling serious floating point exceptions.  What else could it be doing?

Although this particular application was both quick enough to run entirely through perf and serial enough to not require any special considerations with MPI, getting these perf profiles from long-running and highly parallel codes is similarly easy.  Instead of running the application through perf (perf record -o fast.report -g ./mdvgg.x), you can attach perf to an already-running process for a fixed period of time to generate a sample of the overall performance profile.  This is achieved by doing perf record -o fast.report -g -p <pid> sleep 10.  Perf attaches to the specified pid and gathers data from it for as long as the dummy sleep 10 command runs--ten seconds in this case--before detaching.

    Quad-Precision: Back to the Intel 80286

Giving up on __sfp_handle_exceptions and moving on down the performance profile, it appears that suddenly libquadmath (which, as I mentioned above, appeared after our "working" compiler version was released) is soaking up cycles.  Furthermore, a quick googling of some of those big offenders like __multf3, __addtf3, and __subtf3 reveals that they are software implementations of long-double arithmetic--the application is now doing quad precision arithmetic in this "bad" build whereas it is definitely not doing this in our "good" build.

Suddenly everything becomes a little clearer: long-double floating point arithmetic involves numbers stored in 128-bit precision, but 64-bit CPUs (or more properly, FPUs) are only capable of handling (you guessed it) 64-bit precision floating point calculations.  Thus, to get an application to do calculations in 128-bit precision, a software layer (libquadmath) must emulate 128-bit floating point hardware and actually translate the binary logic into something the 64-bit CPU can understand.  This is analogous to getting a 3rd grader to do a large calculation (e.g., 6×8) by breaking it into pieces they know how to solve (e.g., 8+8, 8+8, 8+8), and it is a very slow process.  This massive performance loss is why Intel has had a hardware floating point unit in every processor it has designed since the 80386 (ca. 1985).
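
The magnitude of that penalty is easy to demonstrate without any Fortran at all.  Here is a quick, hedged sketch of my own in Python, whose Decimal type does its arithmetic in software much as libquadmath does for 128-bit reals:

import timeit

# One million multiplies in hardware double precision...
hw = timeit.timeit("x * y",
                   setup="x, y = 3.141592653589793, 2.718281828459045",
                   number=1000000)

# ...and the same million multiplies emulated in software
sw = timeit.timeit("x * y",
                   setup="from decimal import Decimal; "
                         "x = Decimal('3.141592653589793'); "
                         "y = Decimal('2.718281828459045')",
                   number=1000000)

print("hardware: %.3f s   software: %.3f s   slowdown: %.1fx" % (hw, sw, sw / hw))

The exact ratio will vary from machine to machine, but the software path always loses badly--the same flavor of slowdown the perf profile above is showing.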

    The obvious question is then why GFortran 4.6 has decided to start carrying out all of the calculations in this code in quad precision by default.  Surely the GFortran developers didn't think forcing all arithmetic to be done in software was a good idea, right?

    Redefining Default Behavior

    Of course not.

    The next challenge, then, is to dig through the GFortran 4.6 manual to figure out what the libquadmath integration did to default behavior, or alternatively, what compiler flags started changing the precision of variables and calculations automatically.

    This is where knowledge of Fortran becomes important, because an unfortunate aspect of F77 (which has carried forward in F90) is its implicit typing.  A novice Fortran programmer (like a new graduate student) may think that doing something like

    implicit real*8(a-h,o-z)
    value1 = 0.31415926535e+1

    will store a double-precision (real*8) value in value1.  This isn't the case, as the "e+1" instead of "d+1" tends to render this a single-precision value.  This isn't always the case, but let it suffice to say that the details get messy and I've seen different compilers handle this in different ways by default.

    Anyway, every Fortran compiler has options to override this implicit typing and force all floating point values into double precision.  In GFortran, this has traditionally been -fdefault-real-8; that is, the default data type for real (i.e., single-precision) values is real*8, or double precision.  In this particular code's makefile, this flag was enabled to override the sloppy coding practices of decades of graduate students and ensure precision wasn't going down the drain because someone used E's instead of D's in 1997.

    When a simple search for "quadmath" in the GFortran 4.6 manual turns up nothing, searching for -fdefault-real-8 is the next step.  Lo and behold, this gem appears:
    -fdefault-real-8
    Set the default real type to an 8 byte wide type. Do nothing if this is already the default. This option also affects the kind of non-double real constants like 1.0, and does promote the default width of DOUBLE PRECISION to 16 bytes if possible, unless -fdefault-double-8 is given, too. 
    Bingo.  Any code that previously used -fdefault-real-8 to ensure that all floating point arithmetic was being done in double precision now does all explicitly typed double precision arithmetic as 128-bit quad precision in software as its effective default behavior.  What's worse is that this change in behavior isn't even mentioned in the release notes for GFortran 4.6 because -fdefault-real-8 has always tried to promote real*8 to real*16 as its intended behavior; it simply never succeeded because GFortran didn't support software quad-precision before libquadmath appeared in 4.6.

    Quite frankly, defining the behavior of something as straightforward-sounding as -fdefault-real-8 to be so environment-specific is insane.  The only use case where this new behavior would even make sense is if a programmer intentionally mixes real*4 and real*8 datatypes within code and wants to see what will happen if all variable widths are doubled uniformly.  On the other hand if -fdefault-real-8 was being used to ensure all calculations were done in double-precision (as was the case in this application and at least a few other unrelated scientific codes with which I have worked), performance takes a catastrophic hit simply because a new quad-precision math library is bundled with GCC.

    It would make more sense if GFortran added a -fdefault-real-16 (a la Intel Fortran's -real-size 128 switch) to promote all floating point to quad precision.  In fact, I find it difficult to make sense of GFortran's choice to make -fdefault-real-8 preserve mixed precision codes as it does; the only case where I can envision this sort of behavior being useful is in codes that implement their own reduced-precision FFTs.  I have literally never encountered such a code, though.

    Ultimately the solution to this problem, for those who are fortunate enough to get to the bottom of it, is to simply add -fdefault-double-8 in addition to -fdefault-real-8.  This was enough to fix the issue my colleague was having, and now his lab is back to crunching away with molecular dynamics at normal speed.
    \ No newline at end of file +
    All modern Linux kernels ship with the perf subsystem which makes diagnosing performance problems significantly easier than it has been in the past.  If you haven't familiarized yourself with them yet, you really need to--all it took for me was a 2-minute demo by +Peter Kjellström at SC'13 last year to realize Linux perf is serious business.  We will simply use it as an alternative to gprof in this case so that we don't have to re-build all this code with instrumentation, but perf can also do a lot of things that used to be the exclusive domain of special-purpose libraries like PAPI and IPM.

    Running the "good" build of this application through perf establishes our baseline expected behavior:

    $ perf record -o fast.report -g ./mdvgg.x
    WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,
    check /proc/sys/kernel/kptr_restrict.

    Samples in kernel functions may not be resolved if a suitable vmlinux
    file is not found in the buildid cache or in the vmlinux path.

    Samples in kernel modules won't be resolved at all.

    If some relocation was applied (e.g. kexec) symbols may be misresolved
    even with a suitable vmlinux or kallsyms file.

    Pressure list found
    taux,y,z: 0.000000 0.000000 0.000000
    txx0,tyy0,tzz0: 1013250. 1013250. 1013250.
    Wolf beta value: 4.4600E-008

    ***Lennard-Jones parameters, epsilons in ergs ***
    0.00000D+00 0.00000D+00 0.00000D+00 0.00000D+00 0.00000D+00
    ...
    average energy per atom: -0.39932E-11 -57.4739kcal/mole
    average energy with selfterm: -0.51864E-11 -74.6481kcal/mole

    [ perf record: Woken up 3 times to write data ]
    [ perf record: Captured and wrote 0.895 MB fast.report (~39121 samples) ]
    where
    • -o fast.report dumps the recorded data to a file called fast.report
    • -g generates call graphs in addition to the flat profile (this isn't always necessary)
    • ./mdvgg.x is the application binary we are profiling

    The scary warnings about kernel functions are harmless and a result of this entire debugging process being run as an unprivileged user.  Once the job finishes running, viewing the report reveals (with some extraneous data removed for brevity):

    $ perf report -i fast.report --stdio --sort dso -g flat
    ...
    # Overhead  Command         Shared Object
    # ........ ....... ....................
    #
    72.13% mdvgg.x mdvgg.x
    61.12%
    pairs_
    move1_

    7.00%
    listwater_
    bulk_
    MAIN__
    0x400efd
    ...
    20.99% mdvgg.x libc-2.12.so
    14.09%
    __memset_sse2
    bulk_
    MAIN__
    0x400efd

    0.97%
    __memset_sse2
    MAIN__
    0x400efd
    where
    • -i fast.report is the file containing our recorded data
    • --stdio prevents perf from using the interactive text user interface (I only added this because I can't paste interactions into a blog)
    • --sort dso presents the output in a relatively compact way sorted by the shared object in which time was being spent
    • -g flat presents a relatively flat profile (we don't need the full call graph)

    Thus, the majority of our runtime is taken up in a subroutine called pairs, called from move1 when this application is working normally.  A surprising fraction of runtime was also consumed by memset(3) in this case, but this was the result of my test input being so small that most of the actual runtime was spent doing initialization.  Even though this is generally not a great way to test application performance, it is acceptable in this case because even initialization takes 20x longer with the "bad" binary built against GFortran 4.6 (which in itself is a very insightful behavior that suggests that there is something systematically wrong with the bad binary).  The simplest and shortest possible run required to reproduce the issue should elucidate where the problem lies.

    Now, profiling the "bad" binary built with GFortran 4.6 should give us a definite place to start looking:

    $ perf record -o slow.report -g ./mdvgg.x
    WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,
    check /proc/sys/kernel/kptr_restrict.
    ...

    $ perf report -i slow.report --stdio --sort dso -g flat
    ...
    # Overhead Shared Object
    # ........ ....................
    #
    93.59% libgcc_s.so.1
    48.69%
    __sfp_handle_exceptions

    13.54%
    __multf3

    6.89%
    __addtf3

    6.16%
    __subtf3

    ...
    3.02% libquadmath.so.0.0.0
    1.62%
    __sfp_handle_exceptions

    2.67% mdvgg.x
    1.91%
    hcristo_
    fcc100_

    ...

    Well there's our problem!  Only 2.67% of the application runtime is actually being spent running the mdvgg.x application, and a huge amount of time is being spent in some __sfp_handle_exceptions call.  What gives?

    Now I'm not ashamed to say that I routinely turn to Google to figure out what most of this sort of computer nonsense means.  Unfortunately, searching for "__sfp_handle_exceptions" doesn't turn up anything useful, so the only hint we have is that the name of the call suggests that this "bad" build is generating a lot of floating point exceptions (FPEs).

    The logical next step is to rebuild the application with a lot of FPE trapping (FCFLAGS+=-ffpe-trap=invalid,zero,overflow,underflow,denormal).  This will determine if the code had been generating a ton of floating point exceptions all along but GFortran had just gotten stricter in 4.6.  Unfortunately, doing this just leads to more disappointment--the application does not generate any of the common floating point exceptions, meaning that this mysterious __sfp_handle_exceptions is, in fact, not handling serious floating point exceptions.  What else could it be doing?

    Although this particular application was both quick enough to run entirely through perf and serial enough to not require any special considerations with MPI, getting these perf profiles from long-running and highly parallel codes is similarly easy.  Instead of running the application through perf (perf record -o fast.report -g ./mdvgg.x) you can attach perf to an already-running process for a fixed period of time to generate a sample of the overall performance profile.  This is achieved by doing perf record -o fast.report -g -p sleep 10.  Perf attaches to the specified pid and gathers data from it, and just sleeps for ten seconds before detaching.

    Quad-Precision: Back to the Intel 80286

    Giving up on __sftp_handle_exceptions and moving on down the performance profile, it appears that suddenly libquadmath (which, as I mentioned above, appeared after our "working" compiler version was released) is soaking up cycles.  Furthermore, a quick googling of some of those big offenders like __multf3, __addtf3, and __subtf3 reveals that they are software implementations of long-double arithmetic--the application is now doing quad precision arithmetic in this "bad" build whereas it is definitely not doing this in our "good" build.

    Suddenly everything becomes a little clearer: long-double floating point arithmetic involves numbers stored in 128-bit precision, but 64-bit CPUs (or more properly, FPUs) are only capable of handling (you guessed it) 64-bit precision floating point calculations.  Thus, to get an application to do calculations in 128-bit precision, a software layer (libquadmath) must emulate 128-bit floating point hardware and actually translate the binary logic into something the 64-bit CPU can understand.  This is analogous to getting a 3rd grader to do a large calculation (e.g., 6×8) by breaking into pieces they know how to solve (e.g., 8+8, 8+8, 8+8), and it is a very slow process.  This massive performance loss is why Intel has had a hardware floating point unit in every processor it's designed since the 20386 (ca. 1985).

    The obvious question is then why GFortran 4.6 has decided to start carrying out all of the calculations in this code in quad precision by default.  Surely the GFortran developers didn't think forcing all arithmetic to be done in software was a good idea, right?

    Redefining Default Behavior

    Of course not.

    The next challenge, then, is to dig through the GFortran 4.6 manual to figure out what the libquadmath integration did to default behavior, or alternatively, what compiler flags started changing the precision of variables and calculations automatically.

    This is where knowledge of Fortran becomes important, because an unfortunate aspect of F77 (which has carried forward in F90) is its implicit typing.  A novice Fortran programmer (like a new graduate student) may think that doing something like

    implicit real*8(a-h,o-z)
    value1 = 0.31415926535e+1

    will store a double-precision (real*8) value in value1.  This isn't the case, as the "e+1" instead of "d+1" tends to render this a single-precision value.  This isn't always the case, but let it suffice to say that the details get messy and I've seen different compilers handle this in different ways by default.

    Anyway, every Fortran compiler has options to override this implicit typing and force all floating point values into double precision.  In GFortran, this has traditionally been -fdefault-real-8; that is, the default data type for real (i.e., single-precision) values is real*8, or double precision.  In this particular code's makefile, this flag was enabled to override the sloppy coding practices of decades of graduate students and ensure precision wasn't going down the drain because someone used E's instead of D's in 1997.

    When a simple search for "quadmath" in the GFortran 4.6 manual turns up nothing, searching for -fdefault-real-8 is the next step.  Lo and behold, this gem appears:
    -fdefault-real-8
    Set the default real type to an 8 byte wide type. Do nothing if this is already the default. This option also affects the kind of non-double real constants like 1.0, and does promote the default width of DOUBLE PRECISION to 16 bytes if possible, unless -fdefault-double-8 is given, too. 
    Bingo.  Any code that previously used -fdefault-real-8 to ensure that all floating point arithmetic was being done in double precision now does all explicitly typed double precision arithmetic as 128-bit quad precision in software as its effective default behavior.  What's worse is that this change in behavior isn't even mentioned in the release notes for GFortran 4.6 because -fdefault-real-8 has always tried to promote real*8 to real*16 as its intended behavior; it simply never succeeded because GFortran didn't support software quad-precision before libquadmath appeared in 4.6.

    Quite frankly, defining the behavior of something as straightforward-sounding as -fdefault-real-8 to be so environment-specific is insane.  The only use case where this new behavior would even make sense is if a programmer intentionally mixes real*4 and real*8 datatypes within code and wants to see what will happen if all variable widths are doubled uniformly.  On the other hand if -fdefault-real-8 was being used to ensure all calculations were done in double-precision (as was the case in this application and at least a few other unrelated scientific codes with which I have worked), performance takes a catastrophic hit simply because a new quad-precision math library is bundled with GCC.

    It would make more sense if GFortran added a -fdefault-real-16 (a la Intel Fortran's -real-size 128 switch) to promote all floating point to quad precision.  In fact, I find it difficult to make sense of GFortran's choice to make -fdefault-real-8 preserve mixed precision codes as it does; the only case where I can envision this sort of behavior being useful is in codes that implement their own reduced-precision FFTs.  I have literally never encountered such a code, though.

    Ultimately the solution to this problem, for those who are fortunate enough to get to the bottom of it, is to simply add -fdefault-double-8 in addition to -fdefault-real-8.  This was enough to fix the issue my colleague was having, and now his lab is back to crunching away with molecular dynamics at normal speed.
    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2014/looking-forward-from-the-arm-days-of-old/index.html b/2014/looking-forward-from-the-arm-days-of-old/index.html new file mode 100644 index 0000000..79e93ee --- /dev/null +++ b/2014/looking-forward-from-the-arm-days-of-old/index.html @@ -0,0 +1,188 @@ + + + + + + + Looking forward from the ARM days of old - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    Looking forward from the ARM days of old


These days we often hear about CPUs based upon ARM cores. They can be found in mobile phones, embedded systems, laptops and even servers. Indeed, projects such as Mont Blanc are investigating the use of ARM-based systems for high performance computing (HPC).


Back in the late 1980s, I was a student in high school and a budding computer scientist. In those days, my view of the personal computer market was very North American centric. That is, until one day I read about a new desktop computer from the UK known as the Acorn Archimedes. This system was based upon a RISC CPU which was given the name ARM (Acorn RISC Machine). The writeup in the local Toronto Computes! newspaper indicated that Olivetti Canada was bringing the Acorn Archimedes range to North America. As luck would have it, Olivetti was just down the road from me. After a few phone calls, I was invited to their offices for some hands-on time with a top-of-the-line Acorn Archimedes 440. This was the start of my journey with ARM-based systems. The folks at Olivetti were kind enough to let me use the Archie over a number of days. During that time, I had a chance to try out a number of different software products, including games and productivity software. Overall, I was greatly impressed by the Archie, its operating system RISC OS, and its WIMP interface. One game in particular that I remember quite well was Zarch, which showed off the 3D graphics capabilities of the system.


The only catch for me was the list price of the system. As I recall, it was around $2,500 CAD, which for me at the time was prohibitive.


    Moving forward to 2014, I’ve recently been tinkering with the ARM-based mini +PC UDOO Quad running Debian Wheezy EABI (hard-float). This happens to +intersect with another area of interest, Technical Computing.


I’ll share more of my experiences with the UDOO Quad in the coming weeks.

diff --git a/2014/machine-learning-for-scientists/index.html b/2014/machine-learning-for-scientists/index.html new file mode 100644 index 0000000..5130359
This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Machine Learning for Scientists


    I recently taught a 1-day machine learning workshop for scientists for the good folks at SciNetHPC. There was enough interest (nearly forty people signed up for a day-long session near the end of term) that we had to book a large-ish classroom.


    There’s a lot of interest in the topic — which might even be surprising, given that a lot of the material is either familiar or pretty easy to digest for those who spend a lot of their time doing scientific data analysis. But for those coming to it for the first time and on their own, the difference in terminology (“features”? “shrinkage”? Wait, you just mean variables and regularization?) and the huge number of different methods available can be pretty baffling.


And I think it helps to have someone with a science background to explain how the approaches taken to modelling differ from those in the sciences (especially the natural sciences), and why that is. Having that connection means that you can translate, so that the very real expertise and experience scientists already have can be a benefit, rather than throwing up barriers. (“Bias-Variance tradeoff? You mean you’re willing to introduce error just to get the error bars down a bit – centred on the wrong answer? What kind of monster are you, and what dangerous nonsense is this machine learning stuff?”)


    This was the first time teaching this material, and while there are some things I’d like to improve (especially doing more on PCA and clustering, although I don’t know what I’d take out for a 1-day class), I think that it went fairly well. The presentation can be seen online, and everything’s available on github.


    Incidentally, this was my first time using Slidify for a presentation, and I really enjoyed it – this may be the first markdown/html5 setup that finally gets me willingly moving away from Keynote for this sort of material. Obviously, Slidify integrates much more closely with R than with python, particularly for graphics; but still, it was a pleasure to use.

diff --git a/2014/parallelizing-r-on-supercomputers/index.html b/2014/parallelizing-r-on-supercomputers/index.html new file mode 100644 index 0000000..86ab975
This is a crosspost from Glenn K. Lockwood: Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

    Parallelizing R on Supercomputers

    Executive summary:  I've posted a tutorial on how to parallelize R codes on my website.  This post is a more personal reflection on how I got there.



    “Parallel Options for R” was the title of the first talk I ever presented on behalf of my employer, and despite the fact that I didn’t (and still don’t) know anything about the R language, statistics, or how to parallelize any of it, the shoe seemed to fit at the time.  The talk went over well, and I’ve been asked to give the talk in my capacity as the resident “parallel R guy” plenty of times since.

    Every once in a while I get asked how I came to become so involved in some of the weird topics about which I write and speak--after all, I really have no formal training in things like SR-IOV, Hadoop, and next-generation gene sequencing.  As much as I'd like to claim I just have some infinite sage-like knowledge, the reality is that I have to learn about these various technologies as a result of my day job--answering helpdesk tickets.  In the case of parallel R, I simply got a ticket in January 2013 that read,
    "I just ran an intensive R script through [the supercomputer].  Its not much faster than my own machine.  Could you point me to a tutorial for how I can make the process run in different processors in parallel?"
    I couldn't very well say "lol no idea" (which was the truth), but the fact is that there are only about three whole people in my group** who are tasked with solving every problem that comes in from the thousand unique users who run jobs on our system every year.  If I didn't know the answer, there was a good chance that nobody else knew either.  That doesn't change the fact that someone needs to answer the user's question though, and that fact is what got me into the parallel R business.
    In my quest for an answer to this user's helpdesk request, I further discovered that there were no good tutorials online that explain the process of parallelizing R codes.  Thus, I wound up having to buy a book to learn what I need to know to answer the user's question.  So I did, and I learned the rough basics of how someone might go about parallelizing their R codes.  I gave the user a few starting pointers, some of the libraries that he might want to check out on CRAN, and tried to provide some boilerplate code that might help him parallelize his particular script.  We then went our separate ways.
    With all this reflection aside though, I never lost sight of the reality that I never did answer the user's question: what is a good tutorial on how to parallelize R codes?
    This question has actually come up a number of times from a number of users over the last year.  Rather than take the easy route and tell everyone to attend my next talk on the subject, I decided to turn my presentation on parallelizing R into a series of tutorials which I've put on my website:
It's not comprehensive by any means; notably, I did not cover either the pbdR library out of UTK/Oak Ridge (an omission with no particularly good justification) or SPRINT from Edinburgh (it's a bit specialized in functionality).  I also haven't had the opportunity to convert my presentation on using R with Hadoop and Spark into the final component of this tutorial.  Those topics will come as time permits.  Regardless, I hope someone finds the write-up useful.
    ** I say "whole people" to reflect that our funding provides somewhere in the neighborhood of three full-time equivalent employees providing front-line user support.  That funding winds up getting distributed across more physical staff.
diff --git a/_posts/glennklockwood/2014-6-24-tagbloggercom1999blog-4307061427721284246post-1519944886078195806.md b/2014/perspectives-on-the-current-state-of-data-intensive-scientific-computing/index.html similarity index 52% rename from _posts/glennklockwood/2014-6-24-tagbloggercom1999blog-4307061427721284246post-1519944886078195806.md rename to 2014/perspectives-on-the-current-state-of-data-intensive-scientific-computing/index.html index fae35b1..618f449 100644
This is a crosspost from Glenn K. Lockwood: Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

    Perspectives on the Current State of Data-Intensive Scientific Computing


    I recently had the benefit of being invited to attend two workshops in Oakland, CA, hosted by the U.S. Department of Energy (DOE), that shared the common theme of emerging trends in data-intensive computing: the Joint User Forum on Data-Intensive Computing and the High Performance Computing Operational Review.  My current employment requires that I stay abreast of all topics in data-intensive scientific computing (I wish there was an acronym to abbreviate this…DISC perhaps?) so I didn’t go in with the expectation of being exposed to a world of new information.  As it turned out though, I did gain a very insightful perspective on how data-intensive scientific computing (DISC), and I daresay Big Data, is seen from the people who operate some of the world’s largest supercomputers.

    The DOE perspective is surprisingly realistic, application-oriented, and tightly integrated with high-performance computing.  There was the obligatory discussion of Hadoop and how it may be wedged into machines at LLNL with Magpie, ORNL with Spot Hadoop, and SDSC with myHadoop, of course, and there was also some discussion of real production use of Hadoop on bona fide Hadoop clusters at some of the DOE labs.  However, Hadoop played only a minor role in the grand scheme of the two meetings for all of the reasons I’ve outlined previously.

    Rather, these two meetings had three major themes that crept into all aspects of the discussion:
1. Scientific workflows
2. Burst buffers
3. Data curation
I found this to be a very interesting trend, as #1 and #2 (workflows and burst buffers) aren’t topics I’d heard come up at any other DISC workshops, forums, or meetings I’ve attended.  The connection between DISC and workflows wasn’t immediately evident to me, and burst buffers are a unique aspect of cyberinfrastructure that have only been thrust into the spotlight with the NERSC-8/LANL Trinity RFP last fall.  However, all three of these topics will become central to both data-intensive scientific computing and, by virtue of their ability to produce data, exascale supercomputers.


    Scientific workflows

    Workflows are one of those aspects of scientific computing that have been easy to dismiss as the toys of computer scientists because traditional problems in high-performance computing have typically been quite monolithic in how they are run.  SDSC's own Kepler and USC's Pegasus systems are perhaps the most well-known and highly engineered workflow management systems, and I have to confess that when I'd first heard of them a few years ago, I thought they seemed like a very complicated way to do very simple tasks.

    As it turns out though, both data-intensive scientific computing and exascale computing (by virtue of the output size of exaflop calculations) tend to follow patterns that look an awful lot like map/reduce at a very abstract level.  This is a result of the fact that most data-intensive problems are not processing giant monoliths of tightly coupled and inter-related data; rather, they are working on large collections of generally independent data.  Consider the recent talk I gave about a large-scale genomic study on which I consulted; the general data processing flow was
    1. Receive 2,190 input files, 20 GB each, from a data-generating instrument
    2. Do some processing on each input file
    3. Combine groups of five input files into 438 files, each 100 GB in size
    4. Do more processing 
    5. Combine 438 files into 25 overlapping groups to get 100 files, each 2.5 GB in size
    6. Do more processing
    7. Combine 100 files into a single 250 GB file
    8. Perform statistical analysis on this 250 GB file for scientific insight
    Question #2 is more interesting to me since this problem of multiple people cooking up different but equivalent solutions to the same problems is pervasive throughout computational and computer science. This is in large part due to the fatal assumption held by many computer scientists that good software can be simply "thrown over the fence" to scientists and it will be adopted.  This has never worked; rather, the majority of widely adopted software technologies in HPC have been a result of the standardization of a landscape of similar but non-standard tools.  This is something I touched on in a previous post when outlining the history of MPI and OpenMP's successes.

I don't think the developers of this menagerie of workflows are ready to settle on a standard, as the field is not mature enough to have a holistic understanding of all of the issues that workflows need to solve.  Despite the numerous presentations and discussions of various workflow solutions being used across DOE's user facilities, my presentation was the only one that considered optimizing workflow execution for the underlying hardware.  Given that the target audience of these talks was users of high-performance computing, the lack of consideration given to the performance aspects of workflow optimization is a testament to this immaturity.

    Burst buffers

    For those who haven't been following the details of one of DOE's more recent procurement rounds, the NERSC-8 and Trinity request for proposals (RFP) explicitly required that all vendor proposals include a burst buffer to address the capability of multi-petaflop simulations to dump tremendous amounts of data in very short order.  The target use case is for petascale checkpoint-restart, where the memory of thousands of nodes (hundreds of terabytes of data) needs to be flushed to disk in an amount of time that doesn't dominate the overall execution time of the calculation.

    The concept of what a burst buffer is remains poorly defined.  I got the sense that there are two outstanding definitions:
    • The NERSC burst buffer is something more tightly integrated on the compute side of the system and may be a resource that can be allocated on a per-job basis
    • The Argonne burst buffer is something more tightly integrated on the storage side of the system and acts in a fashion that is largely transparent to the user.  This sounded a lot like the burst buffer support being explored for Lustre.
    In addition, Los Alamos National Labs (LANL) is exploring burst buffers for the Trinity procurement, and it wasn't clear to me if they had chosen a definition or if they are exploring all angles.  One commonality is that DOE is going full-steam ahead on providing this burst buffer capability in some form or another, and solid-state storage is going to be a central enabling component.
    Of course, the fact that my favorite supercomputer provides dynamically allocatable SSDs in a fashion not far removed from these NERSC burst buffers probably biases me, but we've demonstrated unique DISC successes enabled by our ability to pile tons of flash on to single compute nodes.  This isn't to say that the Argonne burst buffer isn't without merit; given that the Argonne Leadership Computing Facility (ALCF) caters to capability jobs rather than capacity jobs, their user base is better served by providing a uniform, transparent burst I/O capability across all nodes.  The NERSC burst buffer, by comparison, is a lot less transparent and will probably be much more susceptible to user disuse or misuse.  I suspect that when the dust settles, both takes on the burst buffer concept will make their way into production use.

    A lot of the talk and technologies surrounding burst buffers are shrouded in NNSA secrecy or vendor non-disclosures, so I'm not sure what more there is to be said.  However, the good folks at HPCwire ran an insightful article on burst buffers after the NERSC-8 announcement for those who are interested in more detail.

    Data curation

    The final theme that bubbled just beneath the surface of the DOE workshops was the idea that we are coming upon an era where scientists can no longer save all their data from all their calculations in perpetuity.  Rather, someone will have to become the curator of the scientific data being generated by computations and figure out what is and is not worth keeping, and how or where that data should be stored and managed.  This concept of selectively retaining user data manifested in a variety of discussions ranging from in-place data sharing and publication with Globus Plus and science DMZs to transparently managing online data volumes with hierarchical storage management (HSM).  However, the common idea was that scientists are going to have to start coming to grips with data management themselves, as facilities will soon be unable to cope with the entirety of their users' data.

    This was a particularly interesting problem to me because it very closely echoed the sentiments that came about from Datanami's recent LeverageBIGDATA event which had a much more industry-minded audience.  The general consensus is that several fields are far ahead of the pack in terms of addressing this issue; the high-energy physics community has been filtering data at its genesis (e.g., ignoring the data from uninteresting collision events) for years now, and enterprises seem comfortable with retaining marketing data for only as long as it is useful.  By comparison, NERSC's tape archive has not discarded user data since its inception several decades ago; each new tape system simply repacks the previous generation's tape to roll all old data forward.


    The good news here is that the problem of curating digital data is not new; it is simply new to high-performance computing.  In the spirit of doing things the right way, DOE invited the director of LANL's Research Library to attend the workshops, and she provided valuable insights into how methods of digital data curation may be applied to these emerging challenges in data-intensive scientific computing.

    Final Thoughts

The products of the working groups' conventions at the HPC Operational Review are being assembled into a report to be delivered to DOE's Office of Science, and it should be available online at the HPCOR 2014 website as well as the usual DOE document repository in a few months.  Hopefully it will reflect what I feel was the essence of the workshop, but at any rate, it should contain a nice perspective on how we can expect the HPC community to address the new demands emerging from the data-intensive scientific computing (DISC) community.

    In the context of high-performance computing, 
    • Workflow management systems will continue to gain importance as data sets become larger, more parallel, and more unwieldy.
    • Burst buffers, in one form or another, will become the hardware solution to the fact that all exascale simulations will become data-intensive problems.
    • Data curation frameworks are the final piece of the puzzle and will provide the manageability of data at rest.


    I also learned that I have a weird fascination with streetcars.  I'm glad I was introduced to supercomputers first.
diff --git a/_posts/glennklockwood/2014-2-25-tagbloggercom1999blog-4307061427721284246post-2558467222542471411.md b/2014/quantum-espresso-compiling-and-choice-of-libraries/index.html similarity index 62% rename from _posts/glennklockwood/2014-2-25-tagbloggercom1999blog-4307061427721284246post-2558467222542471411.md rename to 2014/quantum-espresso-compiling-and-choice-of-libraries/index.html index b47588b..9202ed8 100644
This is a crosspost from Glenn K. Lockwood: Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

Quantum ESPRESSO: Compiling and Choice of Libraries

    We recently upgraded our two big machines at work, and as a result of that upgrade, a number of our users had to rebuild their installation of Quantum ESPRESSO.  As it turns out, little quirks in our system conflicted with little quirks in Quantum ESPRESSO after the upgrade and resulted in the regular process of just doing ./configure and make not working out of the box.

    Since I had been playing with Quantum ESPRESSO for the purpose of benchmarking QDR InfiniBand virtualized with SR-IOV, I also took it upon myself to iron out exactly how to squeeze the best performance out of QE with respect to compilers, MPI stacks, and choice of linear algebra libraries.  For the sake of posterity (or at least until a new version of QE comes out that makes this all irrelevant), here are my notes.

    I also wrapped all of these build options into a script that will configure and build optimized versions of Quantum ESPRESSO for various compiler and MPI combinations on the two machines I support at work.

    BLAS, LAPACK, and ScaLAPACK

    Quantum ESPRESSO, like a multitude of other scientific codes, does a lot of linear algebra and uses the BLAS, LAPACK, and ScaLAPACK libraries to this end.  I have to shamefully admit that I never fully understood the relationship between these libraries before[1], but figuring out how to build Quantum ESPRESSO to deliver the best performance was a great excuse to sit down and get it straightened out.

    BLAS, LAPACK, and ScaLAPACK are all libraries (and de facto standard APIs) that provide increasing levels of abstraction to glue applications to underlying hardware.  This is the way I see this layering taking place:
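(The figure that originally illustrated this layering did not survive the crosspost; as a rough sketch of how these libraries conventionally stack up, with each layer building on the ones below it:)

    application (e.g., Quantum ESPRESSO)
     └─ ScaLAPACK: distributed-memory linear algebra
         ├─ LAPACK: factorizations and higher-level solvers
         │   └─ BLAS: basic vector and matrix operations
         └─ BLACS: the communication layer, built on top of MPI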

[1] I have a compelling excuse though!  I got my start in scientific computing doing molecular dynamics simulations, and there just isn't a great deal of linear algebra required to calculate most models.  I did work on an electronegativity-based model that required solving big systems of equations, but we found that there were more efficient ways to tackle the underlying physical problem, like using clever extended Lagrangian methods.

    Building Quantum ESPRESSO

    Customizing a build of Quantum ESPRESSO isn't completely standard compared to most non-scientific Linux packages, but it's miles ahead of most scientific packages in that it uses autoconf instead of a home-cooked build process.

    Choice of Libraries

    There are a few key factors to define when building Quantum ESPRESSO.  As you may have guessed from the previous section, they are (in no particular order):
    • choice of compiler
    • choice of MPI implementation
    • choice of BLAS library
    • choice of LAPACK library
    • choice of ScaLAPACK library
    • choice of FFT library
    On most academic systems like SDSC's Gordon and Trestles, there are several options available for each one of these parameters, and figuring out (1) how to actually define your choice for each, and (2) determine which provides the best performance can be a bear.  What's worse is that these choices are often tied together; for example, the best ScaLAPACK implementation might not be compatible with the best FFT library.

    Gordon and Trestles provide the following options:


Component    Options
Compiler     Intel and PGI
MPI          MVAPICH2 and OpenMPI
BLAS         MKL, ACML, and Netlib Reference
LAPACK       MKL, ACML, and Netlib Reference
ScaLAPACK    MKL and Netlib Reference
FFTs         MKL, ACML, or FFTW3

There are actually more options than this (e.g., GNU compilers and the MPICH implementation), but I did not test them.

    Passing Library Choices to the Build Process

As of Quantum ESPRESSO 5.0.3, which is what I used here, you can't specify libraries in the autoconf-standard way (e.g., --with-lapack=/opt/lapack/...).  I suspect this is because the actual implementations of these libraries don't follow a standard convention (e.g., LAPACK calls aren't necessarily in a shared object called liblapack.so), but the QE build process does honor certain environment variables.


    Intel

    Since none of these libraries are really standardized, vendors are free to bury their API wrappers in whatever libraries they want and support them to whatever extent they want.  Intel's compilers come bundled with their Math Kernel Library (MKL) which provides bindings for
    • BLAS:
      BLAS_LIBS="-lmkl_intel_lp64 -lmkl_sequential -lmkl_core"
    • LAPACK:
      LAPACK_LIBS can be left as the default since BLAS and LAPACK are buried in the same libraries
    • ScaLAPACK/BLACS:
      SCALAPACK_LIBS="-lmkl_scalapack_lp64 -lmkl_blacs_openmpi_lp64" for OpenMPI OR
      SCALAPACK_LIBS="-lmkl_scalapack_lp64 -lmkl_blacs_intelmpi_lp64" for MVAPICH2
    • FFTW:
      FFT_LIBS="-lmkl_intel_lp64 -lmkl_sequential -lmkl_core" for modern versions of MKL; older versions had the FFTW3 bindings in a separate library
    so your final configure command should look something like
    ./configure \
      CC=icc \
      CXX=icpc \
      FC=ifort \
      F77=ifort \
      BLAS_LIBS="-lmkl_intel_lp64 -lmkl_sequential -lmkl_core" \
      SCALAPACK_LIBS="-lmkl_scalapack_lp64 -lmkl_blacs_openmpi_lp64" \
      FFT_LIBS="-lmkl_intel_lp64 -lmkl_sequential -lmkl_core"
    when compiling with OpenMPI, or with a slightly modified SCALAPACK_LIBS line (-lmkl_blacs_intelmpi_lp64) when compiling with MVAPICH2.

    PGI/AMD

    PGI's compilers come bundled with the AMD Core Math Library (ACML), which provides bindings for BLAS, LAPACK, and FFTW, but its lack of ScaLAPACK means we still must use Netlib's ScaLAPACK and BLACS libraries.  Be sure to load the pgi module, your preferred MPI module, and the scalapack module first!
    • BLAS:
      BLAS_LIBS="-L$PGIHOME/libso -lacml"
    • LAPACK:
      LAPACK_LIBS can be left as the default since BLAS and LAPACK are buried in the same ACML library
    • ScaLAPACK/BLACS:
      SCALAPACK_LIBS="-L$SCALAPACKHOME/lib -lscalapack"
    • FFTW:
  FFT_LIBS="-L$PGIHOME/libso -lacml" even though ACML is included in the $BLAS_LIBS variable--this is because autoconf may pick up a system fftw library which needs to be superseded by the FFTW bindings in ACML.
    so your final configure command should look something like
    ./configure \
      CC=pgcc \
      CXX=pgCC \
      FC=pgf90 \
      F77=pgf77 \
      BLAS_LIBS="-L$PGIHOME/libso -lacml" \
      SCALAPACK_LIBS="-L$SCALAPACKHOME/lib -lscalapack" \
      FFT_LIBS="-L$PGIHOME/libso -lacml"
    After doing this, there is one additional bit of manual hacking that must be done!  PGI is known to trigger problems in Quantum ESPRESSO's IO library, IOTK, and you will need to compile with the -D__IOTK_WORKAROUND1 switch enabled.  This command will hack the necessary line in make.sys:
    sed -i 's/^DFLAGS\(.*\)$/DFLAGS\1 -D__IOTK_WORKAROUND1/' make.sys
    I owe a lot of gratitude to Filippo Spiga of Cambridge/the Quantum ESPRESSO Foundation for helping me quickly work through some of the issues I encountered in getting all of these builds to work correctly.

In my next post, I will show what effect all of these options have on actual application performance.
diff --git a/_posts/glennklockwood/2014-2-25-tagbloggercom1999blog-4307061427721284246post-5218204332103869167.md b/2014/quantum-espresso-performance-benefits-of-vendor-optimized-libraries/index.html similarity index 62% rename from _posts/glennklockwood/2014-2-25-tagbloggercom1999blog-4307061427721284246post-5218204332103869167.md rename to 2014/quantum-espresso-performance-benefits-of-vendor-optimized-libraries/index.html index 8a4a9ac..7c7ccab 100644
This is a crosspost from Glenn K. Lockwood: Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

Quantum ESPRESSO: Performance Benefits of Vendor-Optimized Libraries

    In my previous post, I presented a lot of different options you can use to build Quantum ESPRESSO which are (admittedly) very confusing.  At the end of the day, the set of options that produce the fastest-running executable matters the most, so I went through and benchmarked many of the permutations of compiler/MPI/library options.

    What this post ultimately illustrates is that you should never use the Netlib reference implementations of BLAS and LAPACK; even Netlib says as much.  ScaLAPACK is much less broadly supported by hardware vendors (e.g., the ACML library that shipped with the PGI compiler I used did not include it), but most of the hardware-dependent optimizations are done below the BLACS level and within the MPI library and associated hardware drivers.  As such, I was able to use Intel's MKL ScaLAPACK when building with the Intel compiler in the data below, but I had to use Netlib's ScaLAPACK with ACML-optimized BLAS and LAPACK when compiling with PGI.

    The actual benchmark I used was the DEISA AUSURF112 benchmark problem with only one pool using 64 MPI processes.  The two testing platforms were

    • SDSC's Gordon supercomputer (four nodes)
      • 16× 2.6 GHz Intel Xeon E5-2670 (Sandy Bridge) cores
      • 64 GB DDR3 SDRAM
      • Mellanox ConnectX-3 QDR HCAs on PCIe 3.0
      • Mellanox Infiniscale IV switch
    • SDSC's Trestles supercomputer (two nodes)
  • 32× 2.4 GHz AMD Opteron 6136 (Magny Cours) cores
      • 64 GB DDR3 SDRAM
      • Mellanox ConnectX QDR HCAs on PCIe 2.0
      • Voltaire Grid Director 4700 switch

I don't know the port-to-port latency for the Trestles runs, but the application is bandwidth-bound due to the problem geometry (one pool), and the large number of MPI_Allreduces and MPI_Alltoallvs renders the latency largely irrelevant.  More information about the communication patterns of this benchmark is available from the HPC Advisory Council.

    On both testing systems, the software versions were the same:
    • Compilers: Intel 2013.1.117 and PGI 13.2
    • MPI libraries: MVAPICH2 1.9 and OpenMPI 1.6.5
    • Vendor FFTs: MKL 11.0.1 and ACML 5.3.0
    • Vendor BLAS/LAPACK: MKL 11.0.1 and ACML 5.3.0
    • Vendor ScaLAPACK: MKL 11.0.1 (used Netlib ScaLAPACK 2.0.2 with PGI)
    • Reference FFTs: FFTW 3.3.3
    • Reference BLAS/LAPACK: Netlib 3.4.2
    • Reference ScaLAPACK: Netlib 2.0.2

    Vendor-optimized Libraries

    On Gordon, MKL shows extremely good performance compared to ACML, and this is to be expected given the fact that Intel's MKL is optimized for Gordon's ability to do AVX operations.

    Performance with vendor libraries on Gordon

    In addition, the difference in MPI libraries is also quite consistent.  Although the point-to-point performance of MVAPICH2 and OpenMPI over the same fabric should be comparable, the two libraries have different implementations of MPI collective operations.  Quantum ESPRESSO is dominated by costly MPI_Allreduce and MPI_Alltoallv, so the level of optimization within the MPI implementations is very apparent.

    In fact, the PGI and OpenMPI build (which uses the Netlib ScaLAPACK, as opposed to a vendor-supplied ScaLAPACK which MKL provides) would hang on collectives unless the following environment variable was passed to the OpenMPI runtime:

    OMPI_MCA_coll_sync_barrier_after=100

    This switch forces the OpenMPI runtime to sync all processes after every 100 collective operations to prevent certain MPI ranks from racing so far ahead of the rest that a deadlock occurs.  OpenMPI does this after every 1,000 collectives by default.  Alternatively, HPCAC suggests the following tunings for OpenMPI:
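In practice, this tuning can be applied either through the environment or as an MCA parameter on the mpirun command line; a sketch of both forms follows (the executable and input file names here are just illustrative):

# via the environment, e.g., in the job script
export OMPI_MCA_coll_sync_barrier_after=100
mpirun -np 64 ./pw.x -in ausurf.in

# or equivalently on the command line
mpirun --mca coll_sync_barrier_after 100 -np 64 ./pw.x -in ausurf.in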

    OMPI_MCA_mpi_affinity_alone=1
    OMPI_MCA_coll_tuned_use_dynamic_rules=1
    OMPI_MCA_coll_tuned_barrier_algorithm=6
    OMPI_MCA_coll_tuned_allreduce_algorithm=0

    These collective tunings also prevented deadlocking of the benchmark, but the performance was no better than simply increasing the implicit barrier frequency with OMPI_MCA_coll_sync_barrier_*.

    Trestles, with its AMD processors, does not realize as large a benefit from using MKL:

    Performance with vendor libraries on Trestles

MKL still outperforms ACML even on AMD processors, but the margin is almost negligible.  As with the Gordon case though, the difference in MPI implementations is stark because of OpenMPI's poor collective performance.

    It is worth noting that PGI with OpenMPI did not work unless both of the following OpenMPI parameters were specified:

    OMPI_MCA_coll_sync_barrier_after=100
    OMPI_MCA_coll_sync_barrier_before=100

    At smaller processor counts, ScaLAPACK compiled with OpenMPI (both Netlib's and MKL's implementations) performed horrendously.  I don't know exactly what the conflict is, but OpenMPI and ScaLAPACK do not seem to play nicely.


    Netlib reference implementations

    As a fun afterthought, I thought it also might be useful to compare the vendor libraries to Netlib's reference implementations of BLAS and LAPACK.  I rebuilt the four compiler+MPI combinations on both systems using Netlib's BLAS, LAPACK, and ScaLAPACK (as well as the stock FFTW library instead of MKL or ACML's versions) to see how badly Netlib's reference really performs, and here are the results:

    Performance with Netlib reference libraries on Gordon.  The build with Intel and MVAPICH2 was not able to run.

    On SDSC's Gordon resource, the OpenMPI builds were between 3× and 4× slower, but the PGI build with MVAPICH2 was only(!) 64% slower.  This is a curious result, as I would have expected performance to be dramatically worse across all combinations of compiler and MPI library since BLAS and LAPACK should really show no performance difference when it comes to the choice of MPI library.

    The above results suggest that Quantum ESPRESSO makes its heavy use of BLAS and LAPACK through the ScaLAPACK library, and as such, the ScaLAPACK implementation and its performance with each of the MPI libraries is critically important.  Of course, even with a good combination of ScaLAPACK and MPI stack, having a vendor-optimized BLAS and LAPACK goes a long way in increasing overall performance by more than 50%.

    It should also be obvious that the Intel and MVAPICH2 build's performance data is absent.  This is because the build with Intel and MVAPICH2 repeatedly failed with this error:

    ** On entry to DLASCL parameter number 4 had an illegal value

    This error is the result of DGELSD within LAPACK not converging within the hard-coded criteria.  This problem has been detailed at the LAPACK developers' forums, and the limits were actually dramatically increased since the postings in the aforementioned forum.

    Despite that patch though, the problem still manifests in the newest versions of Netlib's reference BLACS/ScaLAPACK implementation, and I suspect that this is really a fundamental limitation of the BLACS library relying on platform-dependent behavior to produce its results.  Recall from above that the vendor-supplied implementations of LAPACK do not trigger this error.

    On Trestles, the results are even worse:

    Performance with Netlib reference libraries on Trestles.  Only the build with PGI and MVAPICH2 was able to run.
When built with the Intel compiler, both MVAPICH2- and OpenMPI-linked builds trigger the DLASCL error.  The PGI and OpenMPI build does not trigger this error, but instead hangs on collectives even with the OpenMPI tunings I reported for the vendor-optimized Trestles PGI+OpenMPI build.

    Cranking up the implicit barrier frequency beyond 100 might have gotten the test to run, but quite frankly, having to put a barrier before and after every 100th collective is already an extremely aggressive modification to runtime behavior.  Ultimately, this data all suggests that you should, in fact, never use the Netlib reference implementations of BLAS and LAPACK.

    Summary of Data

    Here is an overall summary of the test matrix:

    Overall performance comparison for AUSURF112 benchmark

    This benchmark is very sensitive to the performance of collectives, and exactly how collectives are performed is specific to the MPI implementation being used.  OpenMPI shows weaker collective performance across the board, and as a result, significantly worse performance.

    These collective calls are largely made via the ScaLAPACK library though, and since ScaLAPACK is built upon BLAS and LAPACK, it is critical to have all components (BLAS, LAPACK, ScaLAPACK, and the MPI implementation) working together.  In all cases tested, Intel's MKL library along with MVAPICH2 provides the best performance.  As one may guess, ACML also performs well on AMD Opteron processors, but its lack of optimization for AVX instructions prevented it from realizing the full performance possible on Sandy Bridge processors.

    In addition to performance, there are conclusions to be drawn about application resiliency, or Quantum ESPRESSO's ability to run calculations without hanging or throwing strange errors:
    • PGI with MVAPICH2 was the most resilient combination; it worked out of the box with all combinations of BLAS/LAPACK/ScaLAPACK tested
    • PGI with OpenMPI was the least resilient combination, perhaps because ACML's lack of ScaLAPACK bindings forces the use of Netlib ScaLAPACK.  Combining Netlib BLAS/LAPACK/ScaLAPACK with PGI/OpenMPI simply failed on Trestles, and getting Netlib ScaLAPACK to play nicely with either MKL or ACML's BLAS/LAPACK libraries when compiled against PGI and OpenMPI required tuning of the OpenMPI collectives.
    • In both test systems, using vendor libraries wherever possible make Quantum ESPRESSO run more reliably.  The only roadblocks encountered when using MKL or ACML arose when they were combined with PGI and OpenMPI, where special collective tunings had to be done.

    At the end of the day, there aren't many big surprises here.  There are three take-away lessons:
    1. MKL provides very strong optimizations for the Intel x86 architecture, and ACML isn't so bad either.  You run into trouble when you start linking against Netlib libraries.
    2. MVAPICH2 has better collectives than OpenMPI, and this translates into better ScaLAPACK performance.  Again, this becomes less true when you start linking against Netlib libraries.
    3. Don't use the Netlib reference implementations of BLAS, LAPACK, or ScaLAPACK because they aren't designed for performance or resiliency.  
      • Using Netlib caused performance to drop by between 60% and 400%, and 
      • only half of the builds that linked against the Netlib reference trials would even run.
    Friends don't let friends link against Netlib!
    -
    \ No newline at end of file +
    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2014/scalable-data-analysis-in-r/index.html b/2014/scalable-data-analysis-in-r/index.html new file mode 100644 index 0000000..55aeb9c --- /dev/null +++ b/2014/scalable-data-analysis-in-r/index.html @@ -0,0 +1,175 @@ + + + + + + + Scalable Data Analysis in R - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Scalable Data Analysis in R


    R is a great environment for interactive analysis on your desktop, but when your data needs outgrow your personal computer, it’s not clear what to do next.


    I’ve put together material for a day-long tutorial on scalable data analysis in R. It covers:

• A brief introduction to R for those coming from a Python background;
• The bigmemory package for out-of-core computation on large data matrices, with a simple physical sciences example;
• The standard parallel package, including what were formerly the snow and multicore facilities, using airline data as an example;
• The foreach package, using airline data and simple stock data;
• The Rdsm package for shared memory; and
• a brief introduction to the powerful pbdR packages for extremely large-scale computation.

The presentation for the material, in R markdown (so including the source code), is in the presentation directory; you can read the resulting presentation as markdown there, or as a PDF.


    The R code from the slides can be found in the R directory.


Some data can be found in the data directory; but as you might expect in a workshop on scalable data analysis, the files are quite large! Mostly you can just find scripts for downloading the data; running make in the main directory will pull almost everything down, but a little more work needs to go into automating some of the production of the data products used.


    Suggestions, as always, greatly welcomed.

diff --git a/2014/spark-on-supercomputers-a-few-notes/index.html b/2014/spark-on-supercomputers-a-few-notes/index.html new file mode 100644 index 0000000..bc90394
This is a crosspost from Glenn K. Lockwood: Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

Spark on Supercomputers: A Few Notes


    I’ve been working with Apache Spark quite a bit lately in an effort to bring it into the fold as a viable tool for solving some of the data-intensive problems encountered in supercomputing.  I’ve already added support for provisioning Spark clusters to a branch of the myHadoop framework I maintain so that Slurm, Torque, and SGE users can begin playing with it, and as a result of these efforts, I’ve discovering a number of interesting issues with Spark running on traditional supercomputers.

At this point in time, Spark is very rough around the edges.  The core implementation of resilient distributed datasets is all there and works wonderfully, but I’ve found that it doesn’t take long to start discovering bugs and half-implemented features that can get very confusing very quickly.  Perhaps half of the problems I’ve faced are the result of the fact that I have been trying to run Spark in non-traditional ways (for example, over hosts’ TCP over InfiniBand interfaces and with non-default config directories), and although the documentation claims to support all of the features necessary to make this possible, the reality is a bit different.

    What follows are just some incoherent notes I’ve taken while porting Spark to the myHadoop framework.  Spark is rapidly developing and it is constantly improving, so I hope this post becomes outdated as the Spark developers make the framework more robust.

Control Script Problems

Hadoop and Spark both ship with “control scripts” or “cluster launch scripts” that facilitate the starting and stopping of the entire cluster of daemons.  At the highest level, this includes start-all.sh and stop-all.sh, which make calls to start-dfs.sh and start-yarn.sh (in Hadoop) and to start-master.sh and start-slaves.sh (in Spark).  In Hadoop, these scripts work wonderfully, but Spark’s implementation of these control scripts is still quite immature because they carry implicit assumptions about users’ Spark configurations.

    Like Hadoop, Spark supports a spark-env.sh file (located in $SPARK_CONF_DIR) which defines environment variables for all of the remote Spark workers that are spawned across the cluster.  This file is an ideal place to put the following environment variable definitions:
• SPARK_MASTER_IP – the default value for this is hostname, which is generally not a great default on most clusters.  On Rocks, we append “.ibnet” to the hostname to get Spark to operate over the InfiniBand fabric.
• SPARK_LOCAL_IP – again, ensure that this is set up to use the correct interface on the cluster.  We append .ibnet on Rocks.
• SPARK_HOME, SPARK_PREFIX, and SPARK_CONF_DIR should also be defined here, since spark-env.sh will usually override the variables defined by spark-config.sh (see below).

$SPARK_HOME/sbin/spark-config.sh is where much of the Spark control scripts’ “intelligence” comes from as far as defining the environment variables that Spark needs to launch.  In particular, spark-config.sh defines the following variables before reading spark-env.sh:

• SPARK_PREFIX
• SPARK_HOME
• SPARK_CONF_DIR

The problem is that spark-config.sh will stomp all over anything the user defines for the above variables, and since spark-config.sh is called from within all of the Spark control scripts (both invoked by the user and invoked by sub-processes on remote hosts during the daemon-spawning process), trying to get Spark to use non-default values for SPARK_CONF_DIR (e.g., exactly what myHadoop does) gets to be tedious.
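To make this concrete, a minimal spark-env.sh along the lines described above might look like the sketch below; the hostnames, install paths, and even the “.ibnet” suffix are site-specific assumptions rather than anything Spark ships with:

# spark-env.sh -- sourced by every Spark daemon at startup (illustrative sketch)
export SPARK_MASTER_IP=master.ibnet          # the master's IPoIB hostname (hypothetical)
export SPARK_LOCAL_IP=$(hostname).ibnet      # bind this daemon to its IPoIB interface
export SPARK_HOME=/opt/spark-0.9.0           # hypothetical install location
export SPARK_PREFIX=$SPARK_HOME
export SPARK_CONF_DIR=$HOME/spark-conf       # the non-default config dir we want honored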

    The Spark developers tried to work around this by having the control scripts call spark-env.sh after spark-config.sh, meaning you should be able to define your own SPARK_CONF_DIR in spark-env.sh.  Unfortunately, this mechanism of calling spark-env.sh after spark-config.sh appears as

. "$sbin/spark-config.sh"

if [ -f "${SPARK_CONF_DIR}/spark-env.sh" ]; then
    . "${SPARK_CONF_DIR}/spark-env.sh"
fi
    That is, spark-config.sh will stomp all over any user-specified SPARK_CONF_DIR, and then use the SPARK_CONF_DIR from spark-config.sh to look for spark-env.sh.  Thus, there is no actual way to get the Spark control scripts (as of version 0.9) to honor the user-specified SPARK_CONF_DIR.  It looks like the latest commits to Spark have started to address this, but a cursory glance over the newest control scripts suggests that this remains broken.

Anyway, as a result of this, myHadoop’s Spark integration eschews the Spark control scripts and handles spawning the daemons more directly using the manual method of spawning slaves.  Doing this averts the following issues:

1. start-slaves.sh can’t find any slaves because it always looks for $SPARK_HOME/etc/slaves.  This can be worked around by passing SPARK_SLAVES=$SPARK_CONF_DIR/slaves to start-slaves.sh for a non-default SPARK_CONF_DIR.
2. stop-master.sh doesn’t do anything useful, because you still need to kill -9 the master process by hand.  Not sure why this is the case.
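For the curious, sidestepping the control scripts amounts to launching the deploy classes by hand, roughly as in the sketch below for a Spark 0.9-era standalone cluster; the master hostname is hypothetical, and the class names and default port 7077 are worth double-checking against your Spark version:

# on the master node
$SPARK_HOME/bin/spark-class org.apache.spark.deploy.master.Master &

# on each host in the slaves file, point a worker at the master
for host in $(cat $SPARK_CONF_DIR/slaves); do
    ssh $host "$SPARK_HOME/bin/spark-class \
        org.apache.spark.deploy.worker.Worker spark://master.ibnet:7077" &
done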


    Deciphering Spark Errors


    Here are various cryptic stack traces I’ve encountered while working on Spark.  I kept these mostly for myself, but I’ve started meeting people that hit the same problems and thought it might be worthwhile to share the diagnoses I’ve found.

    In general, Spark seems to work best when used conservatively, but when you start doing things that do not strictly fall within the anticipated use case, things break in strange ways.  For example, if you try to write an RDD with an empty element (e.g., a text file with empty lines), you would get this really crazy error that does not actually say anything meaningful:

14/04/30 16:23:07 ERROR Executor: Exception in task ID 19
scala.MatchError: 0 (of class java.lang.Integer)
     at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:110)
     at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:153)
     at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:96)
     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
     at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
     at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:109)
     at org.apache.spark.scheduler.Task.run(Task.scala:53)
     at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:213)
     at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:49)
     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:178)
     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
     at java.lang.Thread.run(Thread.java:722)

I filed a bug report about this particular problem and the issue has been fixed, but it’s just one of those edge cases where Spark will fail catastrophically (I had to look at the source code to figure out what “scala.MatchError” meant).  Usually you wouldn’t be operating on empty data sets, but I discovered this error when I was trying to quickly determine if my Spark slaves were communicating with my master correctly by issuing

file = sc.textFile('hdfs://master.ibnet0/user/glock/input.txt')
file.saveAsTextFile('hdfs://master.ibnet0/user/glock/output')

That is, simply reading in a file and writing it back out with pyspark would cause catastrophic failure.  This is what I meant when I said Spark’s still rough around the edges.

Here are a few more errors I’ve encountered.  They’re not problems with Spark, but the stack traces and exceptions thrown can be a little mysterious.  I’m pasting it all here for the sake of googlers who may run into these same problems.

If you try to use Spark built against Hadoop 2 with a Hadoop 1 HDFS, you’ll get this IPC error:

>>> file.saveAsTextFile('hdfs://s12ib:54310/user/glock/gutenberg.out')
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/glock/apps/spark-0.9.0/python/pyspark/rdd.py", line 682, in saveAsTextFile
    keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
  File "/home/glock/apps/spark-0.9.0/python/lib/py4j-0.8.1-src.zip/py4j/java_gateway.py", line 537, in __call__
  File "/home/glock/apps/spark-0.9.0/python/lib/py4j-0.8.1-src.zip/py4j/protocol.py", line 300, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o23.saveAsTextFile.
: org.apache.hadoop.ipc.RemoteException: Server IPC version 9 cannot communicate with client version 4
     at org.apache.hadoop.ipc.Client.call(Client.java:1070)
     at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:225)
     at $Proxy7.getProtocolVersion(Unknown Source)
     at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:396)
     at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:379)

If your Pythons aren’t all the same version across the nodes when Spark workers are instantiated, you might get a cryptic error like this when trying to call the count() method on an RDD:

14/04/30 16:15:11 ERROR Executor: Exception in task ID 12
org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/worker.py", line 77, in main
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/serializers.py", line 182, in dump_stream
    self.serializer.dump_stream(self._batched(iterator), stream)
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/serializers.py", line 117, in dump_stream
    for obj in iterator:
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/serializers.py", line 171, in _batched
    for item in iterator:
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/rdd.py", line 493, in func
    if acc is None:
TypeError: an integer is required

     at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:131)
     at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:153)
     at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:96)
     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:109)
     at org.apache.spark.scheduler.Task.run(Task.scala:53)
     at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:213)
     at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:49)
     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:178)
     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
     at java.lang.Thread.run(Thread.java:722)

    If you try to write an RDD to a file with mismatched Python versions, or if you were using anything earlier than Python 2.7 (e.g., 2.6) with any Spark version earlier than 1.0.0, you’d see this:

14/04/30 17:53:20 WARN scheduler.TaskSetManager: Loss was due to org.apache.spark.api.python.PythonException
org.apache.spark.api.python.PythonException: Traceback (most recent call last):
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/worker.py", line 77, in main
    serializer.dump_stream(func(split_index, iterator), outfile)
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/serializers.py", line 117, in dump_stream
    for obj in iterator:
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/rdd.py", line 677, in func
    if not isinstance(x, basestring):
SystemError: unknown opcode

     at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:131)
     at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:153)
         at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:96)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:109)
         at org.apache.spark.scheduler.Task.run(Task.scala:53)
         at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:213)
         at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:49)
         at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:178)
         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
     at java.lang.Thread.run(Thread.java:722)
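A cheap way to catch this mismatch before it bites is to compare interpreter versions across every host in the slaves file; this loop is only a sketch, and assumes passwordless ssh just as the control scripts themselves do:

for host in $(cat $SPARK_CONF_DIR/slaves); do
    printf '%s: ' "$host"
    ssh "$host" python -V 2>&1    # python -V prints to stderr in Python 2
done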

    If your HDFS URI is wrong, the error message actually makes sense.  It is buried quite deeply though.

Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/rdd.py", line 682, in saveAsTextFile
    keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/java_gateway.py", line 537, in __call__
  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/protocol.py", line 300, in get_return_value
py4j.protocol.Py4JJavaError: An error occurred while calling o23.saveAsTextFile.
    : java.lang.IllegalArgumentException: java.net.UnknownHostException: s12ib.ibnet0
         at org.apache.hadoop.security.SecurityUtil.buildTokenService(SecurityUtil.java:418)
         at org.apache.hadoop.hdfs.NameNodeProxies.createNonHAProxy(NameNodeProxies.java:231)
         at org.apache.hadoop.hdfs.NameNodeProxies.createProxy(NameNodeProxies.java:139)
     at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:510)
     at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:453)
         at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:136)
         at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2433)
         at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88)
         at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467)
         at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449)
         at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367)
         at org.apache.hadoop.fs.Path.getFileSystem(Path.java:287)
         at org.apache.hadoop.mapred.SparkHadoopWriter$.createPathFromString(SparkHadoopWriter.scala:193)
         at org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(PairRDDFunctions.scala:685)
         at org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(PairRDDFunctions.scala:572)
         at org.apache.spark.rdd.RDD.saveAsTextFile(RDD.scala:894)
         at org.apache.spark.api.java.JavaRDDLike$class.saveAsTextFile(JavaRDDLike.scala:355)
         at org.apache.spark.api.java.JavaRDD.saveAsTextFile(JavaRDD.scala:27)
         at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
         at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
         at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
         at java.lang.reflect.Method.invoke(Method.java:597)
         at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
         at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:379)
         at py4j.Gateway.invoke(Gateway.java:259)
         at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
         at py4j.commands.CallCommand.execute(CallCommand.java:79)
         at py4j.GatewayConnection.run(GatewayConnection.java:207)
         at java.lang.Thread.run(Thread.java:619)
    Caused by: java.net.UnknownHostException: s12ib.ibnet0
     … 29 more

diff --git a/2014/storage-utilization-in-the-long-tail-of-science/index.html b/2014/storage-utilization-in-the-long-tail-of-science/index.html
This is a crosspost from Glenn K. Lockwood, Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

    Storage Utilization in the Long Tail of Science


    Introduction


Since changing careers and moving up to the San Francisco Bay Area in July, I haven’t had nearly as much time to post interesting things here on my blog—I guess that’s the startup life. That isn’t to say that my life in DNA sequencing has been without interesting observations to explore though; the world of high-throughput sequencing is becoming increasingly dependent on high-performance computing, and many of the problems being solved in genomics and bioinformatics are stressing aspects of system architecture and cyberinfrastructure that haven’t gotten a tremendous amount of exercise from the more traditional scientific domains in computational research.

    Take, for example, the biggest and baddest DNA sequencer on the market: over the course of a three-day run, it outputs around 670 GB of raw (but compressed) sequence data, and this data is spread out over 1,400,000 files. This would translate to an average file size of around 500 KB, but the reality is that the file sizes are a lot less uniform:

Figure 1. File size distribution of a single flow cell output (~770 gigabases) on Illumina’s highest-end sequencing platform
    After some basic processing (which involves opening and closing hundreds of these files repeatedly and concurrently), these data files are converted into very large files (tens or hundreds of gigabytes each) which then get reduced down to data that is more digestible over the course of hundreds of CPU hours. As one might imagine, this entire process is very good at taxing many aspects of file systems, and on the computational side, most of this IO-intensive processing is not distributed and performance benefits most from single-stream, single-client throughput.

    As a result of these data access and processing patterns, the storage landscape in the world of DNA sequencing and bioinformatics is quite different from conventional supercomputing. Some large sequencing centers do use the file systems we know and love (and hate) like GPFS at JGI and Lustre at Sanger, but it appears that most small- and mid-scale sequencing operations are relying heavily on network-attached storage (NAS) for both receiving raw sequencer data and being a storage substrate for all of the downstream data processing.

I say all of this because these data patterns—accessing large quantities of small files and large files with a high degree of random IO—are a common trait of many scientific applications used in the “long tail of science.” The fact is, the sorts of IO for which parallel file systems like Lustre and GPFS are designed are tedious (if not difficult) to program, and for the majority of codes that don’t require thousands of cores to make new discoveries, simply reading and writing data files in a naïve way is “good enough.”

The Long Tail

This long tail of science is also using up a huge amount of the supercomputing resources made available to the national open science community; to illustrate, 98% of all jobs submitted to the XSEDE supercomputers in 2013 used 1024 or fewer CPU cores, and these modest-scale jobs represented over 50% of all the CPU time burned up on these machines.

Figure 2. Cumulative job size distribution (weighted by job count and SUs consumed) for all jobs submitted to XSEDE compute resources in 2013
    The NSF has responded to this shift in user demand by awarding Comet, a 2 PF supercomputer designed to run these modest-scale jobs. The Comet architecture limits its full-bisection bandwidth interconnectivity to groups of 72 nodes, and these 72-node islands will actually have enough cores to satisfy 99% of all the jobs submitted to XSEDE clusters in 2013 (see above). By limiting the full-bisection connectivity to smaller islands and using less rich connectivity between islands, the cost savings in not having to buy so many mid-tier and core switches are then turned into additional CPU capacity.

    What the Comet architecture doesn’t address, however, is the question of data patterns and IO stress being generated by this same long tail of science—the so-called 99%. If DNA sequencing is any indicator of the 99%, parallel file systems are actually a poor choice for high-capacity, mid-scale jobs because their performance degrades significantly when facing many small files. Now, the real question is, are the 99% of HPC jobs really generating and manipulating lots of small files in favor of the large striped files that Lustre and GPFS are designed to handle? That is, might the majority of jobs on today’s HPC clusters actually be better served by file systems that are less scalable but handle small files and random IO more gracefully?

    Some colleagues and I set out to answer this question last spring, and a part of this quest involved looking at every single file on two of SDSC’s Data Oasis file systems. This represented about 1.7 PB of real user data spread across two Lustre 2.4 file systems—one designed for temporary scratch data and the other for projects storage—and we wanted to know if users’ data really consisted of the large files that Lustre loves or if, like job size, the 99% are really working with small files.  Since SDSC’s two national resources, Gordon and Trestles, restrict the maximum core count for user jobs to modest-scale submissions, these file systems should contain files representative of long-tail users.

Scratch File Systems

At the roughest cut, files can be categorized based on whether their size is on the order of bytes and kilobytes (size < 1024×1024 bytes), megabytes (size < 1024 MB), gigabytes (size < 1024 GB), and terabytes (size < 1024 TB). Although pie charts are generally a terrible way to show relative compositions, this is how the files on the 1.2 PB scratch file system broke down:

Figure 3. Fraction of file count consumed by files of a given size on Data Oasis’s scratch file system for Gordon

The above figure shows the number of files on the file system classified by their size, and there is clearly a preponderance of small files less than a gigabyte in size. This is not terribly surprising as the data is biased towards smaller files; that is, you can fit a thousand one-megabyte files in the same space that a single one-gigabyte file would take up. Another way to show this data is by how much file system capacity is taken up by files of each size:

Figure 4. File system capacity consumed by files of a given size on Data Oasis’s scratch file system for Gordon

This makes it very apparent that the vast majority of the used space on this scratch file system—a total of 1.23 PB of data—is taken up by files on the order of gigabytes and megabytes. There were only seventeen files that were a terabyte or larger in size.
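As an aside, the rough shape of statistics like these can be reproduced on any POSIX file system with nothing fancier than find and awk; the sketch below bins files by power-of-two size and reports the count and total capacity per bin. The survey described here used more specialized tooling, though; walking a petabyte of Lustre this way would take a very long time.

find /scratch -type f -printf '%s\n' | awk '
    { bin = 2 ^ int(log($1 + 1) / log(2))    # power-of-two size bucket
      count[bin]++
      bytes[bin] += $1 }
    END { for (b in count) print b, count[b], bytes[b] }' | sort -n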

    Incidentally, I don’t find it too surprising that there are so few terabyte-sized files; even in the realm of Hadoop, median job dataset sizes are on the order of a dozen gigabytes (e.g., Facebook has reported that 90% of its jobs read in under 100 GB of data). Examining file sizes with much finer granularity reveals that the research data on this file system isn’t even of Facebook scale though:

Figure 5. Number of files of a given size on Data Oasis’s scratch file system for Gordon.  This data forms the basis for Figure 3 above

    While there are a large number of files on the order of a few gigabytes, it seems that files on the order of tens of gigabytes or larger are far more scarce. Turning this into relative terms,

Figure 6. Cumulative distribution of files of a given size on Data Oasis’s scratch file system for Gordon

    we can make more meaningful statements. In particular,

• 90% of the files on this Lustre file system are 1 megabyte or smaller
• 99% of files are 32 MB or less
• 99.9% of files are 512 MB or less
• 99.99% of files are 4 GB or less
    The first statement is quite powerful when you consider the fact that the default stripe size in Lustre is 1 MB. The fact that 90% of files on the file system are smaller than this means that 90% of users’ files really gain no advantages by living on Lustre. Furthermore, since this is a scratch file system that is meant to hold temporary files, it would appear that either user applications are generating a large amount of small files, or users are copying in large quantities of small files and improperly using it for cold storage. Given the quota policies for Data Oasis, I suspect there is a bit of truth to both.

    Circling back a bit though, I said earlier that comparing just the quantity of files can be a bit misleading since a thousand 1 KB files will take up the same space as a single 1 MB file. We can also look at how much total space is taken up by files of various sizes.

Figure 7. File system capacity consumed by files of a given size on Data Oasis’s scratch file system for Gordon.  This is just a more finely diced version of the data presented in Figure 4 above.
    The above chart is a bit data-dense so it takes some staring at to understand what’s going on. First looking at the purple line, we can pull out some pretty interesting facts:

• Half of the file system’s used capacity (50%) is consumed by files that are 1 GB or less in size
• Over 20% of the file system’s used capacity is taken up by files smaller than 64 MB
• About 10% of the capacity is used by files that are 64 GB or larger
    The blue boxes represent the derivative of that purple line—that is, how much space is taken up by files of only one specific size. The biggest chunk of the file system (141 TB) is taken up by 4 GB files, but it appears that there is a substantial range of file sizes that take up very similarly sized pieces of the pie. 512 MB files take up a total of 139 TB; 1 GB, 2 GB, and 8 GB files all take up over 100 TB of total space each as well. In fact, files ranging from 512 MB to 8 GB comprise 50% of the total file system capacity.

Why the sweet spot for space-consuming files is between 512 MB and 8 GB is unclear, but I suspect it’s more caused by the human element in research. In my own research, I worked with files in this range simply because it was enough data to be statistically meaningful while still small enough to quickly re-analyze or transfer to a colleague. For file sizes above this range, the mass of the data made it difficult to manipulate using the “long-tail” cyberinfrastructure available to me. But, perhaps as more national-scale systems come online to meet the needs of these sorts of workloads, this sweet spot will creep out to larger file sizes.

Projects Storage

The above discussion admittedly comes with a lot of caveats.  In particular, the scratch file system we examined was governed by no hard quotas, which did lead some people to leave data resident for longer than they probably should have.  However, the other file system we analyzed was SDSC’s Data Oasis projects storage, which was architected for capacity over performance and featured substantially more disks per OSS.  This projects storage also came with 500 GB quotas by default, forcing users to be a little more mindful of what was worth keeping.

    Stepping back to the coarse-grained kilobyte/megabyte/gigabyte/terabyte pie charts, here is how projects storage utilization compared to scratch storage:

Figure 8. Fraction of file count consumed by files of a given size on Data Oasis’s projects file system (shared between Gordon and Trestles users)
On the basis of file counts, it’s a bit surprising that users seem to store more small (kilobyte-sized) files in their projects space than in their scratch space.  This may imply that the beginning and end data bookending simulations aren’t as large as the intermediate data generated during the calculation.  Alternately, it may be a reflection of user naïveté; I’ve found that newer users were often afraid to use the scratch space because of the perception that their data may vanish from there without advance notice.  Either way, gigabyte-sized files comprised a few hundredths of a percent of files, and terabyte-sized files were scarcer still on both file systems.  The trend was uniformly towards smaller sizes on projects space.

    As far as space consumed by these files, the differences remain subtle.

Figure 9. Fraction of file system capacity consumed by files of a given size on Data Oasis’s projects file system
    There appears to be a trend towards users keeping larger files in their projects space, and the biggest change is the decrease in megabyte-sized files in favor of gigabyte-sized files.  However, this trend is very small and persists across a finer-grained examination of file size distributions:

Figure 10. File system capacity consumed by files of a given size on Data Oasis’s projects file system


Half of the above plot is the same data shown above, making this plot twice as busy and confusing.  However, there’s a lot of interesting data captured in it, so it’s worth the confusing presentation.  In particular, the overall distribution of mass with respect to the various file sizes is remarkably consistent between scratch and projects storage.  We see the same general peak of file size preference in the 1 GB to 10 GB range, but there is a subtle bimodal divide in projects storage that reveals a preference for 128 MB–512 MB and 4 GB–8 GB files, which manifests in the integrals (red and purple lines) that show a visibly greater slope in these regions.

The observant reader will also notice that the absolute values of the bars are smaller for projects storage than for scratch storage; this is a result of the fact that the projects file system is subject to quotas and, as a result, is not nearly as full of user data.  To complicate things further, the projects storage represents user data from two different machines (each with unique job size policies, to boot), whereas the scratch storage is only accessible from one of those machines.  Despite these differences though, user data follows very similar distributions between both file systems.

Corollaries

It is probably unclear what to take away from these data, and that is with good reason.  There are fundamentally two aspects to quantifying storage utilization—raw capacity and file count—because they represent two logically separate things.  There is some degree of interchangeability (e.g., storing a whole genome in one file vs. storing each chromosome in its own file), and this is likely contributing to the broad peak in file size between 512 MB and 8 GB.  With that being said, it appears that the typical long-tail user stores a substantial number of decidedly “small” files on Lustre, and this is exemplified by the fact that 90% of the files resident on the file systems analyzed here are 1 MB or less in size.

This alone suggests that large parallel file systems may not actually be the most appropriate choice for HPC systems that are designed to support a large group of long-tail users.  While file systems like Lustre and GPFS certainly provide a unique capability in that some types of medium-sized jobs absolutely require the IO capabilities of parallel file systems, there are a larger number of long-tail applications that do single-thread IO, and some of these perform IO in such an abusive way (looking at you, quantum chemistry) that they cannot run on file systems like Lustre or GPFS because of the number of small files and random IO they use.

So if Lustre and GPFS aren't the unequivocal best choice for storage in long-tail HPC, what are the other options?

    Burst Buffers

I would be remiss if I neglected to mention burst buffers here since they are designed, in part, to address the limitations of parallel file systems.  However, their actual usability remains unproven.  Anecdotally, long-tail users are generally not quick to alter the way they design their jobs to use cutting-edge technology, and my personal experiences with Gordon (and its 300 TB of flash) were that getting IO-nasty user applications to effectively utilize the flash was often a very manual process that introduced new complexities, pitfalls, and failure modes.  Gordon was a very experimental platform though, and Cray's new DataWarp burst buffer seems to be the first large-scale productization of this idea.  It will be interesting to see how well it works for real users when the technology starts hitting the floor for open science in mid-2016, if not sooner.

    High-Performance NAS

An emerging trend in HPC storage is the use of high-performance NAS as a complementary file system technology in HPC platforms.  Traditionally, NAS has been a very poor choice for HPC applications because of the limited scalability of the typical NAS architecture: data resides on a traditional local file system, with network service provided by an additional software layer like NFS, and the ratio of storage capacity to network bandwidth out of the NAS is very high.

The emergence of cheap RAM and enterprise SSDs has allowed some sophisticated file systems like ZFS and NetApp's WAFL to demonstrate very high performance, particularly for random reads, by using both RAM and flash as a buffer between the network and spinning rust.  This allows certain smaller-scale jobs to enjoy substantially better performance when running on flash-backed NAS than on a parallel file system.  Consider the following IOPS/metadata benchmark run on a parallel file system and a NAS head with SSDs for caching:

    Figure 11. File stat rate on flash-backed NAS vs. a parallel file system as measured by the mdtest benchmark

    A four-node job that relies on statting many small files (for example, an application that traverses a large directory structure such as the output of one of the Illumina sequencers I mentioned above) can achieve a much higher IO rate on a high-performance NAS than on a parallel file system.  Granted, there are a lot of qualifications to be made with this statement and benchmarking high-performance NAS is worth a post of its own, but the above data illustrate a case where NAS may be preferable over something like Lustre.

    Greater Context

Parallel file systems like Lustre and GPFS will always play an essential role in HPC, and I don't want to make it sound like they can be universally replaced by high-performance NAS.  They are fundamentally architected to scale out so that increasing file system bandwidth does not require adding new partitions or using software to emulate a single namespace.  In fact, the single namespace of parallel file systems makes the management of the storage system, its users, and the underlying resources very flexible and straightforward.  No volume partitioning needs to be imposed, so scientific applications' and projects' data consumption does not have to align with physical hardware boundaries.

However, there are cases where a single namespace is not necessary at all; for example, user home directories are naturally partitioned with fine granularity and can be mounted in a uniform location while physically residing on different NAS heads with a simple autofs map (a sketch follows the list below).  In this example, leaving user home directories on a pool of NAS filers offers two big benefits:

    1. Full independence of the underlying storage mitigates the impact of one bad user.  A large job dropping multiple files per MPI process will crush both Lustre and NFS, but in the case of Lustre, the MDS may become unresponsive and block IO across all users' home directories.
    2. Flash caches on NAS can provide higher performance on IOP-intensive workloads at long-tail job sizes.  In many ways, high-performance NAS systems have the built-in burst buffers that parallel file systems are only now beginning to incorporate.
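For concreteness, the autofs maps alluded to above might look something like this sketch; the filer hostnames and usernames are, of course, hypothetical:

# /etc/auto.master -- hand /home over to the auto.home map
/home   /etc/auto.home

# /etc/auto.home -- sun-format map: key, mount options, location
alice   -rw,hard,intr   nas1.example.com:/export/home/alice
bob     -rw,hard,intr   nas2.example.com:/export/home/bob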
    Of course, these two wins come at a cost:
    1. Fully decentralized storage is more difficult to manage.  For example, balancing capacity across all NAS systems is tricky when users have very different data generation rates that they do not disclose ahead of time.
2. Flash caches can only get you so far, and NFS will fall over when enough IO is thrown at it.  I mentioned that 98% of all jobs use 1024 cores or fewer (see Figure 2 above), but 1024 cores all performing heavy IO on a typical capacity-rich, bandwidth-poor NAS head will cause it to grind to a halt.
    Flash-backed high-performance NAS is not an end-all storage solution for long-tail computational science, but it also isn't something to be overlooked outright.  As with any technology in the HPC arena, its utility may or may not match up well with users' workloads, but when it does, it can deliver less pain and better performance than parallel file systems.

    Acknowledgments 

As I mentioned above, the data I presented here was largely generated as a result of an internal project in which I participated while at SDSC.  I couldn't have cobbled this all together without the help of SDSC's HPC Systems group, and I'm really indebted to Rick, Haisong, and Trevor for doing a lot of the heavy lifting in terms of generating the original data, getting systems configured to test, and figuring out what it all meant when the dust settled (even after I had left!).  SDSC's really a world-class group of individuals.
diff --git a/2014/the-shell-for-scientists/index.html b/2014/the-shell-for-scientists/index.html
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.

    The Shell For Scientists


I’ve posted a half-day “The Shell for Scientists” tutorial that I’ve given variants of a number of times; the motivating problem, provided by Greg Wilson for a two-day set of tutorials at the University of Toronto, was cleaning up a bunch of auditory lab data on people’s cochlear implants.


The focus is on productivity and automation; PDF slides are available here (although I really should translate them into a markdown-based format to make them more re-usable).


Covered are a number of basic shell commands:

• echo
• pwd
• cd
• ls
• man
• file
• cat
• more
• wc
• mv
• cp
• rm
• head
• tail
• sort
• mkdir
• rmdir
• grep
• for..do..done

As well as simple script writing. There is some optional material on make (again, for automation) and ssh/scp (because that was frequently necessary for tutorials at SciNet). There are a number of hands-on exercises sprinkled throughout.
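To give a flavour of where things end up (this snippet is mine, not lifted from the slides), a first shell script might chain several of the commands above:

#!/bin/bash
# count the lines in each data file, then keep the five largest counts
for datafile in data/*.txt
do
    wc -l "$datafile"
done | sort -rn | head -5 > largest.txt
cat largest.txt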

diff --git a/_posts/gaborsamu/2014-3-23-udoo_test.md b/2014/udoo-quad-test-drive/index.html
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    Udoo Quad test drive


Here is a brief update regarding my experiences so far with the Udoo Quad board. I call this kicking the tires, but it simply amounts to tinkering with the board and getting a better understanding of its capabilities.


    I produced a short video showing a run of HPCC along with the Qt CPU temperature app that I created.


That wraps up a successful first test drive. What’s next? OpenCL seems like the next logical step.

diff --git a/2015/approximate-mapping-of-nanopore-squiggle-data-with-spatial-indexing/index.html b/2015/approximate-mapping-of-nanopore-squiggle-data-with-spatial-indexing/index.html
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.

    Approximate Mapping of Nanopore Squiggle Data with Spatial Indexing


Over at the Simpson Lab blog, I have a post describing a novel method for Directly Mapping Squiggle Data, using k-d trees to map segmented kmers; a simple proof of concept is available on github.

diff --git a/_posts/dursi/2015-4-26-coarray-fortran-goes-mainstream-gcc-5-1html.md b/2015/coarray-fortran-goes-mainstream-gcc-5-1/index.html
This is a crosspost from Jonathan Dursi, R&D computing at scale. See the original post here.

Coarray Fortran Goes Mainstream: GCC 5.1


This past week’s release of GCC 5.1 contains at least two new features that are important to the big technical computing community: OpenMP4/OpenACC offloading to Intel Phi/NVIDIA accelerators, and compiler support for Coarray Fortran, with the communications layer provided by the OpenCoarrays Project.

    While I don’t want to downplay the importance or technical accomplishment of the OpenMP 4 offloading now being available, I think it’s important to highlight the widespread availability for the first time of a tried-and-tested post-MPI programming model for HPC; and one that, since it is now part of the Fortran standard, is largely immune to fears that it might go away due to lack of interest. Here I’ll give a quick history of Coarray Fortran (CAF), some examples, and the pros and cons of CAF versus other approaches.


    A quick history of Coarray Fortran

Coarray Fortran first became widely known as Co-array Fortran, described in a 1998 paper presenting an implementation on Cray systems (T3Es and X1s) of a minimal extension to Fortran 95 that included distributed-memory computing of enough complexity to allow real applications.


    The basic idea is simple enough from a developer’s point of view. As with most MPI programs, a single program is launched across many processors. Each “image” has its own local variables, as usual. However, variables can also be defined to have a “co-dimension”; that is, a dimension which indexes that variable across all images.

    program coarray1
       implicit none
       integer :: me, right, i
   ! ...

    where square brackets refer to the co-index across images; recall that Fortran, somewhat unfortunately, uses parenthesis both for array indexing and for function arguments. Note also that, in Fortran fashion, image numbers begin at 1.
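(As an aside on actually building such programs: with GCC 5.1, coarray code is compiled with -fcoarray=lib and linked against OpenCoarrays; the caf and cafrun convenience wrappers below ship with OpenCoarrays, though the exact invocations may vary with your installation.)

$ caf coarray1.f90 -o coarray1   # wraps gfortran -fcoarray=lib plus the link flags
$ cafrun -np 4 ./coarray1        # wraps the underlying MPI launcher to start 4 images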


    Running this on 4 images gives:

    $ ./coarray1
     Image            2  has a(2) =            4 ; neighbour has            9
     Image            3  has a(2) =            9 ; neighbour has           16
While it’s often the case that coarrays are also arrays – as is the case here with a – that needn’t be true. Scalar variables (variables without array dimensions) can nonetheless have codimensions and thus be coarrays.


    Co-indexes needn’t be linear; one can also define co-dimensions of co-rank 2 or higher, to impose a grid-like pattern over the ranks.


Co-array Fortran continued to be used on Cray systems, and was submitted as a proposal for inclusion into Fortran 2008. A stripped-down version of the original proposal (losing such things as image “teams”, and the hyphen in Co-array) made it through, with some minor syntax changes. The Cray Fortran compiler quickly adopted the standard, and Intel’s Fortran compiler has since version 12 supported SMP coarrays, with distributed-memory coarrays as part of the “Cluster suite” that includes Intel MPI. IBM and PGI are said to be working on Coarray support. Among less widely-used compilers, OpenUH supported Coarrays quite early on, as did the now-defunct G95.


A technical specification which is expected to make it into a future Fortran standard largely unscathed reinstates support for teams (giving overlapping functionality with MPI communicators for coordinating subsets of processes), and adds some collective operations, some atomic operations, and Events, which are something like condition variables. GCC 5.1 supports many of these features already.


    Examples

    Let’s take a look at a couple of simple examples to see how Coarray Fortran works in some familiar cases, and how the code complexity compares to MPI.


We’ll see in part that, unlike with (say) the Spark or Chapel examples from earlier in the month, in Coarray Fortran the developer is still responsible for explicitly decomposing the problem. That means that a lot of the boilerplate of the MPI versions of the code remains. However, as communication patterns become more complex, the code can still simplify quite a bit.


However, having the communications built into the language has another completely different advantage, one we’ve gotten used to not thinking about as we’re more used to using external libraries. Communication being part of the language means that the compiler itself can perform high-level optimization on communications, just as it would with memory access.


    1D diffusion equation

    Let’s take a look at a simple example I’ve used before, 1d diffusion. Here, we have a 1D domain broken up across images, or MPI ranks, exchanging data just with nearest neighbours.


    Taking a look at the CAF code, we have the data exchange part:

!
! exchange boundary information
!
! ...

    There’s a synchronize statement at the beginning, to make sure we don’t get ahead of any of our neighbours (or vice versa), and then we pluck the necessary data for our guardcells out of the coarray of temperature.


    This seems familiar, and indeed it’s not that different than the obvious MPI implementation:

   !...

   call MPI_Sendrecv(temperature(locnpoints+1,old), 1, MPI_REAL, right, righttag,  &
   ! ...

(and the update is exactly the same).


    But having the exchange done in facilities built into the language has another benefit. Let’s look back to the coarray version. There’s a synchronization point, communications, computation, and (although we don’t see it here), a loop back to the synchronization point, as part of the iteration.


    The compiler will, as it does, perform reorderings that it can prove to itself don’t change the meaning of the code but will likely improve performance. With memory increasingly a bottleneck, compilers frequently perform some sort of prefetch optimization to move requests for data from slow main memory forward, perform computations on data already cache for the ~200 cycles that access will take, and only then work on the data that hopefully has loaded.


    This optimization is familiar in the MPI world, of course; it’s overlapping communication with computation, and is performed using non-blocking Sends and Receives. But because the communication is explicit to the compiler, it’s a difference of degree, not of kind, that the data is coming from over the network rather than from main memory. Thus, this optimization is straightforwardly performed automatically by the compiler.


    On the other hand, it is much less automatic for a developer to rewrite the MPI code:

!
! begin exchange of boundary information
!
! ...

    Block matrix multiplication

    Let’s take a look at another example, a simple block matrix multiplication where each image/task has one block of the A and B matrices, and we’re calculating \(C = A \times B\).

In the CAF version, this is almost embarrassingly easy:

    sync all
    c = 0.
    do k=1,ncols
    ! ...

and the exchange is not that bad in the MPI version, either, using the SUMMA algorithm (Cannon’s, which can be better for small \(P\), would have been messier):

    do k=0,ncols-1
        aremote = a
        bremote = b
        ! ...

    although it did take us a lot more boilerplate to get there; three communicators, explicit temporary arrays, etc:

    call MPI_Init(ierr)
    call MPI_Comm_size(MPI_COMM_WORLD, comsize, ierr)
    ! ...

and this is still a fairly straightforward communications pattern. As communications become more complex, the advantage of having them performed implicitly becomes clearer.

    Coarray Pros

We’ve only looked at two examples, but that’s enough to get a feel for the strengths and weaknesses of CAF versus other options:

    Part of the Language

Compilers are vastly more sophisticated than they were twenty-plus years ago, and putting those optimization engines to work generating fast communications code is an enormous advantage. Having the communications be explicit in the language enables the compiler to perform entire suites of automatic optimizations (prefetching, batching, memory/time tradeoffs) that can’t easily be done with library-based approaches.

    Stable

    One concern in the HPC community about trying new approaches is lingering doubt about whether a given new tool or language will be around five or ten years later; a concern that can become self-fulfilling.


    As part of the Fortran standard, Coarray Fortran is quite definitely here to stay; there are now several competing implementations, and competition will only improve them.


    Incremental

Because Coarray Fortran uses a familiar model — Single Program, Multiple Data, with data manually decomposed — and only changes how the communications are expressed, there is a very modest learning curve for developers already familiar with MPI, and very modest porting effort required.

The familiarity extends in another dimension, as well; Coarray Fortran is about as “authentically HPC” as it’s possible to get (Cray! T3Es! Fortran!) for a community that is sometimes skeptical of ideas from the outside.

    In addition, this incremental approach also makes interoperability with MPI relatively straightforward, for those requiring MPI-based library support.


    Already Quite Fast

OpenCoarrays, which provides the communications support for gfortran’s coarray implementation, is already comparable to, and sometimes faster than, typical MPI code, and in some cases even faster than the very well-tested Cray coarray implementation(!). While this is still the first major release of gfortran coarrays, and performance improvements and doubtless bug fixes remain to be made, it is already a fairly solid and fast piece of software.

    Coarray Cons

    On the other side of the ledger are primarily points we’ve already considered as Pros, but viewed from the glass-half-empty side:


    Part of A Language

    Being built into a language means that it necessarily isn’t available to users of other languages. I think this is largely inevitable for next-gen HPC approaches, to take full advantage of the compilers and runtimes that are now available, but it certainly will affect adoption; I can’t imagine too many C++ programmers will migrate to Fortran for their next project. (Although it does start looking intriguing for Matlab or Python/Numpy users).


    Stable

    As I’ve mentioned in the context of MPI, too much stability can be a bad thing, and the Fortran committee makes the MPI Forum look like a squirrel on cocaine. I’m less concerned about that here in the short term, since the Coarrays that went into the standard were based on a model that had been used for years successfully, and new features are already in the works; but any additional new features that are seen to be needed may well be a long time coming.


    Incremental

    That Coarrays are incremental certainly makes it easier to port existing code, but it means that many of my concerns about MPI as a development environment remain unaddressed. A researcher or application developer still has to perform the manual decomposition of a problem. This requires an enormous amount of eminently automatable boilerplate and zillions of opportunities for meaningless bugs like off-by-one errors. (That sort of bookkeeping is precisely what computers are better at than developers!) That burden also means that substantial amounts of code must be rewritten if the decomposition changes.


    Already Quite Fast

    …Ok, it’s hard to see much of a downside here.


    Conclusion

    The release of gcc-5.1 with coarray support is going to be the first time a huge number of HPC developers have ready access to coarrays. From my point of view, it’s notably less ambitious than a large number of projects out there, but that may well make it easier to adopt for a sizable community. Certainly anyone planning to start a new project in Fortran should give it very serious consideration.


    My own hope is that Coarray Fortran will have a large number of delighted users, some of whose appetite then becomes whetted for other still more productive languages and environments for large-scale technical computing. In the next few posts, I’ll take a closer look at some of those.


    HPC is dying, and MPI is killing it


    King Canute

    Pictured: The HPC community bravely holds off the incoming tide of new technologies and applications. Via the BBC.


    This should be a golden age for High Performance Computing.


    For decades, the work of developing algorithms and implementations for tackling simulation and data analysis problems at the largest possible scales was obscure if important work. Then, suddenly, in the mid-2000s, two problems — analyzing internet-scale data, and interpreting an incoming flood of genomics data — arrived on the scene with data volumes and performance requirements which seemed quite familiar to HPCers, but with a size of audience unlike anything that had come before.


Suddenly discussions of scalability, accuracy, large-scale data storage, and distributed matrix arithmetic all became mainstream and urgent. The number of projects and workshops addressing these topics exploded, and new energy went into implementing solutions to problems faced in these domains.

    In that environment, one might expect that programmers with HPC experience – who have dealt routinely with terabytes and now petabytes of data, and have years or decades of experience with designing and optimizing distributed memory algorithms – would be in high demand.


    They are not.


    Job Trends


    Indeed.com job trends data. Note that as many MPI jobs plotted above require certifications with “Master Patient Index” or “Meetings Professionals International” as are seeking someone who knows how to call MPI_Send.


    Google Trends


    Google trends data for MPI, Hadoop, and Spark


    Instead of relying on those with experience in existing HPC technology stacks or problems, people tackling these internet-scale machine learning problems and genomic data analysis tasks have been creating their own parallel computing stacks. New and rediscovered old ideas are flourishing in new ecosystems, and demand for scalable and accurate computation with these new tools is exploding — while the HPC community resolutely stays on the sidelines, occasionally cheering itself with hopeful assertions of relevance like SC14’s rather plaintive tagline, “HPC Matters”.


    Because within the HPC community, the reaction to these new entrants is mostly not excitement at novel technologies and interesting new problems to solve, but scoffing at solutions which were Not Invented Here, and suggestions that those who use other platforms simply aren’t doing “real” high performance computing – and maybe don’t know what they’re doing at all. You can see this attitude even in otherwise well-researched and thought-out pieces, where the suggestion is that it is genomics researchers’ responsibility to alter what they are doing to better fit existing HPC toolsets. This thinking misses the rather important fact that it is HPC’s job to support researchers’ computing needs, rather than vice versa.


    The idea that the people at Google doing large-scale machine learning problems (which involves huge sparse matrices) are oblivious to scale and numerical performance is just delusional. The suggestion that the genomics community is a helpless lot who just don’t know any better and need to be guided back to the one true path is no less so. The reality is simpler; HPC is wedded to a nearly 25-year old technology stack which doesn’t meet the needs of those communities, and if we were being honest with ourselves is meeting fewer and fewer of the needs of even our traditional user base.


    If HPCers don’t start engaging with these other big-computing communities, both exporting our expertise to new platforms and starting to make use of new tools and technologies from within HPC and beyond, we risk serving an ever-narrowing sliver of big research computing. And eventually that last niche will vanish once other technologies can serve even their needs better.


    Why MPI was so successful

    MPI, long the lingua franca of HPC, has nothing to apologize for. It was inarguably one of the “killer apps” which supported the initial growth of cluster computing, helping shape what the computing world has become today. It supported a substantial majority of all supercomputing work scientists and engineers have relied upon for the past two-plus decades. Heroic work has gone into MPI implementations, and development of algorithms for such MPI features as collective operations. All of this work could be carried over to new platforms by a hypothetical HPC community that actively sought to engage with and help improve these new stacks.


    MPI, the Message Passing Interface, began as a needed standardization above a dizzying array of high-performance network layers and often-proprietary libraries for communicating over these networks. It started with routines for explicitly sending and receiving messages, very useful collective operations (broadcast, reduce, etc.), and routines for describing layout of data in memory to more efficiently communicate that data. It eventually added sets of routines for implicit message passing (one-sided communications) and parallel I/O, but remained essentially at the transport layer, with sends and receives and gets and puts operating on strings of data of uniform types.


    Why MPI is the wrong tool for today

    But nothing lasts forever, and at the cutting edge of computing, a quarter-century is coming quite close to an eternity. Not only has MPI stayed largely the same in those 25 years, the idea that “everyone uses MPI” has made it nearly impossible for even made-in-HPC-land tools like Chapel or UPC to make any headway, much less quite different systems like Spark or Flink, meaning that HPC users are largely stuck with using an API which was a big improvement over anything else available 25 years ago, but now clearly shows its age. Today, MPI’s approach is hardly ever the best choice for anyone.


    MPI is at the wrong level of abstraction for application writers

    Programming at the transport layer, where every exchange of data has to be implemented with lovingly hand-crafted sends and receives or gets and puts, is an incredibly awkward fit for numerical application developers, who want to think in terms of distributed arrays, data frames, trees, or hash tables. Instead, with MPI, the researcher/developer needs to manually decompose these common data structures across processors, and every update of the data structure needs to be recast into a flurry of messages, synchronizations, and data exchange. And heaven forbid the developer thinks of a new, better way of decomposing the data in parallel once the program is already written. Because in that case, since a new decomposition changes which processors have to communicate and what data they have to send, every relevant line of MPI code needs to be completely rewritten. This does more than simply slow down development; the huge costs of restructuring parallel software puts up a huge barrier to improvement once a code is mostly working.


    How much extra burden does working at this level of abstraction impose? Let’s take a look at a trivial example that’s pretty much a best-case scenario for MPI, an explicit solver for a 1D diffusion equation. Regular communications on a regular grid is just the sort of pattern that is most natural for MPI, and so you will find this example in just about every MPI tutorial out there.


    At the end of this post are sample programs, written as similarly as possible, of solving the problem in MPI, Spark, and Chapel. I’d encourage you to scroll down and take a look. The lines of code count follows:


    Now, this isn’t an entirely fair comparison. It should be mentioned that in addition to the functionality of the MPI program, the Spark version is automatically fault-tolerant, and the Chapel version has features like automatically reading parameters from the command line. In addition, changing the data layout across processors in the Chapel version would only involve changing the variable declaration for the global arrays, and maybe writing some code to implement the decomposition in the unlikely event that your distributed array layout wasn’t already supported; similarly, in Spark, it would mean just changing the hash function used to assign partitions to items.


But even lacking those important additional functionalities, the MPI version is over twice as long as the others, with an amount of boilerplate that is itself the entire length of the Chapel program. The reason is quite simple. In Chapel, the basic abstraction is of a domain – a dense array, sparse array, graph, or what have you – that is distributed across processors. In Spark, it is a resilient distributed dataset, a table distributed in one dimension. Either of those can map quite nicely onto various sorts of numerical applications. In MPI, the “abstraction” is of a message. And thus the huge overhead in lines of code.

And this is by far the simplest case; introducing asynchronous communications, or multiple variables with differing layouts, or allowing processors to get out of sync, or requiring load balancing, causes the level of complexity to explode. Even just moving to 2D, the amount of MPI boilerplate almost exactly doubles, whereas the only lines that change in the Chapel program are the array declaration and the line that actually executes the stencil computation.

    On the one hand, this increase in complexity is perfectly reasonable; those are more challenging cases of networked computation. But on the other hand, of all available models, MPI is the only one where the researcher is required to reinvent from scratch the solutions to these problems inside the heart of their own application software. This requires them to focus on network programming instead of (say) differential-equation solving; and to completely re-architect the entire thing if their application needs change.


    Now, none of this is necessarily a problem. Just because MPI is hugely and unnecessarily burdensome for individual scientists to use directly for complex applications, doesn’t mean that it’s bad, any more than (say) sockets or IB verbs programming is; it could be a useful network-hardware agnostic platform for higher-level tools to be built upon. Except…


    MPI is at the wrong level of abstraction for tool builders

    The original book on MPI, Using MPI, dedicated one of its ten chapters (“Parallel libraries”) to explicitly describing features intended to make it easier for tool builders to build libraries and tools based on MPI, and two others describing implementations and comparing to other models with relevance to tool-builders.


    This was quite prescient; message-passing based frameworks would indeed soon become very important platforms for building complex parallel and distributed software in different communities. Erlang, released to the public just five years later, is a functional language with message-passing built in that has played a very large role in many communications and control environments. Rather more recently, Akka is a Scala-based message passing framework that, for instance, Spark is built on.


However, all these years later, while there are several specific numerical libraries built on MPI that MPI programs can use, there are no major general-purpose parallel programming frameworks that primarily use MPI as an underlying layer. Both GASNet (which UPC and Chapel implementations make use of) and Charm++ (a parallel computing framework often used for particle simulation methods, amongst other things) have MPI back ends, grudgingly, but they are specifically not recommended for use unless nothing else works; indeed, they have both chosen to re-architect the network-agnostic layer, at significant effort, themselves. (Of the two, GASNet is the more diplomatic about this, “…bypassing the MPI layer in order to provide the best possible performance”, whereas the Charm++ group finds MPI problematic enough that, if you must use MPI for “legacy” applications, they recommend using an MPI-like layer built on top of Charm++, rather than building Charm++ on top of MPI). Similarly, the group implementing Global Arrays – an example returned to time and again in the MPI books – eventually implemented its own low-level library, ARMCI.

    Probably the closest to a truly MPI-based parallel scientific programming framework is Trilinos, which is a well-integrated set of libraries for meshing and numerics rather than a parallel programming model.


    The reason for this disconnect is fairly straightforward. MPI was aimed at two sets of users – the researchers writing applications, and the toolmakers building higher-level tools. But compromises that were made to the semantics of MPI to make it easier to use and reason about for the scientists, such as the in-order guarantee and reliability of messages, made it very difficult to write efficient higher-level tools on top of.


    A particularly strong case study of this dynamic is MPI-2’s one-sided communications, which were aimed squarely at tool developers (certainly a very small fraction of applications written directly in MPI ever used these features). This set of routines had extremely strict semantics, and as a result, they were soundly panned as being unfit for purpose, and more or less studiously ignored. MPI-3’s new one-sided communications routines, introduced 14 years later, largely fixes this; but by this point, with GASNet and ARMCI amongst others available and supporting multiple transports, and coming complete with attractive optional higher-level programming models, there’s little compelling reason to use MPI for this functionality.


    MPI is more than you need for modest levels of parallelism

    At HPC centres around the world, the large majority of HPC use is composed of jobs requiring 128 cores or fewer. At that point, most of the parallelism heavy lifting is best done by threading libraries. For the very modest level of inter-node IPC needed for these 2-4 node jobs, the bare-metal performance of MPI simply isn’t worth the bare-metal complexity. At that level of parallelism, for most applications almost any sensible framework, whether GASNet-based, or Charm++, or Spark, or down to Python multiprocessing or iPython cluster will give decent performance.


    MPI is less than you need at extreme levels of parallelism

    On the other hand, at the emerging extreme high end of supercomputing – the million-core level and up – the bare-metal aspect of MPI causes different sorts of problems.


    The MTBF of modern motherboards is on the order of a few hundred thousand hours. If you’re running on a million cores (say 32,000 nodes or so) for a 24-hour day, failure of some node or another during the run becomes all but certain. At that point, fault-tolerance, and an ability to re-balance the computation on the altered set of resources, becomes essential.
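(Back-of-envelope: at a 200,000-hour MTBF, each node has roughly a \(24/200{,}000 \approx 1.2 \times 10^{-4}\) chance of failing on any given day; across 32,000 nodes that is about \(3.8\) expected failures per day, and only an \(e^{-3.8} \approx 2\%\) chance of finishing the day with every node still up.)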


    Today, MPI’s error handling model is what it has always been; you can assign an errorhandler to be called when an error occurs in an MPI program, and when that happens you can… well, you can print a nice message before you crash, instead of crashing without the nice message.


This isn’t due to anyone’s lack of trying; the MPI Fault Tolerance Working Group has been doing yeomanlike work attempting to bring some level of real fault tolerance to MPI. But it’s a truly difficult problem, due in large part to the very strict semantics imposed by MPI. And after building up 25 years of legacy codes that use MPI, there is absolutely no chance that the pull of the future will exceed the drag of the past in the minds of the MPI Forum - none of those semantics will ever change, for backwards compatibility reasons.

    Balancing and adapting to changing resources are similarly weak spots for the MPI approach; there’s no way that MPI can possibly be of any use in redistributing your computation for you, any more than you could expect TCP or Infiniband Verbs to automatically do that for you. If the highest-level abstraction a library supports is the message, there is no way that the library can know anything about what your data structures are or how they must be migrated.


    Fault-tolerance and adaptation are of course genuinely challenging problems; but (for instance) Charm++ (and AMPI atop it) can do adaptation, and Spark can do fault tolerance. But that’s because they were architected differently.


    Our users deserve the tools best for them

    None of this is to say that MPI is bad. But after 25 years of successes, it’s become clear what the limitations are of having the communications layer written within the researchers’ application. And today those limitations are holding us and our users back, especially compared to what can be done with other alternatives that are already out there on the market.


    And none of this is to say that we should uninstall MPI libraries from our clusters. For the near term, MPI will remain the best choice for codes that have to run on tens of thousands of cores and have relatively simple communications patterns.


But it’s always been true that different sorts of computational problems have required different sorts of parallel tools, and it’s time to start aggressively exploring those that are already out there, and building on what we already have.

    We have to start using these new tools when they make sense for our users; which is, demonstrably, quite often. It’s already gotten to the point where it’s irresponsible to teach grad students MPI without also exposing them to tools that other groups find useful.


    The HPC community can, and should, be much more than just consumers of these external technologies. Our assertions of relevance don’t have to be purely aspirational. We have real expertise that can be brought to bear on these new problems and technologies. Excellent work has been done in MPI implementations on important problems like the network-agnostic layer, job launching, and collective algorithms. The people who wrote those network-agnostic layers are already looking into refactoring them into new projects that can be widely used in a variety of contexts, at lower levels of the stack.


But we need to give up the idea that there is a one-size-fits-all approach to large-scale technical computing, and that it has always been and will always be MPI. Other groups are using different approaches for a reason; we can borrow from them to the benefit of our users, and contribute to those approaches to make them better.

    We can build the future

There are new ways of writing scalable code out there, and completely new classes of problems to tackle, many of which were totally inaccessible just a few years ago. Isn’t that why we got into this line of work? Why don’t more HPC centres have people contributing code to the Chapel project, and why isn’t everyone at least playing with Spark, which is trivial to get up and running on an HPC cluster? Why are we spending time scoffing at things, when we can instead be making big research computing better, faster, and bigger?

    Are we the big research computing community, or the MPI community? Because one of those two has a bright and growing future.


    Many thanks to my colleague Mike Nolta for many suggestions and improvements to this piece and the arguments it contains.


    Appendix

    (Update: see objections that came up after the publication of this post, on twitter and email, on this new post. And see what I like about MPI and why it suggests low-level applications programming isn’t the answer on the third post.)


    Objections

    But the HPC market is actually growing, so this is all clearly nonsense! Everything’s fine!


    It’s completely true that, although much more slowly in relative or absolute terms than the Hadoop or Spark market, the HPC hardware market is still growing. But that’s not much of a reed to cling to.


    Famously, minicomputer sales (things like System/36 or VAXen) were still growing rapidly a decade or so after personal computers started to be available, well into the mid-80s. They kept being sold, and faster and faster, because they were much better for the problems they were intended for — right up until the point that they weren’t.


    Similarly, photo film sales were going up, if slower, until 2003(!). Let’s continue the disruptive innovation clichés as analogies for a moment — as we all now know, Kodak invented the digital camera. The film company’s problem wasn’t that it lacked the expertise that was needed in the new era; it simply flatly refused to use its expertise in these new ways. And as a result it is a shell of its former self today – a tiny, niche, player. Bringing the comparison closer to home is the experience of the once world-striding Blackberry, which ridiculed the iPhone as being, amongst other things, an inefficient user of network communications. (“It’s going to collapse the network!”)


    Take a look at the market for developers. We’ve clearly passed the market peak for MPI programmers, and if HPC continues to be an MPI-only shop, our community will be shut out of the exciting things that are going on today, while many of our users begin being attracted by the benefits of these other approaches for their problems.


    But MPI is much faster than the others because it’s bare metal!


    If this is so important, why don’t HPC programmers save even more overhead by packing raw Infiniband frames themselves?


    HPC programmers should know better than most that once you have some software that solves a complex problem well, getting it to go fast is comparatively straightforward, given enough developer hours.


    It’s absolutely true that current MPI implementations, having had decades to work on it, have got screamingly fast MPI-1 functionality and, to a lesser extent, decent one-sided communications performance. But we live in an era where even JavaScript can have the same order-of-magnitude performance as C or Fortran - and JavaScript might as well have been explicitly designed to be un-en-fastable.


Chapel can already be as fast as, or faster than, MPI in many common cases; indeed, higher-level abstractions allow compilers and runtimes to make optimizations that can’t be performed on individual library calls.

And unless the basic abstractions used by Spark (RDDs) or Flink or the myriad of other options are inherently broken in some way that makes fast implementations impossible — and there’s no evidence that they are — they too will get faster. There’s no reason why blazing-fast network communications should have to be done at the application layer – in the code that is describing the actual scientific computation. The HPC community can choose to help with implementing that tuning, bringing their expertise and experience to bear. Or they can choose not to, in which case it will happen anyway, without them.

    But MPI will adopt new feature X which will change everything!


    Let me tell you a story.


MPI-1 and MPI-2 used 32-bit integers for all counts. This means that programs using MPI – the lingua franca of supercomputing, in an era when outputting terabytes of data was already routine – could not (for instance) write out more than 2e9 objects at once without taking some meaningless additional steps.

This was discussed at length in the process leading up to the 2012 release of MPI-3, the first .0 release in 14 years. After much discussion it was decided that changing things would be a “backwards compatibility nightmare”, so the result was that the existing API… was left exactly as it is. But! There was a new, larger data type, MPI_Count, which is used in a couple of new routines (like MPI_Type_get_extent_x, in addition to the old MPI_Type_get_extent) which simplify some of the pointless steps you have to take. Yay?
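Concretely, the workaround looks something like this minimal sketch (using the mpi_f08 bindings): counts in the classic API stay default integers, and only the new _x variants traffic in MPI_Count.

    use mpi_f08
    integer(kind=MPI_COUNT_KIND) :: lb, extent

    ! the MPI-3 "_x" variant returns the extent as an MPI_Count, instead
    ! of a (possibly overflowing) default integer
    call MPI_Type_get_extent_x(MPI_REAL, lb, extent)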


And that’s the story of how, in 2015, our self-imposed standard of supercomputing has a hardcoded 32-bit limit throughout almost its entire API, limiting how many objects it can deal with at once without going through pointless but straightforward hoops. A 32-bit limit: 90’s retro-cool computing, like chiptune music and pixelated graphics with 8-bit color. This is unfortunate, but inevitable; after a tool has existed for 25 years, maintainers feel more responsibility towards the past than to the future. Which is perfectly reasonable, and maybe even the correct decision for that tool; but that’s when one needs to start looking elsewhere for new projects.

    But these other tools use programming languages I find to be icky.


Yes, well, perhaps the various alternatives involve languages that lack the austere beauty of Fortran and Matlab, but so it goes. One approach to this would be to help expand these tools’ reach into the HPC community by writing bindings and APIs for languages more familiar in this space.

    But the Hadoop-y communities are incredibly naive about high performance interconnects, multicore/shared memory, complex scheduling,…


Yes! This is 100% true. And on the HPC community’s side, we’re quite innocent when it comes to fault tolerance at scale, building reusable tools, architecting APIs so that normal scientists can use them while hiding communications complexity beneath, and integrating nicely with systems that industry cares about. There’s a window where we can help each other and contribute meaningfully to each group’s success. But other communities can and will eventually figure out, say, multicore with or without our help.

    Sample Code

    Below are code samples referred to earlier in the piece.


    MPI

    Here is the 1D diffusion in MPI, Python:

    #!/usr/bin/env python
    import numpy
    from mpi4py import MPI

    Chapel

    1D diffusion in Chapel (can read parameters from command line)

    use blockDist;

    config var ncells = 100, nsteps = 20,  leftX = -10.0, rightX = +10.0,

    writeln("Final: ", temp);
    }

    HPC+MPI on RCE Podcast


    In the latest episode of the RCE podcast, Jeff Squyres, Brock Palen, and I spoke about the HPC and MPI series of blogposts and the community reaction.


    It was a really interesting discussion; Brock has worked closely with an enormous variety of researchers and helps run an HPC centre, while Jeff deeply understands HPC networking, from the getting ones and zeros onto the wires at the lowest-level of hardware up to being an extremely active member of the MPI forum.


    I was really pleased that they asked me to join them; I’ve been listening to their show since at least the VisIt episode in 2009 (I had just missed the Hadoop episode, it turns out) and for some years they were the only big computing podcast around.


    If you were interested in the MPI discussion, you might want to listen to this most recent episode; if you’re interested in big computing software projects more broadly, you should definitely consider subscribing to the podcast.


HPC - Seeing is believing


People who know me know that I like to tinker. Whether it’s with cars, computers or other mechanical gizmos, I’ve always enjoyed dismantling and reassembling things - understanding what makes them tick. Maintaining classic computers is a passion of mine and, as you’ve seen in my previous blogs on that topic, I’ve always tried to add an element of high performance computing when tinkering with computers. Whether on a classic SPARC based laptop, MIPS smartbook or a modern ARM developer board, there is a sense of achievement in getting such systems installed in 2015 and successfully running a benchmark, for example. Even when running a simple home network, in this case with a wild mix of machines, the importance of monitoring is apparent.


For organizations that make a considerable investment in high performance computing infrastructure, monitoring that infrastructure and understanding how it’s being used is of paramount importance. IBM Platform RTM is comprehensive monitoring, reporting and alerting software for HPC environments running IBM Platform LSF. It takes the guesswork out of HPC infrastructure monitoring by aggregating system, workload and license consumption information, all in a single tool.


Whether you’re a system admin or a line of business manager, this Technical Brief provides an in-depth look at the importance of comprehensive HPC infrastructure monitoring - which allows organizations to correlate workload, system and license consumption metrics in a single tool.


    IBM Platform Cluster Manager - how do you like your eggs?


Whether your HPC center is in Lilliput or Blefuscu, you’ll appreciate the importance of a flexible and easy-to-use cluster management solution to empower your populations. Administrators need software that will allow them to easily set up, manage, monitor and maintain their infrastructure and ensure consistency for repeatable performance. With the varied workloads we see in modern HPC centers, ranging from traditional HPC to Big Data and Analytics, organizations may also consider building out heterogeneous environments, where different hardware types are used for different workloads. As the OpenPOWER Foundation grows, it stresses the overall importance of workflows across the HPC Data Life Cycle - it’s clear that when it comes to solutions for technical computing, it’s no longer a one-horse race.


IBM Platform Cluster Manager is powerful, easy-to-use infrastructure management for today’s scale-out computing needs. The latest release of Platform Cluster Manager V4.2.1 now provides the ability to manage mixed computing environments - so whether you’re running Linux on POWER Big-Endian or Little-Endian, the choice is yours. In fact, you can even deploy and seamlessly manage a mixed infrastructure taking advantage of the latest IBM POWER8 and x86 systems.


Leveraging xCAT technology, Platform Cluster Manager can manage clusters ranging from ‘Lilliputian’ in size all the way up to 2500 nodes. Platform Cluster Manager Advanced Edition supports the automated creation of multiple clusters on a shared infrastructure - allowing you to easily satisfy the business requirements of Lilliputians and Blefuscans alike. For organizations with a single HPC cluster, Platform Cluster Manager Standard Edition provides the ability to quickly provision, run, manage and monitor a technical computing infrastructure with unprecedented ease.


For users taking advantage of IBM POWER8 systems, Platform Cluster Manager can now provision PowerNV nodes as well as PowerKVM hypervisors, which provides greater flexibility in infrastructure management and optimization. Further enhancements in this release geared towards administrator productivity include IBM POWER8 energy, PowerKVM and enhanced switch monitoring.


So go ahead. With Platform Cluster Manager you can crack your eggs any way you like.


IBM Platform LSF and Docker - A Whale of a time!


Containers are useful. Whether you’re shipping things across the blue seas or encapsulating applications on a computer system, they provide numerous benefits. HPC administrators will know that applications today can depend upon multiple packages, libraries and environments. Docker, a container technology for Linux based on well-proven technologies, brings ease of setup, use and efficiency to application management. Leveraging Docker in High-Performance Computing is one approach to addressing application “dependency hell”, as well as easing the transition to the cloud.


Workload managers are commonly used in High Performance Computing environments to drive effective use of compute resources and ensure alignment of resources with business priorities. IBM Platform LSF, a leading family of workload management products, provides support for workloads to run within user-specified Docker containers by way of an integration package available as an open beta on Service Management Connect.


By leveraging the rich Platform LSF plugin framework, the Docker integration works seamlessly and allows users to specify a defined Docker image as a submission option. Thanks to the integration, all resource constraints and environment variables are automatically passed to the container, and Platform LSF job lifecycle management functions, including monitoring resource usage as well as control actions (i.e. suspend, resume and terminate), are also supported for Docker containers.


Ease the burden of administration and ensure consistency with IBM Platform LSF and Docker - and have a whale of a time!


IBM Software Defined Infrastructure - Put the POWER down and jump the chasm!


OpenPOWER continues to put the power down and accelerate strongly in 2015. Earlier this year, the First Annual OpenPOWER Summit took place, and more recently Cabot Partners published the paper Crossing the Performance Chasm with OpenPOWER, outlining the benefits of OpenPOWER for HPC. Reading through that paper, one important point which stuck out was the set of considerations when choosing an HPC system. It suggests that rather than using point benchmarks, one must consider the performance of workflows across the HPC Data Life Cycle. This seems a very sensible approach, actually. Would you choose a car strictly on its 0-100km/h time? Well, when I was 16 years old, probably yes. What about braking, cornering, economy, safety? You need strong performance in all categories. The OpenPOWER Foundation achieves just this - by bringing together organizations with broad expertise, from accelerators to interconnects, around IBM POWER server technology.


IBM Software Defined Infrastructure helps to wield the sword of OpenPOWER for high performance computing workloads. Featuring broad OS/platform support including Linux on POWER (Little Endian), IBM Platform Computing software products provide broad capabilities including application management, infrastructure management, job scheduling as well as monitoring and reporting.


Learn more about the IBM Software Defined Infrastructure for high performance computing on OpenPOWER in this presentation from the OpenPOWER Summit. Put the POWER down and jump the chasm!


    IBM Workpad z50 & NetBSD - an interesting combination


This week we look at another RISC-powered notebook, this time from IBM. Although IBM did produce a line of PowerPC based Thinkpad systems, this blog is focused on a little-known system called the IBM Workpad z50. This Microsoft Handheld PC form factor system was launched in March 1999 and ran Windows CE at the time. As we’ll see below, with some ingenuity it is also able to run NetBSD, which makes it a much more interesting proposition (at least for me). Ironically, although this is a high performance computing (HPC) focused blog, the “HPC” in this case stands for “Handheld PC”.


The Workpad z50 has a form factor smaller than a notebook, but has what I consider to be an excellent keyboard and of course the trademark Thinkpad trackpoint! Looking more closely at the specifications:

• NEC VR4121 MIPS R4100 CPU @ 131 MHz
• 16 MB System RAM (expandable)
• 16 MB System ROM
• 8.4” LCD Display 640x480 (16-bit)
• External Monitor connector (SVGA)
• Serial port
• Infrared port
• CF slot
• PCMCIA slot

What prevents me from taking my pristine Workpad z50 to the local electronics recycling facility is NetBSD. With a little effort it is possible to install recent versions of NetBSD on the Workpad z50 and even run XWindows. There are a number of sources of information on this topic, including some videos on YouTube which helped me a great deal:


I won’t run through the install procedure here as that’s been well covered already in the above series of videos. Rather, let’s look at the boot-up sequence and, of course in keeping with the high performance computing theme, run a simple benchmark. Links to the videos follow below:


The requisite system bootup


Starting XWindows and running Linpack


Using NetBSD pkgsrc, I have set up NetBSD on an x86 based system and have taken advantage of distcc to cross-compile binaries. This helps greatly to get packages quickly compiled for the system. Note that I ran into a lot of local compiles failing due to lack of RAM, so cross-compiling is almost a must.


Equipped with PCMCIA, I’m able to easily add to the Workpad z50 such capabilities as Ethernet, wireless networking and even SCSI. Below is my collection of PCMCIA adaptors.


Next steps? I’ll be looking to move to the NetBSD 6.x series and compile a more compact kernel (with drivers removed that I don’t require). And unlike the system in my previous blog, this one is silent :)


    In Praise of MPI Collectives and MPI-IO


    While I have a number of posts I want to write on other topics and technologies, there is one last followup I want to make to my MPI post.

    Having said what I think is wrong about MPI (the standard, not the implementations, which are of very high quality), it’s only fair to say something about what I think is very good about it. And why I like these parts gives lie to one of the most common pro-MPI arguments I’ve been hearing for years; that application programmers coding at low levels is somehow essential - or even just a good idea - for performance.


    Two great things about MPI

    Collective Operations

    Since the very beginning, MPI has defined a suite of collective communications that include operations like scatter, gather, prefix scan, and reduce. While these weren’t invented by MPI – many were already implemented as “global communications” routines in the CM-2’s Connection Machine Scientific Software Library, for instance, and there is lots of literature on implementing those operations on other architectures like the iPSC/860-based hypercube systems – it’s certainly fair to say that it was MPI that popularized them to the point that they’ve started getting hardware support in network cards. The popularization stems partly from how widely taught MPI is, but also from useful generalizations that the MPI Forum made, like user-defined reduction operations, or being able to perform these operations on user-defined subsets of tasks.


    A classic use of MPI collective operations would be using a reduce to find a global sum (or max, or min, or a user defined operation) of local values:

    from mpi4py import MPI
    import random
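The same idea in Fortran, as a minimal sketch (using the mpi_f08 bindings; this program is illustrative, not from the original post). The split into odd and even ranks is just to show the generalization mentioned above: reducing over a user-defined subset of tasks.

    program reduce_demo
        use mpi_f08
        implicit none
        type(MPI_Comm) :: subcomm
        integer :: rank
        real :: local, total, subtotal

        call MPI_Init()
        call MPI_Comm_rank(MPI_COMM_WORLD, rank)
        local = real(rank)

        ! global sum, with the result distributed back to every task
        call MPI_Allreduce(local, total, 1, MPI_REAL, MPI_SUM, MPI_COMM_WORLD)

        ! the same reduction over a user-defined subset: odd vs even ranks
        call MPI_Comm_split(MPI_COMM_WORLD, mod(rank, 2), rank, subcomm)
        call MPI_Allreduce(local, subtotal, 1, MPI_REAL, MPI_SUM, subcomm)

        call MPI_Finalize()
    end program reduce_demo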

    MPI-IO

    MPI-IO is the foundational middleware for HPC parallel I/O. Parallel HDF5 (and thus Parallel NetCDF4), ADIOS, and others are built on top of it. As a result, even application software that doesn’t explicitly use MPI sometimes relies on MPI-IO for reading and writing large files in parallel.


    The key concept in MPI-IO is a “file view”, which describes (in terms of MPI data layouts) where in the file a process will be writing. Once that’s done, writing data to the file just looks like sending a message to the file. A trivial example follows below; more complex data layouts like (as often happens in scientific computing) non-contiguous slices of large multidimensional arrays being read and written would look exactly the same:

    from mpi4py import MPI

    comm = MPI.COMM_WORLD
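A Fortran sketch of the same pattern (mpi_f08; the file name and the assumption of 4-byte default integers are mine, not the post's): each rank sets a view onto its own slice of a shared file, and the collective write then looks just like sending a message to the file.

    program fileview_demo
        use mpi_f08
        implicit none
        integer, parameter :: nlocal = 100
        type(MPI_File) :: fh
        integer :: rank, buf(nlocal)
        integer(kind=MPI_OFFSET_KIND) :: disp

        call MPI_Init()
        call MPI_Comm_rank(MPI_COMM_WORLD, rank)
        buf = rank

        call MPI_File_open(MPI_COMM_WORLD, 'output.dat', &
                           MPI_MODE_WRONLY + MPI_MODE_CREATE, MPI_INFO_NULL, fh)

        ! the "file view": this rank's slice starts nlocal integers in, per rank
        disp = int(rank, MPI_OFFSET_KIND) * nlocal * 4
        call MPI_File_set_view(fh, disp, MPI_INTEGER, MPI_INTEGER, 'native', MPI_INFO_NULL)

        ! collective write to this rank's slice of the shared file
        call MPI_File_write_all(fh, buf, nlocal, MPI_INTEGER, MPI_STATUS_IGNORE)

        call MPI_File_close(fh)
        call MPI_Finalize()
    end program fileview_demo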

    Why they’re great

    These two very different parts of the MPI standard have three important features in common for this discussion.

• They’re at much higher levels of abstraction than most of the API
• Application programmers would get worse performance, not better, if they tried to implement their own at lower levels.
• Their implementations have gotten faster and faster over time.

Collectives and MPI-IO are at a higher level of abstraction

      Calls to MPI collective operations or MPI-IO describe what should be done, not how to do it, and at a much higher level than MPI_Send()/MPI_Put().


      Operations like “All processes sum their results and distribute the result to all processes”, or “Each process writes to their slice of the file” are enormously broader than “Send this message to process X”. There’s a large number of ways they could be implemented, and in fact there’s a huge literature on both collectives and MPI-IO on various approaches to doing so.


      Application programmers reimplementing them would be worse for performance

      If the “low-level application programming is essential for high performance” argument was true, then of course we would be actively dissuading researchers from using these high-level tools. But we don’t, and we’re right not to.


      Most of us who have worked with enough researchers writing their own HPC codes have had the experience of someone coming into our office who was broadcasting data with a loop over MPI_Send()s, or trying to write to a shared file using fseek() or the like, and we’ve directed them to collective operations or MPI-IO instead. We do the same, of course, when someone is trying to type in some Gaussian Elimination code from Numerical Recipes (no link; that book has done enough damage) and we guide them to our local LAPACK implementation instead.


      And we do this because even we don’t believe that scientists implementing these things at low level will give better performance. It’s not about it being “too hard”; it’s something else entirely. We know that it would be a huge amount of wasted effort for a worse, slower, result.


MPI collective operation implementations make run-time decisions behind the researcher’s back, based on the size of the data and the structure of the communicator being used, to decide whether to use k-ary trees, or hyper-cubes, or split-ring approaches, and in one, two, or multiple phases of communications, to perform the operation. MPI-IO implementations use approaches like data-sieving or two-phase I/O to trade off network communication for disk I/O, and use close integration with the filesystem to inform that tradeoff.

      Somebody had to do all that challenging low-level work, yes. But the idea that those optimizations and algorithmic work is properly the job of the researcher/application programmer is absurd.


      Implementations got faster and faster

      These highly optimized implementations of these high-level abstractions did not, of course, spring fully formed from somewhere, any more than the reference implementation of LAPACK/BLAS was blazingly fast. The abstractions were created with an understanding of both what application programmers needed and what was implementable, and then years and years of work went into developing the algorithms and implementations that we make use of today.


      Initial implementations of MPI-1 collectives were (naturally!) not super optimized, and there were certainly developers who scoffed at the performance and who pointed out they could do better writing low-level network code on their own. They were, in that snapshot in time, narrowly correct; but more broadly and in the longer term, they were flat-out wrong. The most useful and productive approach to a researcher finding out that early versions of those collective operations (say) were slow in some situations was not to break down and re-implement it themselves at low level; it was to file an issue with the library provider, and help them fix it so that it would be faster for everyone.


      These points generalize

      I don’t think anything I’ve said above is particuarly controversial. Performance, as well as productivity, for researchers and applications programmers has clearly improved as a result of MPI’s collectives and MPI-IO.

      -

      But for some reason, the idea that this generalizes — that performance as well as productivity of scientific software development would improve if applications developers spent their time using other, newer higher-level constructs while more tool-builders implemented those constructs in efficient ways — is anathaema to a section of our HPC community.


      I’ve yet to hear compelling reasons why operations on distributed multidimensional arrays, or hash tables, or trees, are completely different from collectives or IO; why application programmers have to implement them directly or indirectly in a low-level tool like MPI sends and receives or gets and puts rather than having them implemented by experts in higher-level environments like Chapel, or Spark, or Ignite, or any of a zillion other projects from within or outside of the HPC community.

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    Looking back at ISC High-Performance 2015


    I’ve always enjoyed a good road trip. There’s just something fun about jumping +in the car, and heading to a far off location. As they say, half of the fun is +just getting to your destination. My latest road trip brought me to +Frankfurt for ISC High-Performance 2015.


Crossing all of Austria as well as the southern part of Germany, this trip proved to be no less exciting than the rest. Breaking down about 50 km from Frankfurt due to a dead battery, I was fortunate enough to meet a local family who helped to boost my car so that I could make it in time for the show. Luckily I had some craft beer to reward them for their troubles. Of course, part of the excitement this time was the fabled Autobahns of Germany. Here I could get up to some decent speeds - legally :)


    Refreshments are always needed on long trips…


Frankfurt too had some interesting surprises in store - including the interesting culinary treat Handkäse mit Musik, which is a sour milk cheese served with onions. I’ll let you read what the Musik part is all about. Then there is the infamous Apfelsaftschorle, which I constantly mistook for beer at the ISC High-Performance venue. Such is life :)


For me, where the rubber hit the road was the ISC High-Performance event. The IBM booth (928) featured a refreshing bright yellow colour scheme, like the dawning of a new era of High-Performance Computing built on Data Centric Systems and OpenPOWER. In terms of demos, the IBM booth featured a number of live and static demos including:

• OpenPOWER HPC Server and Cirrascale GPU Developer System
• IBM High Performance Services for HPC
• IBM Data Engine for Analytics
• IBM Watson tranSMART Translational Medicine Solution
• Pluto (astrophysics hydrodynamics/magneto-hydrodynamics) running live on Power8 + GPU
• OpenFOAM (CFD)
• High Performance Storage System (HPSS)

The OpenPOWER hardware that was on the show floor attracted a lot of attention. Many people were impressed to behold the two Power8 systems, which included technology from OpenPOWER members Mellanox and NVIDIA. You may have read about my interest in Power and ARM based systems in some of my earlier blogs.


Being part of the IBM Platform Computing marketing team, I could frequently be found at the IBM High Performance Services for HPC demo point. Here we demonstrated our turnkey cloud solution for HPC workloads built on top of the IBM SoftLayer cloud and featuring both IBM Platform LSF & Platform Symphony workload management options. The demo leveraged the work done by MINES ParisTech and Transvalor to provide CFD services to French industry. You can read more about how MINES ParisTech and Transvalor leverage the IBM solutions for HPC here.


ISC also offered us the opportunity to showcase the IBM Platform LSF family of products’ interactive conceptual demo to passersby. Here users could learn that the Platform LSF family is not simply about workload management. For example, Platform Process Manager and Platform Application Center, two add-on products for Platform LSF, help to boost user productivity through ease of use and simplification.


    So what’s next? Toronto to Austin road trip for SC15? Yeah, that doesn’t +sound like a bad idea.


    See y’all in Texas!

This is a crosspost from Glenn K. Lockwood (personal thoughts and opinions of a supercomputing enthusiast). See the original post here.

    More Conjecture on KNL's Near Memory


This morning The Platform ran an interesting collection of conjectures on how KNL’s on-package MCDRAM might be used, and I recommend reading through it if you’re following the race to exascale.  I was originally going to write this commentary as a Google+ post, but it got a little long, so pardon the lack of a proper lead-in here.

I appreciated Mr. Funk’s detailed description of how processor caches interact with DRAM, and how this might translate into KNL’s caching mode.  However, in his discussion of how MCDRAM may act as an L3 cache, he underplays exactly why MCDRAM (and the GDDR on KNC) exists on these manycore architectures.  On-package memory is not simply another way to get better performance out of the manycore processor; rather, it is a hard requirement for keeping all 60+ cores (and their 120+ 512-bit vector registers, 1.8+ MB of L1 data cache, etc) loaded.  Without MCDRAM, it would be physically impossible for these KNL processors to achieve their peak performance due to memory starvation.  By extension, Mr. Funk’s assumption that this MCDRAM will come with substantially lower latency than DRAM might not be true.

    As a matter of fact, the massive parallelism game is not about latency at all; it came about as a result of latencies hitting a physical floor.  So, rather than drive clocks up to lower latency and increase performance, the industry has been throwing more but slower clocks at a given problem to mask the latencies of data access for any given worker.  While one thread may be stalled due to a cache miss on a Xeon Phi core, the other three threads are keeping the FPU busy to achieve the high efficiency required for performance.  This is at the core of the Xeon Phi architecture (as well as every other massively parallel architecture including GPUs and Blue Gene), so it is unlikely that Intel has sacrificed their power envelope to actually give MCDRAM lower latency than the off-package DRAM on KNL nodes.

At an architectural level, accesses to MCDRAM still need to go through memory controllers like off-package DRAM.  Intel hasn’t been marketing the MCDRAM controllers as “cache controllers,” so it is likely that the latencies of memory access are on par with those of the off-package memory controllers.  There are simply more of these parallel MCDRAM controllers (eight) operating relative to off-package DRAM controllers (two), again suggesting that bandwidth is the primary capability.

    Judging by current trends in GPGPU and KNC programming, I think it is far more likely that this caching mode acts at a much higher level, and Intel is providing it as a convenience for (1) algorithmically simple workloads with highly predictable memory access patterns, and (2) problems that will fit entirely within MCDRAM.  Like with OpenACC, I’m sure there will be some problems where explicitly on/off-package memory management (analogous to OpenACC’s copyin, copyout, etc) aren’t necessary and cache mode will be fine.  Intel will also likely provide all of the necessary optimizations in their compiler collection and MKL to make many common operations (BLAS, FFTs, etc) work well in cache mode as they did for KNC’s offload mode.

    However, to answer Mr. Funk’s question of “Can pre-knowledge of our application’s data use–and, perhaps, even reorganization of that data–allow our application to run still faster if we instead use Flat Model mode,” the answer is almost unequivocally “YES!”  Programming massively parallel architectures has never been easy, and magically transparent caches rarely deliver reliable, high performance.  Even the L1 and L2 caches do not work well without very deliberate application design to accommodate wide vectors; cache alignment and access patterns are at the core of why, in practice, it’s difficult to get OpenMP codes working with high efficiency on current KNC processors.  As much as I’d like to believe otherwise, the caching mode on KNL will likely be even harder to effectively utilize, and explicitly managing the MCDRAM will be an absolute requirement for the majority of applications.
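
As a rough sketch of what that explicit management might look like: if, as seems likely, flat mode exposes the MCDRAM as its own NUMA node, then ordinary NUMA tooling becomes the bluntest instrument for placing a whole application’s working set there (the node numbering below is my assumption for illustration, not anything Intel has published):

numactl --hardware                 # find which NUMA node is the MCDRAM
numactl --membind=1 ./my_app       # force all allocations into MCDRAM
numactl --preferred=1 ./my_app     # prefer MCDRAM, spill to DRAM when full

Finer-grained control - placing only the bandwidth-critical arrays on-package - would need allocator-level support in the application itself.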


This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

    Objections, Continued


    Thanks for all of the comments about my HPC and MPI post, on the post itself, or on twitter, or via email. While much of the comments and discussions were positive, it won’t surprise you to learn that there were objections, too; so I thought I’d keep updating the Objections section in a new post. I’ve also posted one (hopefully last) followup.

    But do keep sending in your objections!


    Further Objections

    You’re saying we’d have to rewrite all our code!

    If someone had suggested I add this objection to the original list before publishing, I would have rejected it as too straw-man to use; I’d be transparently putting this objection up just to demolish it. Clearly, no one would actually claim that “the HPC community should urgently start engaging with and using new technical computing technologies” means “you have to burn all your old stuff to the ground”.


    But sure enough, it came up frequently, in private email, and most dramatically, on twitter.


    Even though this is by far the most common reaction I got, I hope it’s clear to most readers these aren’t the same things. Learning (say) C++ and using it in development of new codes doesn’t mean your old C and Fortran stuff stops working. Or that you’re under an obligation to take the working code in other languages and re-write it all in the new language before ever using it again to maintain some kind of computational moral consistency.


    Your MPI code won’t stop working for you in a fit of rage because you’re seeing other frameworks. MPI will continue to work and be maintained, exactly because there is 20+ years worth of stuff using it.


    But new software projects are being started every day, in every field, in every region. This argument is about what we should use for those codes. “Because we’ve always done it that way” isn’t a great reason for a community that’s supposed to be on the cutting edge of computing to keep doing things in one particular framework.

Big data and HPC are completely different, and it’s ridiculous to compare them

This was a close second in popularity. And this one worries me quite a bit, because it means that there are a lot of people in our community who are disturbingly unaware of what’s going on in computing and data analysis outside of the confines of their office.

It’s absolutely true that there are Big-Data-y things that are mainly just I/O with a little bit of processing. But by and large people want to analyze that large amount of data. And then you end up with absolutely classic big numerical computing problems. To take an early example, Page Rank is, after all, an eigenvalue problem (there’s a minimal sketch of this just after the list below). The drive for next-generation big data platforms like Spark is in no small part about making machine learning algorithms that would be very familiar to us run as efficiently as possible. Let’s take some example machine learning approaches:

    • Spectral clustering solves an equation for the graph Laplacian - which looks exactly like any other parabolic PDE on an unstructured mesh. (Thanks to Lorena Barba for pointing out an embarrasing mistake in an earlier version of that point.)
    • Support Vector Machines are kernel based methods which involve Green’s functions and 1st order integral equations.
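
To make the PageRank point above concrete: the whole algorithm is essentially the power method for the dominant eigenvector, a few lines of entirely familiar numerical computing. (This is a toy dense NumPy sketch of mine, ignoring dangling nodes and sparsity.)

import numpy as np

def pagerank(links, n_iters=50, d=0.85):
    # links[i, j] != 0 means page j links to page i; columns must be nonzero
    n = links.shape[0]
    M = links / links.sum(axis=0)              # column-stochastic link matrix
    rank = np.full(n, 1.0 / n)
    for _ in range(n_iters):
        rank = (1.0 - d) / n + d * (M @ rank)  # one power-method step
    return rank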

      MPI is great for physics, even if less great for the other stuff

      I got this by email and on twitter several times.


      Great compared to what? And based on what evidence?


Say a physics grad student walks into your office who’s going to develop a small bespoke particle code for their dissertation. Pointing them to MPI, rather than other technologies with unimpeachable HPC bona fides like UPC, Chapel, Co-array Fortran, or (for a particle simulation especially) Charm++, seems like the lazy, easy way for us, and less about what’s actually best for them.

In what sense is it “great for physics” to have the student increase the amount of code they have to write and debug by a factor of 3x? In what sense is it great for them to have to re-invent all of the low-level communications algorithms which have been implemented better in other packages? Maybe you could make an argument about stability or performance against UPC/Chapel (although I’d counter-argue you’d get immediate and helpful support from the developers) - what’s the argument against pointing the student to Charm++? Or Intel’s CAF?


      And this doesn’t even begin to cover things like Spark, Flink, or Ignite - for simulation, or experimental physics work (which is physics too, right?), which is necessarily heavy on data analysis.


      You’re just saying MPI is too hard

      I’m really not. As a community, we don’t mind hard. Solving complex equations is hard, that’s just how it is. We eat hard for breakfast. (And the genomics and big-data communities are the same way, because they’re also filled with top-notch people with big computational problems).


      I’m saying something different: MPI is needlessly, pointlessly, and uselessly a huge sink of researcher and toolbuilder effort for little if any reward.

How many grad students have had to tediously decompose a 2d or 3d grid by hand, write halo exchange code, get it debugged and running, run in that crude fashion for a while, then tried to move to overlapped communication and computation, and spent days or weeks trying to get that to work efficiently - and then had to re-write chunks as they needed a new variable laid out differently (or just implemented a really bad transposition?) - and still gotten performance that an expert would consider poor?


      And regular grid codes are the easy stuff; how many scientist-decades worth of efforts have gone into implementing and re-implementing tree codes or unstructured meshes; and by and large resulting in efficiencies ranging from “meh” to “ugh”?


      Wouldn’t it be better to have experts working on the common lower level stuff, tuning it and optimizing it, so that the scientists can actually focus on the numerics and not the communications?

The stuff about levels of abstraction isn’t some aesthetic philosophical preference. And I’m not complaining about MPI because it’s hard; I’m complaining about it because it’s resulted in an enormous waste of researcher time and compute resources. Let the scientists focus on the hard stuff that matters to their research, not the stuff that can be effectively outsourced to toolbuilders.

Now, we at centres could at least improve this dreadful state of affairs even with MPI just by doing a better job pointing researchers embarking on a code project to libraries and packages like Trilinos or what have you, and stop counseling them to write raw MPI code themselves. But of course, we normally don’t, because we keep telling ourselves and the incoming grad students “MPI is great for physics”…


      It’s important for students to know what’s going on under the hood, even if they’re using other frameworks

      I do have some sympathy for this point, I will admit.


      But anyone who thinks teaching generation after generation of grad students how to manually decompose a 2d mesh and do halo exchange on it using MPI_Sendrecv() is a productive and rewarding use of time, is someone who doesn’t spend enough time doing it.


      As with other pro-low-level arguments: why is MPI automatically the right level to stop at? If we want to teach students how things really work under the covers, why aren’t we going all the way down to Infiniband or TCP/IP, user mode and kernel mode, and the network stack? Or, why don’t we stop a level or two above, draw some diagrams on a whiteboard, and move on to actually solving equations? Why is MPI in particular the right “under the hood” thing to teach, as opposed to GASNet, Charm++, or just pseudo-network-code?


      If the answer to the questions above is “because MPI is what we know and have slides for”, then we need to think about what that implies, and how well it’s serving the research community.


      But my new code will need libraries based on MPI that aren’t supported by Chapel/UPC/Spark/other stuff yet!

Fair enough. When you choose what you are going to use to write a program, library and tool support really matter. It’s absolutely true that there are great packages that use MPI, and if your project is going to rely on them, then this isn’t an example of a good project to start experimenting with a new platform on. This is why such a large fraction of numerical code was in FORTRAN77 for so long.

Co-array Fortran, Chapel, and others do have various degrees of MPI interoperability, so do check that out; but yes, you need what you need.


      But people are starting to build things based on MPI-3 RMA!

This comment by Jeff on the original post is, by some measure, the most interesting objection I’ve heard so far.

      People are legitimately starting to use MPI-3 RMA in the underlying implementations of higher level tools. If that really took off, then my arguments about MPI not being the right level of abstraction for toolbuilders would clearly be wrong, and a huge part of my post would be rendered irrelevant.


      In that case, I would be completely wrong – and it would be awesome! A higher-level toolset for researchers could finally flourish, the lower level stuff could be handled by a completely separate group of experts, and MPI would have found its place.


      I want to be clear that I think it would be fantastic - really, the best of all possible worlds - to be wrong in this way.


      I’m going to describe why I really don’t think I am, and what the stumbling blocks are. Then I’ll discuss an alternate future which sidesteps the worst of those problems, and how it really could be a path to a very productive and growing HPC future - but it will of course never, ever, happen.

So MPI-3 - useful RMA, being used. Perfect! To see the problem that concerns me here, consider two questions: (1) what are the benefits of using MPI for this, and (2) what are the downsides?


      On the upside, it’s great that MPI is sufficient to implement these tools. But is it necessary? What is the advantage of using something like MPI over something else, and in particular something lower level? Maybe it would be a little easier or a little harder, but would it make a big difference? Particularly to the end-user of the tool being built?


      I doubt it makes much difference either way; the reason I ask is the downside.

MPI-3 RMA doesn’t come on its own; it’s part of MPI. And in this context, I’m concerned with two real downsides to using even great parts of MPI for low-level toolbuilding. They’re related: the heavy-weight forum process, and the enormous baggage of backwards compatibility.


      Let’s take the forum process first. Let’s say there’s two competing tools you could use to build your next lower-layer tool; MPI-3 RMA and some other low-level network abstraction layer. (What I’m picturing is something like OFWG Libfabric, which you can probably tell I’m quite taken with, but that’s not really right here. But something at roughly that level or a little higher).


      You’re starting to build your new tool, which contains a number of really innovative ideas; but now you’ve discovered you need one additional feature in either package.


      Which will get you there first?


      The MPI forum was really able to innovate with MPI-3 RMA, because they were nearly starting afresh - or at least complementary with what had gone before. But now that MPI-3 is out, and a number of projects have used it, the spec is essentially encased in carbonite; the API in its every last detail will outlive us all. None of the existing APIs will change.


      That’s ok, because the Forum has shown its willingness to add new functions to the spec when justified. Your case sounds interesting; you should get your answer in a couple of years or so.


      And that’s kind of crazy for a low-level network abstraction layer. The other package - whatever it is - won’t have that sort of friction.

There’s another issue in terms of new features; that’s the backwards compatibility legacy.


      Let’s take something like fault tolerance, which is important at extreme scale - but will eventually get important for more moderate scales, as well.

For a really low-level network abstraction, dealing with fault tolerance isn’t an enormous difficulty. For something higher level like MPI-3 RMA, it’s more challenging, but it’s still something where one could imagine how it might go.

But for MPI-3+ to develop a feature like fault tolerance, it will have to be created in such a way that it integrates seamlessly with every single MPI feature that has ever existed, without altering the semantics of a single one of those calls. The backwards compatibility requirements are crushing.


      So this is sort of the tragedy of MPI-3 RMA. It’s a great thing that may have just come too late in the lifecycle of a project to be able to have its full impact.

Let’s imagine a world where we could just shrug this stuff off. Let’s imagine a new framework – MPING, MPI++, whatever – which is a substantially pared-down version of MPI. It’s an MPI that has decided what it wants to be: a low-level layer for toolbuilders, never to be taught to grad students who are planning to write application software.

It contains only pared-to-the-bone versions of MPI-3 RMA, which is demonstrably being found useful; MPI collectives, which are fantastic; MPI-IO, which is also fantastic; and auxiliary stuff like the datatype creation routines, etc. The communications semantics for everything are greatly relaxed, which would confuse the heck out of newbie end users, but toolbuilders can deal with it. And there are no decades of backwards compatibility to fight with.


      This vision actually discourages me a bit, because it would be terrific; there’d be an active, vendor-supported, high-performance, productive network abstraction layer for toolbuilders; and no confusion about who it was for. We could build high-productivity tools for scientific application writing atop a stable, high performance foundation.


      And of course, it will never, ever, happen.

This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

    On Random vs. Streaming I/O Performance; Or seek(), and You Shall Find --- Eventually.


At the Simpson Lab blog, I’ve written a post on streaming vs random access I/O performance, an important topic in bioinformatics. Using a very simple problem (randomly choosing lines in a non-indexed text file) I give a quick overview of the file system stack and what it means for streaming performance, and reservoir sampling for uniform random online sampling.
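
The reservoir sampling algorithm mentioned there is compact enough to show inline (a sketch of the classic Algorithm R; here `stream` can be any iterable of lines):

import random

def reservoir_sample(stream, k):
    """Keep a uniform random sample of k items from a stream of unknown length."""
    reservoir = []
    for i, item in enumerate(stream):
        if i < k:
            reservoir.append(item)      # fill the reservoir first
        else:
            j = random.randint(0, i)    # inclusive of both endpoints
            if j < k:
                reservoir[j] = item     # replace with probability k/(i+1)
    return reservoir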

This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

    Spark in HPC clusters


    Over the past several years, as research computing centres and others who run HPC clusters tried to accommodate other forms of computing for data analysis, much effort went into trying to incorporate Hadoop jobs into the scheduler along with other more traditional HPC jobs. It never went especially well, which is a shame, because it seems that those past unsuccessful attempts have discouraged experimentation with related next-generation technologies which are a much better fit for large-scale technical computing.


    Hadoop v1 was always going to be a niche player and an awkward fit for big technical computing - and HPCers weren’t the only ones to notice this. Hadoop MapReduce’s mandatory dumping of output to disk after every Map/Reduce stage rendered it nearly unusable for any sort of approach which required iteration, or interactive use. Machine learning users, who often rely on many of the same iterative linear algebra solvers that physical science simulation users need, equally found Hadoop unhelpful. Hadoop v1 solved one set of problems – large single-pass data processing – very well, but those weren’t the problems that the technical computing community needed solved.


    The inefficiency of flushing to disk wasn’t necessarily the difficulty that HPC centres had with incorporating Hadoop into their clusters, however. Dumping to disk could be sped up with caching, or SSDs. The real issue was with HDFS, the filesystem which Hadoop relies on. Because every job needed very rapid access to its data – to read the entire set in to the compute nodes, do minimal processing, then flush it back out – the file system was intimately tied to Hadoop cluster scheduling, which worked very hard (reasonably enough) to schedule the compute next to the data. But with Hadoop “on demand” in a cluster, how is this to work? One could spin up a new HDFS within each Hadoop job – but now the user has to have the new empty HDFS ingest the data files (probably with replication) initially, and then stage the data out of the doomed-to-be-shut-down HDFS afterwards. But this staging in and out will certainly take substantially longer than even the rest of the job’s I/O, which already likely dominates runtime. One can reserve a number of nodes for Hadoop jobs and keep a persistent HDFS store there, but this now defeats the purpose of running Hadoop in the cluster; one might as well just hive off those nodes into a separate system. Probably the best approach, which worked better than I think anyone had any right to expect, was to run Hadoop on Lustre, but it remained awkward even for those who already were using Lustre for their cluster.


    The HPC community’s reaction to those problems – problems with a technology they were already skeptical of due to Not Invented Here Syndrome – was largely to give up on anything that seemed “Hadoopy” as a sensible approach. The large-scale machine learning community, which didn’t necessarily have that luxury, was instead already looking for in-memory approaches to avoid this problem entirely.


    Two very promising “post-Hadoop” in-memory approaches which are much better suited to large-scale technical computing than Hadoop v1 ever was are also Apache projects - Spark and Flink. Flink has some really interesting features - including using a database-like query optimizer for almost all computations - but there’s no real question that currently, Spark is the more mature and capable of the offerings.


    Spark can make use of HDFS, and other related file stores, but those aren’t requirements; since iterative computation can be done in memory given enough RAM, there is much less urgency in having the data local to the computation if the computation is long enough. Instead, Spark can simply use a POSIX interface to whatever filesystem is already running on your cluster.


    Spark not only lacks hard HDFS-style requirements, but can also run in standalone mode without a heavyweight scheduler like Yarn or Mesos. This standalone mode makes it quite easy to simply spin up a Spark “cluster” within a job, reading from the file system as any other job would. (Earlier versions of Spark made this unnecessarily difficult, with the standalone startup scripts having hardcoded values that assumed only one such job at a time; this is somewhat easier now.)


Thus, below is a little job submission script for a Spark job on SciNet; it starts up a Spark master on the head node of the job, sets up the workers on every node, and runs a simple wordcount example.


    Spark’s well-thought-out python interface, standalone mode, and filesystem-agnostic approach, makes Spark a much better match for traditional HPC systems than Hadoop technologies ever were.


    Spark is covered a little bit in my and Mike Nolta’s Hadoop-for-HPCers workshop.

#!/bin/bash
#
#PBS -l nodes=3:ppn=8,walltime=0:20:00
#PBS -N spark-test

nodes=($( cat $PBS_NODEFILE | sort | uniq ))
nnodes=${#nodes[@]}
last=$(( $nnodes - 1 ))

cd $PBS_O_WORKDIR

export SPARK_HOME=/scinet/gpc/Libraries/spark/spark-1.0.2-bin-hadoop2/

# Start the Spark master on the head node of the job
ssh ${nodes[0]} "module load java; cd ${SPARK_HOME}; ./sbin/start-master.sh"
sparkmaster="spark://${nodes[0]}:7077"

# Start a worker on every node in the job, pointed at the master
for i in $( seq 0 $last )
do
    ssh ${nodes[$i]} "cd ${SPARK_HOME}; module load java; nohup ./bin/spark-class org.apache.spark.deploy.worker.Worker ${sparkmaster} &> ${SCRATCH}/work/nohup-${nodes[$i]}.out" &
done

rm -rf ${SCRATCH}/wordcounts

cat > sparkscript.py <<EOF
from pyspark import SparkContext

sc = SparkContext(appName="wordCount")
file = sc.textFile("${SCRATCH}/moby-dick.txt")
counts = file.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a+b)
counts.saveAsTextFile("${SCRATCH}/wordcounts")
EOF

module load java
${SPARK_HOME}/bin/spark-submit --master ${sparkmaster} sparkscript.py

# Tear down the standalone "cluster": stop the master, then the workers
ssh ${nodes[0]} "module load java; cd ${SPARK_HOME}; ./sbin/stop-master.sh"
for i in $( seq 0 $last )
do
    ssh ${nodes[$i]} "killall java"
done
wait
This is a crosspost from Glenn K. Lockwood (personal thoughts and opinions of a supercomputing enthusiast). See the original post here.

    Thoughts on the NSF Future Directions Interim Report


    The National Academies recently released an interim report entitled Future Directions for NSF Advanced Computing Infrastructure to Support U.S. Science and Engineering in 2017-2020 as a part of a $723,000 award commissioned to take a hard look at where the NSF’s supercomputing program is going.  Since releasing the interim report, the committee has been soliciting feedback and input from the research community to consider as they draft their final report, and I felt compelled to put some of my thoughts into a response.

    NSF’s HPC programs are something I hold near and dear since I got my start in the industry by supporting two NSF-owned supercomputers.  I put a huge amount of myself into Trestles and Gordon, and I still maintain that job encompassed the most engaging and rewarding work I’ve ever done.  However, the NSF’s lack of a future roadmap for its HPC program made my future feel perpetually uncertain, and this factored heavily in my decision to eventually pursue other opportunities.

    Now that I am no longer affiliated with NSF, I wanted to delineate some of the problems I observed during my time on the inside with the hope that someone more important than me really thinks about how they can be addressed.  The report requested feedback in nine principal areas, so I’ve done my best to contextualize my thoughts with the committee’s findings.

    With that being said, I wrote this all up pretty hastily.  Some of it may be worded strongly, and although I don’t mean to offend anybody, I stand by what I say.  That doesn’t mean that my understanding of everything is correct though, so it’s probably best to assume that I have no idea what I’m talking about here.

    Finally, a glossary of terms may make this more understandable:

• XD is the NSF program that funds XSEDE; it finances infrastructure and people, but it does not fund supercomputer procurements or operations
• Track 1 is the program that funded Blue Waters, the NSF’s leadership-class HPC resource
• Track 2 is the program that funds most of the XSEDE supercomputers.  It funded systems like Ranger, Keeneland, Gordon, and Stampede
1. How to create advanced computing infrastructure that enables integrated discovery involving experiments, observations, analysis, theory, and simulation.

Answering this question involves a few key points:

1. Stop treating NSF’s cyberinfrastructure as a computer science research project and start treating it like research infrastructure operation.  Office of Cyberinfrastructure (OCI) does not belong in Computer & Information Science & Engineering (CISE).
2. Stop funding cyberinfrastructure solely through capital acquisition solicitations and restore reliable core funding to NSF HPC centers.  This will restore a community that is conducive to retaining expert staff.
3. Focus OCI/ACI and raise the bar for accountability and transparency.  Stop funding projects and centers that have no proven understanding of operational (rather than theoretical) HPC.
4. Either put up or give up.  The present trends in funding lie on a road to death by attrition.
5. Don’t waste time and funding by presuming that outsourcing responsibility and resources to commercial cloud or other federal agencies will effectively serve the needs of the NSF research community.

I elaborate on these points below.

2. Technical challenges to building future, more capable advanced computing systems and how NSF might best respond to them.

“Today’s approach of federating distributed compute- and data-intensive resources to meet the increasing demand for combined computing and data capabilities is technically challenging and expensive.”

This is true.
“New approaches that co-locate computational and data resources might reduce costs and improve performance. Recent advances in cloud data center design may provide a viable integrated solution for a significant fraction of (but not all) data- and compute-intensive and combined workloads.”

This strong statement is markedly unqualified and unsubstantiated.  If it is really recommending that the NSF start investing in the cloud, consider the following:
• Cloud computing resources are designed for burst capabilities and are only economical when workloads are similarly uneven.  In stark contrast, most well-managed HPCs see constant, high utilization, which is where the cloud becomes economically intractable.
• The suggestion that cloud solutions can “improve performance” is unfounded.  At a purely technological level, the cloud will never perform as well as unvirtualized HPC resources, period.  Data-intensive workloads and calculations that require modest inter-node communication will suffer substantially.
    In fact, if any cost reduction or performance improvement can be gained by moving to the cloud, I can almost guarantee that incrementally more can be gained by simply addressing the non-technological aspects of the current approach of operating federated HPC.  Namely, the NSF must
1. Stop propping up failing NSF centers who have been unable to demonstrate the ability to effectively design and operate supercomputers.
2. Stop spending money on purely experimental systems that domain scientists cannot or will not use.
    The NSF needs to re-focus its priorities and stop treating the XD program like a research project and start treating it like a business.  Its principal function should be to deliver a product (computing resources) to customers (the research community).  Any component that is not helping domain scientists accelerate discovery should be strongly scrutinized.  Who are these investments truly satisfying?
“New knowledge and skills will be needed to effectively use these new advanced computing technologies.”

This is a critical component of XD that is extremely undervalued and underfunded.  Nobody is born with the ability to know how to use HPC resources, and optimization should be performed on users in addition to code.  There is huge untapped potential in collaborative training between U.S. federal agencies (DOE, DOD) and European organizations (PRACE).  If there is bureaucratic red tape in the way, it needs to be dealt with at an official level or circumvented at the grassroots level.

3. The computing needs of individual research areas.

XDMoD shows this.  The principal workloads across XSEDE are from traditional domains like physics and chemistry, and the NSF needs to recognize that this is not going to change substantially over the lifetime of a program like XD.

[Figure: Straight from XDMoD for 2014.  MPS = math and physical sciences, BIO = biological sciences, GEO = geosciences.  NSF directorate is not a perfect alignment; for example, I found many projects in BIO were actually chemistry and materials science.]

    While I wholeheartedly agree that new communities should be engaged by lowering the barriers to entry, these activities cannot be done at a great expense of undercutting the resources required by the majority of XD users.

    The cost per CPU cycle should not be deviating wildly between Track 2 awards because the ROI on very expensive cycles will be extremely poor.  If the NSF wants to fund experimental systems, it needs to do that as an activity that is separate from the production resources.  Alternatively, only a small fraction of each award should be earmarked for new technologies that represent a high risk; the Stampede award was a fantastic model of how a conservative fraction of the award (10%) can fund an innovative and high-risk technology.

4. How to balance resources and demand for the full spectrum of systems, for both compute- and data-intensive applications, and the impacts on the research community if NSF can no longer provide state-of-the-art computing for its research community.

“But it is unclear, given their likely cost, whether NSF will be able to invest in future highest-tier systems in the same class as those being pursued by the Department of Energy, Department of Defense, and other federal mission agencies and overseas.”

The NSF does not have the budget to support leadership computing.  This is clear even from a bird’s eye view: DOE ASCR’s budget for FY2012 was $428 million and, by comparison, NSF ACI’s budget was only $211 million.  Worse yet, despite having half the funding of its DOE counterpart, the NSF owned HPC resources at seven universities in FY2012 compared to ASCR’s three centers.

    Even if given the proper funding, the NSF’s practice of spreading Track 2 awards across many universities to operate its HPC assets is not conducive to operating leadership computing.  The unpredictable nature of Track 2 awards has resulted in very uneven funding for NSF centers which, quite frankly, is a terrible way to attract and retain the highly knowledgeable world-class staff that is necessary to operate world-class supercomputers.

5. The role of private industry and other federal agencies in providing advanced computing infrastructure.

The report makes some very troubling statements in reference to this question.
“Options for providing highest-tier capabilities that merit further exploration include purchasing computing services from federal agencies…”

This sounds dirty.  Aren’t there regulations in place that restrict the way in which money can flow between the NSF and DOE?  I’m also a little put off by the fact that this option is being put forth in a report that is crafted by a number of US DOE folks whose DOE affiliations are masked by university affiliations in the introductory material.
“…or by making arrangements with commercial services (rather than more expensive purchases by individual researchers).”

Providing advanced cyberinfrastructure for the open science community is not a profitable venture.  There is no money in HPC operations.  I do not see any “leadership” commercial cloud providers offering the NSF a deal on spare cycles, and the going rate for commercial cloud time is known to be far more expensive than deploying HPC resources in-house at the national scale.

6. The challenges facing researchers in obtaining allocations of advanced computing resources and suggestions for improving the allocation and review processes.

“Given the “double jeopardy” that arises when researchers must clear two hurdles—first, to obtain funding for their research proposal and, second, to be allocated the necessary computing resources—the chances that a researcher with a good idea can carry out the proposed work under such conditions is diminished.”

XD needs to be more tightly integrated with other award processes to mitigate the double jeopardy issue.  I have a difficult time envisioning the form which this integration would take, but the NSF GRF’s approach of prominently featuring NSF HPC resources as a part of the award might be a good start.  As an adaptive proposal reviewer within XSEDE and a front-line interface with first-time users, I found that having the NSF GRF bundle XSEDE time greatly reduced the entry barrier for new users and made it easier for us reviewers to stratify the proposals.  Another idea may be to invite NSF center staff to NSF contractors’ meetings (if such things exist; I know they do for DOE BES) to show a greater amount of integration across NSF divisions.

In addition, the current XSEDE allocation proposal process is extremely onerous.  The document that describes the process is ridiculously long and contains obscure requirements that serve absolutely no purpose.  For example, all XSEDE proposals require a separate document detailing the scaling performance of their scientific software.  Demonstrating an awareness of the true costs of performing certain calculations has its merits, but a detailed analysis of scaling is not even relevant for the majority of users who run modest-scale jobs or use off-the-shelf black-box software like Gaussian.  The only thing these obscure requirements do is prevent new users, who are generally less familiar with all of the scaling requirements nonsense, from getting any time.  If massive scalability is truly required by an application, the PI needs to be moved over to the Track 1 system (Blue Waters) or referred to INCITE.

    As a personal anecdote, many of us center staff found ourselves simply short-circuiting the aforementioned allocations guide and providing potential new users with a guide to the guide.  It was often sufficient to provide a checklist of minutia whose absence would result in an immediate proposal rejection and allow the PIs to do what they do best—write scientific proposals for their work.  Quite frankly, the fact that we had to provide a guide to understanding the guide to the allocations process suggests that the allocations process itself is grossly over-engineered.

7. Whether wider and more frequent collection of requirements for advanced computing could be used to inform strategic planning and resource allocation; how these requirements might be used; and how they might best be collected and analyzed.

The XD program has already established a solid foundation for reporting the popularity and usability of NSF HPC resources in XDMoD.  The requirements of the majority are evolving more slowly than computer scientists would have everyone believe.

Having been personally invested in two Track 2 proposals, I have gotten the impression that the review panels who select the destiny of the NSF’s future HPC portfolio are more impressed by cutting-edge, albeit untested and under-demanded, proposals.  Consequently, taking a “functional rather than a technology-focused or structural approach” to future planning will result in further loss of focus.  Instead of delivering conservatively designed architectures that will enjoy guaranteed high utilization, functional approaches will give way to computer scientists on review panels dictating what resources domain scientists should be using to solve their problems.  The cart will be before the horse.

    Instead, it would be far more valuable to include more operational staff in strategic planning.  The people on the ground know how users interact with systems and what will and won’t work.  As with the case of leadership computing, the NSF does not have the financial commitment to be leading the design of novel computing architectures at large scales.  Exotic and high-risk technologies should be simply left out of the NSF’s Track 2 program, incorporated peripherally but funded through other means (e.g., MRIs), or incorporated in the form of a small fraction of a larger, lower-risk resource investment.

A perspective of the greater context of this has been eloquently written by Dr. Steven Gottlieb.  Given his description of the OCI conversion to ACI, it seems like taking away the Office of Cyberinfrastructure’s (OCI’s) autonomy and placing it under Computer & Information Science & Engineering (CISE) exemplifies an ongoing and significant loss of focus within NSF.  This change reflected the misconception that architecting and operating HPC resources for domain sciences is a computer science discipline.

    This is wrong.

    Computer scientists have a nasty habit of creating tools that are intellectually interesting but impractical for domain scientists.  These tools get “thrown over the wall,” never to be picked up, and represent an overall waste of effort in the context of operating HPC services for non-computer scientists.  Rather, operating HPC resources for the research community requires experienced technical engineers with a pragmatic approach to HPC.  Such people are most often not computer scientists, but former domain scientists who know what does and doesn’t work for their respective communities.

8. The tension between the benefits of competition and the need for continuity as well as alternative models that might more clearly delineate the distinction between performance review and accountability and organizational continuity and service capabilities.

“Although NSF’s use of frequent open competitions has stimulated intellectual competition and increased NSF’s financial leverage, it has also impeded collaboration among frequent competitors, made it more difficult to recruit and retain talented staff, and inhibited longer-term planning.”

Speaking from firsthand experience, I can say that working for an NSF center is a life of a perpetually uncertain future and dicing up FTEs into frustratingly tiny pieces.  While some people are driven by competition and fundraising (I am one of them), an entire organization built up to support multi-million dollar cyberinfrastructure cannot be sustained this way.

    At the time I left my job at an NSF center, my salary was covered by six different funding sources at levels ranging from 0.05 to 0.30 FTEs.  Although this officially meant that I was only 30% committed to directly supporting the operation of one of our NSF supercomputers, the reality was that I (and many of my colleagues) simply had to put in more than 100% of my time into the job.  This is a very high-risk way to operate because committed individuals get noticed and almost invariably receive offers of stable salaries elsewhere.  Retaining talent is extremely difficult when you have the least to offer, and the current NSF funding structure makes it very difficult for centers to do much more than continually hire entry-level people to replace the rising stars who find greener pastures.

    Restoring reliable, core funding to the NSF centers would allow them to re-establish a strong foundation that can be an anchor point for other sites wishing to participate in XD.  This will effectively cut off some of the current sites operating Track 2 machines, but frankly, the NSF has spread its HPC resources over too many sites at present and is diluting its investments in people and infrastructure.  The basis for issuing this core funding could follow a pattern similar to that of XD where long-term (10-year) funding is provisioned with a critical 5-year review.

    If the NSF cannot find a way to re-establish reliable funding, it needs to accept defeat and stop trying to provide advanced cyberinfrastructure.  The current method of only funding centers indirectly through HPC acquisitions and associated operations costs is unsustainable for two reasons:
    <ul><li>The length of these Track 2 awards (typically 3 years of operations) makes future planning impossible.  Thus, this current approach forces centers to follow high-risk and inadequately planned roadmaps.</li><li>All of the costs associated with maintaining world-class expertise and facilities have to come from someone else’s coffers.  Competitive proposals for HPC acquisitions simply cannot afford to request budgets that include strong education, training, and outreach programs, so these efforts wind up suffering.</li></ul>

    <h2 style="text-align: left;">9. How NSF might best set overall strategy for advanced computing-related activities and investments as well as the relative merits of both formal, top-down coordination and enhanced, bottom-up process.</h2>Regarding the top-down coordination, the NSF should drop the Track 2 program’s current solicitation model where proposers must have a vendor partner to get in the door.  This is unnecessarily restrictive and fosters an unhealthy ecosystem where vendors and NSF centers are both scrambling to pair up, resulting in high-risk proposals.  Consider the implications:
<ol><li>Vendors are forced to make promises that they may not be able to fulfill (e.g., Track 2C and Blue Waters).  Given that these two (of nine) solicitations resulted in substantial wastes of time and money (over 20% vendor failure rate!), I find it shocking that the NSF continues to operate this way.</li><li>NSF centers are only capable of choosing among the subset of vendors who are willing to play ball with them, resulting in a high risk of sub-optimal pricing and configurations for the end users of the system.</li></ol>
    I would recommend a model, similar to many European nations’, where a solicitation is issued for a vendor-neutral proposal to deploy and support a program that is built around a resource.  A winning proposal is selected based on not only the system features, its architecture, and the science it will support, but the plan for training, education, collaboration, and outreach as well.  Following this award, the bidding process for a specific hardware solution begins.

    This addresses the two high-risk processes mentioned above and simultaneously eliminates the current qualification in Track 2 solicitations that no external funding can be included in the proposal.  By leaving the capital expenses out of the selection process, the NSF stands to get the best deal from all vendors and other external entities independent of the winning institution.

    Bottom-up coordination is much more labor-intensive because it requires highly motivated people at the grassroots to participate.  Given the NSF’s current inability to provide stable funding for highly qualified technical staff, I cannot envision how this would actually come together.
diff --git a/2015/ultrasparc-powered-laptop-circa-2001/index.html b/2015/ultrasparc-powered-laptop-circa-2001/index.html
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    UltraSPARC powered laptop - circa 2001


It’s been ages since my last blog. What better way to start off the new year than by looking at the past. In this case, let’s wind the clock all the way back to 2001. This was the era of the Intel Pentium 4 processors. However, today we’ll be looking at something far less pedestrian. Based on the Scalable Processor ARChitecture (commonly known as SPARC), the NatureTech 777 GenialStation is an UltraSPARC IIe laptop computer. Why do I have an UltraSPARC IIe based laptop computer? Why not? And it’s oh so cool with its lovely blue and gray chassis as opposed to boring old black. This NatureTech 777 laptop boasts the following specs:

• SUN UltraSPARC IIe @ 500 MHz w/256-KB L2 Cache
• 15.0" TFT SXGA LCD Panel
• 256MB ECC RAM
• 80GB IDE disk
• CD/DVD Combo drive
• 3.5” Floppy disk drive
• 5400mAh/11.1V Li-ion Smart Battery Pack (mine is dead)
• Built-in H/W Security Controller, 4 button input
• A honking noisy fan that always runs at full speed

What can you do with a NatureTech 777 laptop? Well, at this stage of its life, I don’t use it for much apart from tinkering. Back in the day, being able to take SUN Solaris on the road in a portable package was quite impressive, and I understand that these systems also went for a premium price at the time.


I was surprised not to find any NatureTech video on YouTube or other such sites. So, I’m pleased to present this beast of a laptop in all its glory, booting up Solaris 9 and running Linpack - of course compiled with the requisite SunPro compilers (and SUN math libraries). No speed records broken here of course, and with that fan running constantly in overdrive, I would not expect any thermal issues either :)

[Video: Booting Solaris 9]

[Video: Stressing the mighty UltraSPARC IIe with Linpack]

I’m lucky enough to have the fancy laptop bag from the manufacturer which proudly proclaims that it’s carrying a SPARC based piece of equipment.

[Photo: the manufacturer's SPARC-branded laptop bag]

As the SUN sets on this blog (pun intended), I reminisce about the days of variety in computing - different processors, operating systems - and when RISC was king. Hopefully, we are entering another such era with the rise of ARM, OpenPower, MIPS as well as the others that are out there.


    Varietas Delectat!

diff --git a/2015/understanding-partial-order-alignment-for-multiple-sequence-alignment/index.html b/2015/understanding-partial-order-alignment-for-multiple-sequence-alignment/index.html
This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Understanding Partial Order Alignment for Multiple Sequence Alignment


Over at the Simpson Lab blog, I have an explainer on Understanding Partial Order Alignment, an under-appreciated method for multiple sequence alignment; I hope the explanation there (and explanatory implementation) is useful to those exploring graph-based approaches to alignment.

diff --git a/2016/an-uninformed-perspective-on-taihulight-s-design/index.html b/2016/an-uninformed-perspective-on-taihulight-s-design/index.html
This is a crosspost from Glenn K. Lockwood: personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

    An uninformed perspective on TaihuLight's design

    Note: What follows are my own personal thoughts, opinions, and analyses.  I am not a computer scientist and I don't really know anything about processor design or application performance, so it is safe to assume I don't know what I'm talking about.  None of this represents the views of my employer, the U.S. government, or anyone except me.


    China’s new 93 PF TaihuLight system is impressive given the indigenous processor design and its substantial increase in its HPL score over the #2 system, Tianhe-2.  The popular media has started covering this new system and the increasing presence of Chinese systems on Top500, suggesting that China’s string of #1 systems may be a sign of shifting tides.  And maybe it is.  China is undeniably committed to investing in supercomputing and positioning itself as a leader in extreme-scale computing.

That being said, the TaihuLight system isn’t quite the technological marvel and threat to the HPC hegemony that it may seem at first glance.  The system features some critically limiting design choices that make it smell like a supercomputer that was designed to be #1 on Top500, not to solve scientific problems.  This probably sounds like sour grapes at this point, so let’s take a look at some of the details.

<h2>Back-of-the-envelope math</h2>Consider the fact that each TaihuLight node turns in 3,062 GFLOPS (that’s 3 TFLOPS) and has 136.51 GB/sec of memory bandwidth. This means that in the time it takes for the processor to load two 64-bit floats into the processor from memory, it could theoretically perform over 350 floating point operations. But it won’t, because it can only load the two operands for one single FLOP.
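(Reproducing that arithmetic explicitly, using just the per-node figures quoted above:)

<pre>
# Back-of-the-envelope check using TaihuLight's per-node figures
flops_per_node = 3062e9      # 3,062 GFLOPS
mem_bw = 136.51e9            # 136.51 GB/sec of memory bandwidth
bytes_per_operand = 8        # one 64-bit float

t_load = 2 * bytes_per_operand / mem_bw       # time to load two operands
print(flops_per_node * t_load)                # ~359 FLOPs in that time
</pre>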

    Of course, this is an oversimplification of how CPUs work.  Caches exist to feed the extremely high operation rate of modern processors, and where there are so many cores that their caches can’t be fed fast enough, we see technologies like GDDR DRAM and HBM (on accelerators) and on-package MCDRAM (on KNL) appearing so that dozens or hundreds of cores can all retrieve enough floating-point operands from memory to sustain high rates of floating point calculations.

    However, the ShenWei SW26010 chips in the TaihuLight machine have neither GDDR nor MCDRAM; they rely on four DDR3 controllers running at 136 GB/sec to keep all 256 compute elements fed with data.  Dongarra’s report on the TaihuLight design briefly mentions this high skew:

    <blockquote class="tr_bq">“The ratio of floating point operations per byte of data from memory on the SW26010 is 22.4 Flops(DP)/Byte transfer, which shows an imbalance or an overcapacity of floating point operations per data transfer from memory. By comparison the Intel Knights Landing processor with 7.2 Flops(DP)/Byte transfer.”</blockquote>
    This measure of “Flops(DP)/Byte transfer” is called arithmetic intensity, and it is a critical optimization parameter when writing applications for manycore architectures.  Highly optimized GPU codes can show arithmetic intensities of around 10 FLOPS/byte, but such applications are often the exception; there are classes of problems that simply do not have high arithmetic intensities.  This diagram, which I stole from the Performance and Algorithms Research group at Berkeley Lab, illustrates the spectrum:

[Diagram: the spectrum of arithmetic intensities for common computational kernels, from Berkeley Lab's Performance and Algorithms Research group]
    To put this into perspective in the context of hardware, let’s look at the #3 supercomputer, the Titan system at Oak Ridge National Lab.  The GPUs on which it is built (NVIDIA’s K20X) each have a GDDR5-based memory subsystem that can feed the 1.3 TFLOP GPUs at 250 GB/sec.  This means that Titan’s FLOPS/byte ratio is around 5.3, or over 4x lower (more balanced) than the 22 FLOPS/byte of TaihuLight’s SW26010 chips.

This huge gap means that an application that is perfectly balanced to run on a Titan GPU - that is, an application with an arithmetic intensity of 5.3 - will run 4x slower on one of TaihuLight’s SW26010 processors than on a Titan GPU.  Put simply, despite being theoretically capable of 3 TFLOPS of computing, TaihuLight’s processors would only be able to deliver a quarter of that, or 0.75 TFLOPS, to this application.  Because of the severely limited per-node memory bandwidth, this 93 PFLOP system would perform like a 23 PFLOP system on an application that, given an arithmetic intensity of 5.3, would be considered highly optimized by most standards.

    Of course, the indigenous architecture also means that application developers will have to rely on indigenous implementations or ports of performance runtimes like OpenMP and OpenACC, libraries like BLAS, and ISA-specific vector intrinsics.  The maturity of this software stack for the ShenWei-64 architecture remains unknown.

    <h2>What is interesting</h2>This all isn’t to say that the TaihuLight system isn’t a notable achievement; it is the first massive-scale deployment of a CPU-based manycore processor, it is the first massive-scale deployment of EDR InfiniBand, and its CPU design is extremely interesting in a number of ways.

    The CPU block diagrams included in Dongarra’s report are a bit like a Rorschach test; my esteemed colleagues at The Next Platform astutely pointed out its similarities to KNL, but my first reaction was to compare it with IBM’s Cell processor:

[Figure: IBM Cell BE vs. ShenWei SW26010.  Cell diagram from NAS; SW26010 diagram from the Dongarra report.]
    The Cell processor was ahead of its time in many ways and arguably the first manycore chip targeted at HPC.  It had
    <ul><li>a single controller core (the PPE) with L1 and L2 caches</li><li>eight simpler cores (the SPEs) on an on-chip network with no L2 cache, but an embedded SRAM scratchpad</li></ul><div>and by comparison, the SW26010 has</div>

<ul><li>a single controller core (the MPE) with L1 and L2 caches</li><li>sixty-four simpler cores (the CPEs) on an on-chip network with no L2 cache, but an embedded SRAM scratchpad</li></ul>

    Of course, the similarities are largely superficial and there are vast differences between the two architectures, but the incorporation of heterogeneous (albeit very similar) cores on a single package is quite bold and is a design point that may play a role in exascale processor designs:

[Figure: what an exascale processor might look like, as presented by Kathy Yelick]
which may feature a combination of many lightweight cores (not unlike the CPE arrays on the TaihuLight processor) accompanied by a few capable cores (not unlike the MPE cores).

    The scratchpad SRAM present on all of the CPE cores is also quite intriguing, as it is a marked departure from the cache-oriented design of on-package SRAM that has dominated CPU architectures for decades.  The Dongarra report doesn’t detail how the scratchpad SRAM is used by applications, but it may offer a unique new way to perform byte-granular loads and stores that do not necessarily waste a full cache line’s worth of memory bandwidth if the application knows that memory access is to be unaligned.

    This is a rather forward-looking design decision that makes the CPU look a little more like a GPU.  Some experimental processor designs targeting exascale have proposed eschewing deep cache hierarchies in favor of similar scratchpads:

[Figure: the Traleika Glacier processor design, featuring separate control and execution blocks and scratchpad SRAM.  Adapted from the Traleika Glacier wiki page.]
    Whether or not we ever hear about how successful or unsuccessful these processor features are remains to be seen, but there may be valuable lessons to be learned ahead of the first generation of exascale processors from architectures like those in the TaihuLight system.

<h2>Outlook</h2>At a glance, it is easy to call out the irony in the U.S. government’s decision to ban the sale of Intel’s KNL processors to the Chinese now that the TaihuLight system is public.  It is clear that China is in a position to begin building extreme-scale supercomputers without the help of Intel, and it is very likely that the U.S. embargo accelerated this effort.  As pondered by a notable pundit in the HPC community,

<blockquote class="twitter-tweet">If US gov hadn’t barred US #HPC tech to China, new No.1 #supercomputer could’ve been #KNL-powered instead of Chinese CPUs? #ISC16 #backfired — Andrew Jones (@hpcnotes) June 20, 2016</blockquote>
And this may have been the case.  However, despite the TaihuLight system’s #1 position and very noteworthy Linpack performance and efficiency, it is not the massive disruptor that puts the U.S. in the back seat.  Underneath TaihuLight’s shiny, 93-petaflop veneer are some cut corners that substantially lower its ability to reliably deliver performance and scientific impact commensurate with its Linpack score.  As pointed out by a colleague wiser than me, Intel’s impending KNL chip is the product of years of effort, and it is likely that it will be years before ShenWei’s chip designs and fabs are able to really deliver a fully balanced, competitive, HPC-oriented microarchitecture.

    With that being said, TaihuLight is still a massive system, and even if its peak Linpack score is not representative of its actual achievable performance in solving real scientific problems, it is undeniably a leadership system.  Even if applications can only realize a small fraction of its Linpack performance, there is a lot of discovery to be made in petascale computing.

Further, the SW26010 processor itself features some bold design points, and being able to test a heterogeneous processor with scratchpad SRAM at extreme scale may give China a leg up in the exascale architecture design space.  Only time will tell if these opportunities are pursued, or if TaihuLight follows its predecessors into an existence of disuse in a moldy datacenter caused by a high electric bill, poor system design, and lack of software.

diff --git a/2016/basics-of-i-o-benchmarking/index.html b/2016/basics-of-i-o-benchmarking/index.html
This is a crosspost from Glenn K. Lockwood: personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

    Basics of I/O Benchmarking


    Most people in the supercomputing business are familiar with using FLOPS as a proxy for how fast or capable a supercomputer is.  This measurement, as observed using the High-Performance Linpack (HPL) benchmark, is the basis for the Top500 list.  However, I/O performance is becoming increasingly important as data-intensive computing becomes a driving force in the HPC community, and even though there is no Top500 list for I/O subsystems, the IOR benchmark has become the de facto standard way to measure the I/O capability for clusters and supercomputers.

Unfortunately, I/O performance tends to be trickier to measure using synthetic benchmarks because of the complexity of the I/O stack that lies between where data is generated (the CPU) and where it will ultimately be stored (a spinning disk or SSD on a network file system).  In the interests of clarifying some of the confusion that can arise when trying to determine how capable an I/O subsystem really is, let’s take a look at some of the specifics of running IOR.

<h2>Getting Started with IOR</h2>IOR writes data sequentially with the following parameters:
<ul><li>blockSize (-b)</li><li>transferSize (-t)</li><li>segmentCount (-s)</li><li>numTasks (-n)</li></ul>
which are best illustrated with a diagram:
[Diagram: how IOR's blockSize, transferSize, and segmentCount lay out each task's data within the file]
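One note on how these compose, as I read IOR's access pattern: each task writes segmentCount segments of blockSize bytes apiece (issued in transferSize chunks), so the aggregate volume is numTasks × blockSize × segmentCount.  A quick check for the 16 GiB example used below:

<pre>
# Aggregate volume for: mpirun -n 64 ./ior -t 1m -b 16m -s 16
numTasks, blockSize, segmentCount = 64, 16 * 2**20, 16
total_bytes = numTasks * blockSize * segmentCount
print(total_bytes / 2**30, "GiB")             # -> 16.0 GiB
</pre>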
    These four parameters are all you need to get started with IOR.  However, naively running IOR usually gives disappointing results.  For example, if we run a four-node IOR test that writes a total of 16 GiB:

<pre>$ mpirun -n 64 ./ior -t 1m -b 16m -s 16

access bw(MiB/s) block(KiB) xfer(KiB) open(s)  wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
write  427.36    16384      1024.00   0.107961 38.34    32.48    38.34    2
read   239.08    16384      1024.00   0.005789 68.53    65.53    68.53    2
remove -         -          -         -        -        -        0.534400 2
</pre>
    we can only get a couple hundred megabytes per second out of a Lustre file system that should be capable of a lot more.

Switching from writing to a single shared file to one file per process using the -F (filePerProcess=1) option changes the performance dramatically:


<pre>$ mpirun -n 64 ./ior -t 1m -b 16m -s 16 -F
...
access bw(MiB/s) block(KiB) xfer(KiB) open(s)  wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
write  33645     16384      1024.00   0.007693 0.486249 0.195494 0.486972 1
read   149473    16384      1024.00   0.004936 0.108627 0.016479 0.109612 1
remove -         -          -         -        -        -        6.08     1
</pre>

This is in large part because letting each MPI process work on its own file cuts out any contention that would arise because of file locking.

However, the performance difference between our naive test and the file-per-process test is a bit extreme.  In fact, the only way that a 146 GB/sec read rate could be achievable on Lustre is if each of the four compute nodes had over 45 GB/sec of network bandwidth to Lustre - that is, a 400 Gbit link on every compute and storage node.

<h2>Effect of Page Cache on Benchmarking</h2>

What's really happening is that the data being read by IOR isn't actually coming from Lustre; rather, the files' contents are already cached, and IOR is able to read them directly out of each compute node's DRAM.  The data wound up getting cached during the write phase of IOR as a result of Linux (and Lustre) using a write-back cache to buffer I/O, so that instead of IOR writing and reading data directly to Lustre, it's actually mostly talking to the memory on each compute node.

To be more specific, although each IOR process thinks it is writing to a file on Lustre and then reading back the contents of that file from Lustre, it is actually

1. writing data to a copy of the file that is cached in memory.  If there is no copy of the file cached in memory before this write, the parts being modified are loaded into memory first.
2. those parts of the file in memory (called "pages") that are now different from what's on Lustre are marked as being "dirty"
3. the write() call completes and IOR continues on, even though the written data still hasn't been committed to Lustre
4. independent of IOR, the OS kernel continually scans the file cache for files that have been updated in memory but not on Lustre ("dirty pages"), and then commits the cached modifications to Lustre
5. dirty pages are declared non-dirty since they are now in sync with what's on disk, but they remain in memory

Then when the read phase of IOR follows the write phase, IOR is able to just retrieve the file's contents from memory instead of having to communicate with Lustre over the network.
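Incidentally, you can watch this write-back machinery at work outside of IOR.  A small sketch, assuming a Linux node (it reads the kernel-wide "Dirty" counter in /proc/meminfo, so run it on an otherwise quiet system):

<pre>
import time

def dirty_kb():
    # /proc/meminfo's "Dirty:" line counts memory waiting for write-back
    with open("/proc/meminfo") as f:
        for line in f:
            if line.startswith("Dirty:"):
                return int(line.split()[1])

with open("testfile", "wb") as f:
    f.write(b"\0" * (512 * 2**20))            # buffer 512 MiB of writes

for _ in range(5):
    print(dirty_kb(), "kB dirty")             # shrinks as the kernel flushes
    time.sleep(2)
</pre>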

There are a couple of ways to measure the read performance of the underlying Lustre file system.  The crudest is to simply write more data than will fit into the total page cache so that by the time the write phase has completed, the beginning of the file has already been evicted from cache.  For example, increasing the number of segments (-s) to write more data reveals very clearly the point at which the page cache on my test system's nodes runs over:

[Graph: measured write bandwidth versus data volume, dropping sharply once the write volume exceeds the nodes' page cache]
    However, this can make running IOR on systems with a lot of on-node memory take forever.

A better option would be to get the MPI processes on each node to only read data that they didn't write.  For example, on a four-process-per-node test, shifting the mapping of MPI processes to blocks by four makes each node N read the data written by node N-1.

[Diagram: the shifted task-to-block mapping, in which each rank reads a block written by a rank on another node]
    Since page cache is not shared between compute nodes, shifting tasks this way ensures that each MPI process is reading data it did not write.
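Conceptually, this shifted mapping looks something like the following sketch (a simplification of IOR's actual task reordering, assuming four tasks per node):

<pre>
# 16 ranks across 4 nodes, 4 ranks per node
ntasks, tasks_per_node = 16, 4
for rank in range(ntasks):
    # each rank reads the block written by a rank one node over
    reads_from = (rank + tasks_per_node) % ntasks
    print(f"rank {rank:2d} reads the block written by rank {reads_from:2d}")
</pre>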

    IOR provides the -C option (reorderTasks) to do this, and it forces each MPI process to read the data written by its neighboring node.  Running IOR with this option gives much more credible read performance:

<pre>$ mpirun -n 64 ./ior -t 1m -b 16m -s 16 -F -C
...
access bw(MiB/s) block(KiB) xfer(KiB) open(s)  wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
write  41326     16384      1024.00   0.005756 0.395859 0.095360 0.396453 0
read   3310.00   16384      1024.00   0.011786 4.95     4.20     4.95     1
remove -         -          -         -        -        -        0.237291 1
</pre>

    But now it should seem obvious that the write performance is also ridiculously high. And again, this is due to the page cache, which signals to IOR that writes are complete when they have been committed to memory rather than the underlying Lustre file system.

    To work around the effects of the page cache on write performance, we can issue an fsync() call immediately after all of the write()s return to force the dirty pages we just wrote to flush out to Lustre. Including the time it takes for fsync() to finish gives us a measure of how long it takes for our data to write to the page cache and for the page cache to write back to Lustre.
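The same trick is easy to apply by hand outside of IOR; a minimal sketch of timing a buffered write with and without the fsync() included:

<pre>
import os
import time

t0 = time.time()
with open("testfile", "wb") as f:
    f.write(b"\0" * (512 * 2**20))   # returns once the data is in page cache
    t_write = time.time() - t0
    f.flush()
    os.fsync(f.fileno())             # returns once the data is on the file system
t_total = time.time() - t0
print(f"write(): {t_write:.2f}s   write()+fsync(): {t_total:.2f}s")
</pre>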

    IOR provides another convenient option, -e (fsync), to do just this. And, once again, using this option changes our performance measurement quite a bit:

<pre>$ mpirun -n 64 ./ior -t 1m -b 16m -s 16 -F -C -e
...
access bw(MiB/s) block(KiB) xfer(KiB) open(s)  wr/rd(s) close(s) total(s) iter
------ --------- ---------- --------- -------- -------- -------- -------- ----
write  2937.89   16384      1024.00   0.011841 5.56     4.93     5.58     0
read   2712.55   16384      1024.00   0.005214 6.04     5.08     6.04     3
remove -         -          -         -        -        -        0.037706 0
</pre>


    and we finally have a believable bandwidth measurement for our file system.

    <h2>Defeating Page Cache</h2>Since IOR is specifically designed to benchmark I/O, it provides these options that make it as easy as possible to ensure that you are actually measuring the performance of your file system and not your compute nodes’ memory.  That being said, the I/O patterns it generates are designed to demonstrate peak performance, not reflect what a real application might be trying to do, and as a result, there are plenty of cases where measuring I/O performance with IOR is not always the best choice.  There are several ways in which we can get clever and defeat page cache in a more general sense to get meaningful performance numbers.

When measuring write performance, bypassing page cache is actually quite simple; opening a file with the O_DIRECT flag makes reads and writes go directly to disk, bypassing page cache entirely.  In addition, the fsync() call can be inserted into applications, as is done with IOR’s -e option.

    Measuring read performance is a lot trickier.  If you are fortunate enough to have root access on a test system, you can force the Linux kernel to empty out its page cache by doing
    <blockquote class="tr_bq"># echo 1 > /proc/sys/vm/drop_caches</blockquote>and in fact, this is often good practice before running any benchmark (e.g., Linpack) because it ensures that you aren’t losing performance to the kernel trying to evict pages as your benchmark application starts allocating memory for its own use.

    Unfortunately, many of us do not have root on our systems, so we have to get even more clever.  As it turns out, there is a way to pass a hint to the kernel that a file is no longer needed in page cache:
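A minimal sketch of that hint, assuming a file path argument (Python exposes the underlying posix_fadvise(2) call directly):

<pre>
import os
import sys

# hint to the kernel that this file's cached pages are no longer needed;
# dirty pages must be flushed (e.g., via fsync) before they can be evicted
fd = os.open(sys.argv[1], os.O_RDONLY)
os.posix_fadvise(fd, 0, 0, os.POSIX_FADV_DONTNEED)   # len=0 means the whole file
os.close(fd)
</pre>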


The effect of passing POSIX_FADV_DONTNEED using posix_fadvise() is usually that all pages belonging to that file are evicted from page cache in Linux.  However, this is just a hint - not a guarantee - and the kernel evicts these pages asynchronously, so it may take a second or two for pages to actually leave page cache.  Fortunately, Linux also provides a way to probe pages in a file to see if they are resident in memory.

Finally, it’s often easiest to just limit the amount of memory available for page cache.  Because application memory always takes precedence over cache memory, simply allocating most of the memory on a node will force most of the cached pages to be evicted.  Newer versions of IOR provide the memoryPerNode option, which does just that, and the effects are what one would expect:

[Graph: measured bandwidth from a single node as a function of reserved application memory and total read volume]
    The above diagram shows the measured bandwidth from a single node with 128 GiB of total DRAM.  The first percent on each x-label is the amount of this 128 GiB that was reserved by the benchmark as application memory, and the second percent is the total write volume.  For example, the “50%/150%” data points correspond to 50% of the node memory (64 GiB) being allocated for the application, and a total of 192 GiB of data being read.

    This benchmark was run on a single spinning disk which is not capable of more than 130 MB/sec, so the conditions that showed performance higher than this were benefiting from some pages being served from cache.  And this makes perfect sense given that the anomalously high performance measurements were obtained when there was plenty of memory to cache relative to the amount of data being read.

    <h2>Corollary </h2>Measuring I/O performance is a bit trickier than CPU performance in large part due to the effects of page caching.  That being said, page cache exists for a reason, and there are many cases where an application’s I/O performance really is best represented by a benchmark that heavily utilizes cache.

    For example, the BLAST bioinformatics application re-reads all of its input data twice; the first time initializes data structures, and the second time fills them up.  Because the first read caches each page and allows the second read to come out of cache rather than the file system, running this I/O pattern with page cache disabled causes it to be about 2x slower:

[Graph: BLAST read performance with page cache enabled versus disabled, showing the roughly 2x slowdown without cache]
    Thus, letting the page cache do its thing is often the most realistic way to benchmark with realistic application I/O patterns.  Once you know how page cache might be affecting your measurements, you stand a good chance of being able to reason about what the most meaningful performance metrics are.

diff --git a/2016/jupyter-notebooks-for-performing-and-sharing-bioinformatics-analyses/index.html b/2016/jupyter-notebooks-for-performing-and-sharing-bioinformatics-analyses/index.html
This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Jupyter Notebooks for Performing and Sharing Bioinformatics Analyses


I was asked to do a half-day tutorial at the Great Lakes Bioinformatics conference Workshop session. The focus was mainly on R, with some Python as well. We covered:

• The basics of Jupyter notebooks - what they are and how they work
• How to install and run Jupyter notebooks on their laptop, in R and Python
• How to perform interactive analyses in a web browser using Jupyter
• Using markdown and latex
• How to “Port” an R bioinformatics workflow from some scripts into a Jupyter notebook
• How to share a Jupyter notebook online, using three different approaches:
  • SageMathCloud
  • GitHub, and
  • mybinder.org

I think it went pretty well; the materials are available on GitHub. It was largely hands-on, so apart from some introductory slides, it was mainly about giving a tour of the notebook and how to use Jupyter to share analyses; the “scripts” that I went through in presenting the material were aimed at having the students produce the notebooks here.

diff --git a/2016/mpi-s-place-in-big-computing/index.html b/2016/mpi-s-place-in-big-computing/index.html
This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    MPI's Place in Big Computing


    The organizers of EuroMPI 2016 were kind enough to invite me to give a keynote and participate in a panel at their meeting, which was held at the end of September in beautiful Edinburgh. The event was terrific, with lots of very interesting work going on in MPI implementations and with MPI.


    The topic of my talk was “MPI’s Place in Big Computing”; the materials from the talk can be found on github. The talk, as you might expect, included discussion of high-productivity big data frameworks, but also — and missing from the discussion in my “HPC is dying” blog post — the “data layer” frameworks that underpin them.


I think a lot of people have taken, quite reasonably, that blog post to suggest that Spark, for example, is a competitor to MPI; the point I wanted to make is a little more nuanced than that.


    I’m actually skeptical of Spark’s utility for (e.g.) large-scale simulations. However attractive the model is from a variety of points of view, absent some huge breakthrough I don’t think that functional models with immutable data can support the performance, memory requirements, or performance predictability we require. (But who knows; maybe that’ll be one of the compromises we find we have to make on the road to exascale).


    But whatever you might think of Spark’s efficacy for your particular use case,

• A lot of people manifestly find it to be extremely useful for their use case; and
• Performance is quite important to those communities.

    So given that, why isn’t Spark built atop of MPI for network communications? And why isn’t TensorFlow, or Dask, or SeaStar?


    The past five years have seen a huge number of high-productivity tools for large-scale number crunching gain extremely rapid adoption. Even if you don’t like those particular tools for your problems, surely you’d like for there to exist some tools like that for the traditional HPC community; why do other communications frameworks support this flourishing ecosystem of platforms, and MPI doesn’t?


    There’s another argument there, too - simply from a self-preservation point of view, it would be in MPI’s interest to be adopted by a high-profile big data platform to ensure continued success and support. But none are; why? It’s not because the developers of Spark or at Google are just too dumb to figure out MPI’s syntax.


Going through what does get used for these packages and what doesn’t — which is what I do in this talk — I think the issues become fairly clear. MPI wants to be both a low-level communications framework and a higher-level programming model, and ends up tripping over its own feet trying to dance both dances. As a communications “data plane” it imposes too many high-level decisions on applications — no fault tolerance, restrictive communications semantics (in-order and arrival guarantees) — and provides too few services (e.g. a performant active message/RPC layer). And as a high-level programming model it is too low level and is missing different services (communications-aware scheduling came up in several guises at the meeting).


I don’t think that’s insurmountable; I think inside MPI implementations there is a performant, network-agnostic low-level communications layer trying to get out. Exposing more MPI runtime services is a move in the right direction. I was surprised at how open the meeting participants were to making judicious changes — even perhaps breaking some backwards compatibility — in the right directions.


    Thanks again to the organizers for extending the opportunity to participate; it was great.


    My slides can be seen below or on github, where the complete materials can be found.

diff --git a/2016/reminiscing-and-the-computing-renaissance/index.html b/2016/reminiscing-and-the-computing-renaissance/index.html
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    Reminiscing and the computing renaissance


Sifting through boxes of 3.5 inch floppy diskettes - some of questionable provenance - in a dusty basement. Gingerly packing up what I consider to be the holy trinity of Commodore Amiga computers - A1000, A2000, A3000 - all in some state of working condition. Of course, back in the day, only Amiga made it all possible - awesome graphic demos, games, word processing, and ray tracing to Amiga Unix - AMIX, which was one of the first ports of SVR4 to the MC68000 series processor (yes, I do have AMIX installed also).


The frustration of watching the Death Bed Vigil movie, in which Dave Haynie of Commodore Amiga fame gives us a tour through Commodore engineering at headquarters and, of course, the fire (bankruptcy) sale which happened at Commodore Canada on Pharmacy Avenue in Toronto.


Once upon a time, we all carried the respective flags of our favorite platforms - which were varied. It was this rivalry, I think, that led these respective user communities to squeeze tremendous performance out of these systems in the race to show which platform was best.


Then it all seemed to change. Suddenly we were all forced to march to the same clock rhythm - and boredom set in. With this course seemingly set in stone, how are we to escape this computing Sturm und Drang?


    GAME ON!


Well, for me this hope appeared in 2013 with the announcement of the OpenPOWER Consortium - an open technical community built around the IBM POWER architecture, growing solutions to serve the evolving computing needs of today and the future. Next week the second annual OpenPOWER Summit takes place in San Jose, United States, and if the first event was any indication, this should be a very exciting event. So power up and strap on your accelerators - we’re in for a very interesting ride!

diff --git a/2016/sc16-stir-it-up/index.html b/2016/sc16-stir-it-up/index.html
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    SC16- Stir it up!


It’s been ages since I’ve posted to this blog. I’ve not forgotten about it - I’ve been figuratively stirring the technical computing goulash pot over on the IBM Systems In the Making blog.


Having recently moved house, all of the old classic and newer Arm based systems that I’ve written about previously are still mostly packed away. My hands have been more focused on home improvement than tinkering. As those in HPC circles will know, the annual Supercomputing SC16 event starts this coming Sunday in Salt Lake City, UT. Interestingly, if my memory serves me well, the last time we were in Salt Lake City, for SC12, I was a newbie with IBM, having joined as a result of the acquisition of Platform Computing.


The HPC landscape has changed quite a bit since then, including the divestiture of the IBM x86 server business to Lenovo and the birth of the OpenPOWER Foundation. The OpenPOWER Foundation has gone from baby steps to sprinting, with a huge and diverse group of members spanning accelerators, interconnects, research organizations and more - all united on a common goal: to drive innovation and change in enterprise computing and HPC via the OpenPOWER platform. It’s like somebody has taken a big wooden spoon and stirred the goulash in the pot - because we all know that if things stand still for too long in the pot, it’s going to burn.


As I’ve banged on about in previous blogs, I’m more pleased than ever to see this explosion of diversity in HPC, from A(RM) and P(OWER) to X(86). When you throw accelerators such as FPGAs and GPUs into the mix, what is needed more than ever to address the complexity of these diverse environments is a software defined approach - one which hides this complexity from the users and allows them to leverage the power of today’s environments.


IBM Spectrum LSF (formerly Platform LSF) has been making this possible for over 20 years. A glance at the OS and platform support list illustrates the breadth and depth of OS and processor support. Not only does IBM Spectrum LSF make tying together heterogeneous resources easy, its proven technology allows organizations to share resources on a global scale. In fact, the latest IBM Spectrum LSF V10 release from June 2016 contained numerous enhancements, all focused on improving the productivity of HPC users and controlling costs. Read more in this top 10 cool things about IBM Spectrum LSF blog. And looking beyond HPC, the IBM Spectrum Computing family of products helps provide advanced resource management capabilities for diverse workloads, including Hadoop and Spark.


Yours truly will be in Salt Lake City for SC16. Drop by booth 1018 to talk about how IBM software defined computing can help your organization. IBM will be holding a number of user groups and seminars covering the broad spectrum of IBM solutions for HPC. And for IBM Spectrum LSF users, we’ll be holding our annual user group, where you can hear how your peers are using IBM Spectrum LSF to get an advantage, and learn about the latest developments in IBM Spectrum LSF from our experts.


    Come on and stir it up! You’ll like it!

diff --git a/2016/spark-chapel-tensorflow-workshop-at-umich/index.html b/2016/spark-chapel-tensorflow-workshop-at-umich/index.html
This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Spark, Chapel, TensorFlow- Workshop at UMich


    The kind folks at the University of Michigan’s Center for Computational Discovery and Engineering (MICDE), which is just part of the very impressive Advanced Research Computing division, invited me to give a workshop there a couple of months ago about the rapidly-evolving large-scale numerical computing ecosystem.


    There’s lots that I want to do to extend this to a half-day length, but the workshop materials — including a VM that can be used to play with Spark, Chapel and TensorFlow, along with Jupyter notebooks for each — can be found on GitHub and may be of some use to others as they stand.


    The title and abstract follow.


    Next Generation HPC? What Spark, TensorFlow, and Chapel are teaching us about large-scale numerical computing


For years, the academic science and engineering community was almost alone in pursuing very large-scale numerical computing, and MPI - the 1990s-era message passing library - was the lingua franca for such work. But starting in the mid-2000s, others became interested in large-scale computing on data. First, internet-scale companies like Google and Yahoo! started performing fairly basic analytics tasks at enormous scale, and now many others are tackling increasingly complex and data-heavy machine-learning computations, which involve very familiar scientific computing tasks such as linear algebra, unstructured mesh decomposition, and numerical optimization. But these new communities have created programming environments which emphasize what we’ve learned about computer science and programmability since 1994 - with greater levels of abstraction and encapsulation, separating high-level computation from the low-level implementation details - and some in HPC are starting to notice. This talk will give a brief introduction to the Apache Spark environment and Google’s TensorFlow machine-learning package for high-level numerical computation, as well as the HPC-focused Chapel language from Cray, to show where each can be used today and how they might be used in the future. The slides for this talk, and examples for each package along with a virtual machine which can be used for running them, will be available at https://github.com/ljdursi/Spark-Chapel-TF-UMich-2016 .

diff --git a/2017/beyond-single-core-r-parallel-data-analysis/index.html b/2017/beyond-single-core-r-parallel-data-analysis/index.html
This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Beyond Single Core R- Parallel Data Analysis


I was asked recently to do a short presentation for the Greater Toronto R Users Group on parallel computing in R; my slides can be seen below or on github, where the complete materials can be found.


I covered some similar things I had covered in a half-day workshop a couple of years earlier (though, obviously, without the hands-on component):

• How to think about parallelism and scalability in data analysis
• The standard parallel package, including the former snow and multicore facilities, using airline data as an example
• The foreach package, using airline data and simple stock data
• A summary of best practices

    with some bonus material tacked on the end touching on a couple advanced topics.


I was quite surprised at how little had changed since late 2014, other than further development of SparkR (which I didn’t cover), and the interesting but seemingly not very much used future package. I was also struck by how hard it is to find similar materials online covering a range of parallel computing topics in R - it’s rare enough that even this simple effort made it to the HPC project view on CRAN (under “related links”). R continues to grow in popularity for data analysis; is this all desktop computing? Is Spark siphoning off the clustered-dataframe usage?


(This was also my first time with RPres in RStudio; wow, not a fan - RPres was not ready for general release. And I’m a big fan of RMarkdown.)

diff --git a/2017/chapel-s-home-in-the-landscape-of-new-scientific-computing-languages/index.html b/2017/chapel-s-home-in-the-landscape-of-new-scientific-computing-languages/index.html
This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Chapel's Home in the Landscape of New Scientific Computing Languages


I was invited to speak at this past weekend’s fourth annual Chapel Implementers and Users Workshop (CHIUW 2017). It was a great meeting, with lots of extremely high-quality talks on work being done with and on Chapel. The slides from the presentations will be up shortly, and I recommend them - the libfabric, KNL, use-after-free tracking, and GraphBLAS works were of particular interest to me. The Code Camp on the next day, working with members of the Chapel team on individual projects, was also a lot of fun.


The topic of my own talk was “Chapel’s Home in the Landscape of New Scientific Computing Languages (and what it can learn from the neighbours)”; the materials from the talk can be found on github. I described the sorts of problems I’m particularly interested in, surveyed some of the languages/frameworks in that space, and tried to identify what I saw as Chapel’s role in the environment.


    My slides can be seen below or on github, where the complete materials can be found.

diff --git a/2017/cleaning-up-gracc/index.html b/2017/cleaning-up-gracc/index.html
This is a crosspost from Derek’s Web: thoughts from Derek. See the original post here.

    Cleaning Up GRACC


The GRid ACcounting Collector (GRACC) is the OSG’s new version of accounting software, replacing Gratia. It has been running in production since March 2017. Last week, on Friday November 3rd, we held a GRACC Focus Day. Our goal was to clean up data that is presented in GRACC. My changes were:

• Update the GRACC-Collector to version 1.1.8. The primary change in this release is marking the messages sent to RabbitMQ as “persistent”. Persistent messages are saved to disk in order to survive a RabbitMQ reboot.
• Use case-insensitive comparisons to determine the Open Science Grid Information Management system (OIM) information. This was an issue with GPGrid (Fermilab), which was registered as GPGRID.
• Set the OIM_Site equal to the Host_description attribute if the OIM logic is unable to determine the registered OIM site. This is especially useful for the LIGO collaboration, which uses sites in Europe that are not registered in OIM. Now, instead of a lot of Unknown sites listed on the LIGO site listing, it shows the reported site name of where the job ran.
[Screenshot: the GRACC Projects Page for LIGO]

    Regular Expression Corrections


One of the common problems we have in GRACC is poor data coming from the various probes installed at hundreds of sites. We don’t control the data coming into GRACC, so occasionally we must make corrections to the data for clarity or correctness. One of these corrections addresses misreporting of the “site” that the jobs ran on.


    In many instances, the probe is unable to determine the site and simply lists the hostname of the worker node where the job ran. This can cause the cardinality of sites listed in GRACC to increase dramatically as we get new hostnames inserted into the sites listing. If the hostnames are predictable, a regular expression matching algorithm can match a worker node hostname to a proper site name.


    The largest change for GRACC was the regular expression corrections. With this new feature, GRACC administrators can set corrections to match on attributes using regular expression patterns. For example, consider the following correction configuration.

    + +
[[Corrections]]
index = 'gracc.corrections'
doc_type = 'host_description_regex'
match_fields = ['Host_description']
source_field = 'Corrected_OIM_Site'
dest_field = 'OIM_Site'
regex = true
    + +

    This configuration means:

    + +
    +

    Match the Host_description field in the incoming job record with the regular expression Host_description field in the corrections table. If they are a match, take the value in the Corrected_OIM_Site field in the corrections table and place it into the OIM_Site field in the job record.

    + +
    + +

    And the correction document would look like:

    + +
{
  "_index": "gracc.corrections-0",
  "_type": "host_description_regex",
  "_id": "asldkfj;alksjdf",
  "_score": 1,
  "_source": {
    "Host_description": ".*\.bridges\.psc\.edu",
    "Corrected_OIM_Site": "PSC Bridges"
  }
}
    + +

Note that the regular expression lives in the Host_description field of the correction document.

    + +

So, if the incoming job record is similar to:

    + +
{
  ...
  "Host_description": "l006.pvt.bridges.psc.edu"
  ...
}
    + +

    Then the correction would modify or create values such that the final record would approximate:

    + +
{
  ...
  "Host_description": "l006.pvt.bridges.psc.edu",
  "OIM_Site": "PSC Bridges",
  "RawOIM_Site": ""
  ...
}
    + +

    Note that the Host_description field stays the same. We must keep it the same because it is used in record duplicate detection. If we modified the field and resummarized previous records, then it would cause multiple records to represent the same job.

    + +
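Putting those pieces together, the core of the logic is a regex match followed by a field overwrite. Here is a minimal Python sketch of that behavior - an illustration of the rule described above, not the actual GRACC code:

import re

# The correction document, as in the example above.
correction = {
    "Host_description": r".*\.bridges\.psc\.edu",  # regular expression
    "Corrected_OIM_Site": "PSC Bridges",
}

def apply_correction(record, correction):
    # Match the incoming record's Host_description against the
    # correction's regular expression...
    if re.fullmatch(correction["Host_description"],
                    record.get("Host_description", "")):
        # ...and copy the corrected value into OIM_Site.  The
        # Host_description field itself is left untouched so that
        # duplicate detection keeps working.
        record["OIM_Site"] = correction["Corrected_OIM_Site"]
    return record

record = {"Host_description": "l006.pvt.bridges.psc.edu"}
print(apply_correction(record, correction))
# {'Host_description': 'l006.pvt.bridges.psc.edu', 'OIM_Site': 'PSC Bridges'}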
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2017/compute-canadian-building-a-successful-and-federated-computational-research-enterprise-together/index.html b/2017/compute-canadian-building-a-successful-and-federated-computational-research-enterprise-together/index.html new file mode 100644 index 0000000..3262b4e --- /dev/null +++ b/2017/compute-canadian-building-a-successful-and-federated-computational-research-enterprise-together/index.html @@ -0,0 +1,223 @@ + + + + + + + Compute Canadian- Building a successful and federated computational research enterprise, together - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + +
    +

    hpc.social

    + + + + + + + + + + + +
    + High Performance Computing
    Practitioners
    and friends /#hpc +
    +
    + +
    +
    +
    + +
    + +
    + Share:  + +
    +
    + +
    + This is a crosspost from   Jonathan Dursi R&D computing at scale. See the original post here.
    + +
    +

    Compute Canadian- Building a successful and federated computational research enterprise, together

    +

    Canada is a federated nation, and this is particularly visible in +areas of research funding, where both the federal and provincial +orders of government play a role. In building a successful digital +research infrastructure to support Canadian science and scholarship, +we must recognize that reality, and rely on the successful examples +of many organizations in Canada and around the world that embrace +such a federated approach.

    + +

    In this discussion paper, +my colleague Jill Kowalchuck and I lay out what we hope to be the beginnings +of a discussion of what a renewed federation for supporting Canadian +science with advanced research computing and data could look like.

    + +

    Executive Summary

    + +

    Computing and data, and the expertise and tools to make use of both, is +now central to all fields of study. Ten years after the creation of +Compute Canada in response to the National Platforms Fund call, and +after the Naylor Report on science funding, it is an apt time for the +Canadian community built around this national research platform to take +stock. Is it doing what we need it to do for Canadian researchers? Is it +working the way we want it to? What should a Canadian computation and +data platform for supporting research look like in the coming years? +This document aims to begin that discussion within the community.

    + +

    Here we propose seven principles to guide us in this discussion — that +our project should serve Canadian research in a researcher-centred, +service-oriented, and truly national way; and that it should operate as +a true federation of equal partners, interoperable but not identical, +collaborative and up-to-date. We suggest in particular that it is vital +that our national platform is adaptive and responsive to researchers, +making choices driven by research needs and not technical choices, and +should make full use of the diversity and specialization that a Canadian +federation and its partners offer.

    + +

    From those principles, we make evidence-based proposals for a renewed +Canadian organization. Comparisons with successful examples of federated +organizations within Canada and abroad suggest that while the basic +architecture of our federation is sound, important roles and +relationships need to be clarified. While a central office must be +responsible for the processes of defining priorities, strategies, and +standards of interoperability, a successful federation requires those +processes to have buy-in from partners committed to the goals of the +federation. The Board of Directors of the central office in a federation +must have experience and training to handle the delicate task of +governing a central office but being responsible to a national +community. The Members need adequate visibility into the operations of +the central office and the federation as a whole so that they can +support their vital role to the organization. And that engagement needs +to extend to all who are invested in the success of research in Canada: +regional staff and Boards, institutional staff, researchers and funders, +and other organizations that provide digital infrastructure for research +in Canada. This document focusses on Compute Canada in particular, but +the principles and proposals apply to any digital research +infrastructure providers, or the system as a whole.

    + +

Success for this document will mean starting conversations, inspiring other documents and differing points of view, and the emergence of a consensus within the community of what a renewed national platform for the next ten years looks like. That does not mean this document is a straw-man. The authors have played roles in the national platform starting at its inception, from researcher to consortium and regional (east and west) staff and management, and within the Compute Canada central office, and hope that experience plus the benefit of some distance have produced a coherent and compelling vision of what the Compute Canada national project could be. But what matters is not this proposal; it is what the community as a whole decides it wants its national platform to be.

    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2017/cool-and-quiet-benchmarking-on-macchiatobin-armada-8040/index.html b/2017/cool-and-quiet-benchmarking-on-macchiatobin-armada-8040/index.html new file mode 100644 index 0000000..13acec2 --- /dev/null +++ b/2017/cool-and-quiet-benchmarking-on-macchiatobin-armada-8040/index.html @@ -0,0 +1,183 @@ + + + + + + + Cool and quiet benchmarking on MACCHIATObin (Armada 8040) - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + +
    +

    hpc.social

    + + + + + + + + + + + +
    + High Performance Computing
    Practitioners
    and friends /#hpc +
    +
    + +
    +
    +
    + +
    + +
    + Share:  + +
    +
    + +
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    + +
    +

    Cool and quiet benchmarking on MACCHIATObin (Armada 8040)

    +

I’ve recently taken delivery of a few new goodies to complement the MACCHIATObin Arm v8 powered board that I’ve written about previously on my blog.

    + + +

You’ll recall that my efforts to do some rudimentary testing, including running HPL, were thwarted by overheating. So I decided to address the issue with some parts I’ve been meaning to pick up anyway for some other interesting projects I have in the pipeline (fingers crossed):

    + + +

And this is what it looks like now…

    + +
    +
    + +

    Now, the red workbench and shiny heatsinks scream performance. So what about my run of HPL (Linpack)? Well, I decided to start over +from scratch and built my own Linpack against ATLAS, which I also compiled from scratch (let that run overnight).

    + +

    The result? I went from hitting the thermal limiter (and a non-result) to a successful Linpack run - with the CPU temperature never +really going much past 50C. As for my Linpack score, you can see that below.

    + +
    +
    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2017/installing-scitokens-on-a-mac/index.html b/2017/installing-scitokens-on-a-mac/index.html new file mode 100644 index 0000000..dcbb9a6 --- /dev/null +++ b/2017/installing-scitokens-on-a-mac/index.html @@ -0,0 +1,198 @@ + + + + + + + Installing SciTokens on a Mac - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + +
    +

    hpc.social

    + + + + + + + + + + + +
    + High Performance Computing
    Practitioners
    and friends /#hpc +
    +
    + +
    +
    +
    + +
    + +
    + Share:  + +
    +
    + +
This is a crosspost from Derek's Web: Thoughts from Derek. See the original post here.
    + +
    +

    Installing SciTokens on a Mac

    +

In case I ever have to install SciTokens again, here are the steps I took to make it work on my Mac. The most difficult part of this is installing the openssl headers for the jwt Python library. I followed the advice on this blog post.

    + +
      +
1. Install Homebrew

2. Install openssl:

   brew install openssl

3. Download the SciTokens library:

   git clone https://github.com/scitokens/scitokens.git
   cd scitokens

4. Create the virtualenv to install the jwt library:

   virtualenv jwt
   . jwt/bin/activate

5. Install jwt pointing to the Homebrew installed openssl headers:

   env LDFLAGS="-L$(brew --prefix openssl)/lib" CFLAGS="-I$(brew --prefix openssl)/include" pip install cryptography PyJWT
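With everything installed, a quick way to verify that the jwt library really found the openssl headers is to round-trip a token signed with an EC key (ES256, the kind of algorithm SciTokens relies on). This is just a hypothetical smoke test, not part of the original steps:

import jwt  # PyJWT
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec

# Generate a throwaway EC keypair; ES256 needs the cryptography
# package, which is exactly what the openssl-header dance was for.
key = ec.generate_private_key(ec.SECP256R1(), default_backend())
priv_pem = key.private_bytes(
    serialization.Encoding.PEM,
    serialization.PrivateFormat.PKCS8,
    serialization.NoEncryption(),
)
pub_pem = key.public_key().public_bytes(
    serialization.Encoding.PEM,
    serialization.PublicFormat.SubjectPublicKeyInfo,
)

# Round-trip a token: if this prints the claim back, the install works.
token = jwt.encode({"sub": "test"}, priv_pem, algorithm="ES256")
print(jwt.decode(token, pub_pem, algorithms=["ES256"]))  # {'sub': 'test'}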
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/2017/oh-woe-is-2016-not/index.html b/2017/oh-woe-is-2016-not/index.html new file mode 100644 index 0000000..a7c911f --- /dev/null +++ b/2017/oh-woe-is-2016-not/index.html @@ -0,0 +1,198 @@ + + + + + + + Oh Woe is 2016 - NOT! - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + +
    +

    hpc.social

    + + + + + + + + + + + +
    + High Performance Computing
    Practitioners
    and friends /#hpc +
    +
    + +
    +
    +
    + +
    + +
    + Share:  + +
    +
    + +
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    + +
    +

    Oh Woe is 2016 - NOT!

    +

As we enter a new year, 2016 seems to have been tarnished in its closing month by events around the world. Far be it from me to talk about world events here; I’d like to focus on the good - at least from my perspective. 2016 was a great year for me. It was the year in which I managed to:

    + +
      +
• Moved house
• Upgraded from a late 1980’s to a late 1990’s German station wagon (“estate” for those who speak real English)
• Moved from Blackberry 10 to Android - blech - but I’ll admit my HTC 10 is a fantastic piece of hardware
• Decided that I no longer revere Apple products as I once did - before any harsh words, I am writing this on a Macbook Pro Retina…and I have a veritable museum of Apple kit at home
• Took my first steps to learn about machine learning frameworks like Caffe and Tensorflow - yes, I’ve been tinkering with Caffe on one of my ARM developer boards
• Stuck with Linux for my work laptop even with the tantalizing choice of a shiny new Macbook with OS X
• Entrusted the security of my home internet to the Turris Omnia
  • because using a router that hasn’t been patched in years is, well, silly, to put it politely
• Finally got myself an OpenPOWER t-shirt at ISC High-Performance - which I wear proudly because OpenPOWER rocks!
• Understood that getting future generations interested in technology is key - and did my part by giving an intro to High-Performance Computing talk at a local school
• Successfully launched IBM Spectrum LSF 10.1 with the help of my many great peers. And yes, it does run on Linux on Arm v7 & v8 and Linux on POWER8 Little Endian :)
    +

    And that’s just what I can think of as I write this blog…so for me, 2016 has an aura rather +than a tarnish to it.

    + +

So as we enter the year of Canada’s 150th birthday with a full head of steam, I’m looking forward to hitchin' my wagon to some of the cool things coming up, including:

    + +
      +
• Exploring the wonderful national parks of Canada at no charge with my Parks Canada pass
• OpenPOWER and IBM POWER9
• Building up my home ARMy with a pre-ordered Armada 8040 Community Board, which should help to speed up the machine learning I’ve been tinkering with
    +

    And that’s just for starters. What’s your plan?

    + +
    +
    + +
    + + + + + + + + + + +
diff --git a/_posts/glennklockwood/2017-3-13-tagbloggercom1999blog-4307061427721284246post-3928423618033745788.md b/2017/reviewing-the-state-of-the-art-of-burst-buffers/index.html similarity index 52% rename from _posts/glennklockwood/2017-3-13-tagbloggercom1999blog-4307061427721284246post-3928423618033745788.md rename to 2017/reviewing-the-state-of-the-art-of-burst-buffers/index.html index 987f023..0f269cb 100644 --- a/_posts/glennklockwood/2017-3-13-tagbloggercom1999blog-4307061427721284246post-3928423618033745788.md +++ b/2017/reviewing-the-state-of-the-art-of-burst-buffers/index.html @@ -1,31 +1,102 @@

Reviewing the state of the art of burst buffers - hpc.social - Aggregated Personal Blog
    +
    + +
    +

    hpc.social

    + + + + + + + + + + + +
    + High Performance Computing
    Practitioners
    and friends /#hpc +
    +
    + +
    +
    +
    + +
    + +
    + Share:  + +
    +
    + +
    + This is a crosspost from   Glenn K. Lockwood Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.
    + +
    +

    Reviewing the state of the art of burst buffers

    + +

    Just over two years ago I attended my first DOE workshop as a guest representative of the NSF supercomputing centers, and I wrote a post that summarized my key observations of how the DOE was approaching the increase in data-intensive computing problems.  At the time, the most significant thrusts seemed to be
1. understanding scientific workflows to keep pace with the need to process data in complex ways
2. deploying burst buffers to overcome the performance limitations of spinning disk relative to the increasing scale of simulation data
3. developing methods and processes to curate scientific data

Here we are now two years later, and these issues still take center stage in the discussion surrounding the future of data-intensive computing.  The DOE has made significant progress in defining its path forward in these areas though, and in particular, both the roles of burst buffers and scientific workflows have a much clearer focus on DOE’s HPC roadmap.  Burst buffers in particular are becoming a major area of interest since they are now becoming commercially available, so in the interests of updating some of the incorrect or incomplete thoughts I wrote about two years ago, I thought I’d write about the current state of the art in burst buffers in HPC.

    Two years ago I had observed that there were two major camps in burst buffer implementations: one that is more tightly integrated with the compute side of the platform that utilizes explicit allocation and use, and another that is more closely integrated with the storage subsystem and acts as a transparent I/O accelerator.  Shortly after I made that observation though, Oak Ridge and Lawrence Livermore announced their GPU-based leadership systems, Summit and Sierra, which would feature a new type of burst buffer design altogether that featured on-node nonvolatile memory.

This CORAL announcement, combined with the deployment of production, large-scale burst buffers at NERSC, Los Alamos, and KAUST, has led me to re-think my taxonomy of burst buffers.  Specifically, it really is important to divide burst buffers into their hardware architectures and software usage modes; different burst buffer architectures can provide the same usage modalities to users, and different modalities can be supported by the same architecture.
For the sake of laying it all out, let’s walk through the taxonomy of burst buffer hardware architectures and burst buffer software usage modalities.

Burst Buffer Hardware Architectures

First, consider your typical medium- or large-scale HPC system architecture without a burst buffer:

    <div class="separator" style="clear: both; text-align: center;"></div> +
    In this design, you have

• Compute Nodes (CN), which might be commodity whitebox nodes like the Dell C6320 nodes in SDSC’s Comet system or Cray XC compute blades
• I/O Nodes (ION), which might be commodity Lustre LNET routers (commodity clusters), Cray DVS nodes (Cray XC), or CIOD forwarders (Blue Gene)
• Storage Nodes (SN), which might be Lustre Object Storage Servers (OSSes) or GPFS Network Shared Disk (NSD) servers
• The compute fabric (blue lines), which is typically Mellanox InfiniBand, Intel OmniPath, or Cray Aries
• The storage fabric (red lines), which is typically Mellanox InfiniBand or Intel OmniPath
    Given all these parts, there are a bunch of different places you can stick flash devices to create a burst buffer.  For example…

ION-attached Flash

You can put SSDs inside IO nodes, resulting in an ION-attached flash architecture that looks like this:

    <div class="separator" style="clear: both; text-align: center;"></div> +
    Gordon, which was the first large-scale deployment of what one could call a burst buffer, had this architecture.  The flash was presented to the compute nodes as block devices using iSCSI, and a compute node could have anywhere between zero and sixteen SSDs mounted to it entirely via software.  More recently, the Tianhe-2 system at NUDT also deployed this architecture and exposes the flash to user applications via their H2FS middleware.

Fabric-attached Flash

A very similar architecture is to add specific burst buffer nodes on the compute fabric that don’t route I/O, resulting in a fabric-attached flash architecture:

    <div class="separator" style="clear: both; text-align: center;"></div> +Like the ION-attached flash design of Gordon, the flash is still embedded within the compute fabric and is logically closer to the compute nodes than the storage nodes.  Cray’s DataWarp solution uses this architecture.

Because the flash is still on the compute fabric, this design is very similar to ION-attached flash and the decision to choose it over the ION-attached flash design is mostly non-technical.  It can be more economical to embed flash directly in I/O nodes if those nodes have enough peripheral ports (or physical space!) to support the NICs for the compute fabric, the NICs for the storage fabric, and the flash devices.  However as flash technology moves away from being attached via SAS and towards being directly attached to PCIe, it becomes more difficult to stuff that many high-performance peripherals into a single box without imbalancing something.  As such, it is likely that fabric-attached flash architectures will replace ION-attached flash going forward.

    Fortunately, any burst buffer software designed for ION-attached flash designs will also probably work on fabric-attached flash designs just fine.  The only difference is that the burst buffer software will no longer have to compete against the I/O routing software for on-node resources like memory or PCIe bandwidth.

CN-attached Flash

A very different approach to building burst buffers is to attach a flash device to every single compute node in the system, resulting in a CN-attached flash architecture:

    <div><div class="separator" style="clear: both; text-align: center;"></div> +
    This design is neither superior nor inferior to the ION/fabric-attached flash design.  The advantages it has over ION/fabric-attached flash include

• Extremely high peak I/O performance - The peak performance scales linearly with the number of compute nodes, so the larger your job, the more performance your job can have.
• Very low variation in I/O performance - Because each compute node has direct access to its locally attached SSD, contention on the compute fabric doesn’t affect I/O performance.

However, these advantages come at a cost:

• Limited support for shared-file I/O - Because each compute node doesn't share its SSD with other compute nodes, having many compute nodes write to a single shared file is not a straightforward process.  The solutions to this issue range from such N-1 style I/O being simply impossible (the default case), to relying on I/O middleware like the SCR library to manage data distribution, to relying on sophisticated I/O services like Intel CPPR to essentially journal all I/O to the node-local flash and flush it to the parallel file system asynchronously.
    • Data movement outside of jobs becomes difficult - Burst buffers allow users to stage data into the flash before their job starts and stage data back to the parallel file system after their job ends.  However in CN-attached flash, this staging will occur while someone else's job might be using the node.  This can cause interference, capacity contention, or bandwidth contention.  Furthermore, it becomes very difficult to persist data on a burst buffer allocation across multiple jobs without flushing and re-staging it.
    • Node failures become more problematic - The point of writing out a checkpoint file is to allow you to restart a job in case one of its nodes fails.  If your checkpoint file is actually stored on one of the nodes that failed, though, the whole checkpoint gets lost when a node fails.  Thus, it becomes critical to flush checkpoint files to the parallel file system as quickly as possible so that your checkpoint file is safe if a node fails.  Realistically though, most application failures are not caused by node failures; a study by LLNL found that 85% of job interrupts do not take out the whole node.
    • Performance cannot be decoupled from job size - Since you get more SSDs by requesting more compute nodes, there is no way to request only a few nodes and a lot of SSDs.  While this is less an issue for extremely large HPC jobs whose I/O volumes typically scale linearly with the number of compute nodes, data-intensive applications often have to read and write large volumes of data but cannot effectively use a huge number of compute nodes.
    If you take a step back and look at what these strengths and weaknesses play to, you might be able to envision what sort of supercomputer design might be best suited for this type of architecture:
    • Relatively low node count, so that you aren't buying way more SSD capacity or performance than you can realistically use given the bandwidth of the parallel file system to which the SSDs must eventually flush
    • Relatively beefy compute nodes, so that the low node count doesn't hurt you and so that you can tolerate running I/O services to facilitate the asynchronous staging of data and middleware to support shared-file I/O
    • Relatively beefy network injection bandwidth, so that asynchronous stage in/out doesn't severely impact the MPI performance of the jobs that run before/after yours
    There are also specific application workloads that are better suited to this CN-attached flash design:
    • Relatively large job sizes on average, so that applications routinely use enough compute nodes to get enough I/O bandwidth.  Small jobs may be better off using the parallel file system directly, since parallel file systems can usually deliver more I/O bandwidth to smaller compute node counts.
    • Relatively low diversity of applications, so that any applications that rely on shared-file I/O (which is not well supported by CN-attached flash, as we'll discuss later) can either be converted into using the necessary I/O middleware like SCR, or can be restructured to use only file-per-process or not rely on any strong consistency semantics.
    And indeed, if you look at the systems that are planning on deploying this type of CN-attached flash burst buffer in the near future, they all fit this mold.  In particular, the CORAL Summit and Sierra systems will be deploying these burst buffers at extreme scale, and before them, Tokyo Tech's Tsubame 3.0 will as well.  All of these systems derive the majority of their performance from GPUs, leaving the CPUs with the capacity to implement more functionality of their burst buffers in software on the CNs.

    -

    Storage Fabric-attached Flash

The last notable burst buffer architecture involves attaching the flash on the storage fabric rather than the compute fabric, resulting in SF-attached flash:


    @@ -33,17 +104,20 @@

    Storage Fabric-attached Flash

    The last notable burst buffer archite
    1. it moves the flash far away from the compute node, which is counterproductive to low latency
    2. it requires that the I/O forwarding layer (the IONs) support enough bandwidth to saturate the burst buffer, which can get expensive
    However, for those HPC systems with custom compute fabrics that are not amenable to adding third-party burst buffers, this may be the only possible architecture.  For example, the Argonne Leadership Computing Facility has deployed a high-performance GPFS file system as a burst buffer alongside their high-capacity GPFS file system in this fashion because it is impractical to integrate flash into their Blue Gene/Q's proprietary compute fabric.  Similarly, sites that deploy DDN's Infinite Memory Engine burst buffer solution on systems with proprietary compute fabrics (e.g., Cray Aries on Cray XC) will have to deploy their burst buffer nodes on the storage fabric.

    -

    Burst Buffer Software

Ultimately, all of the different burst buffer architectures still amount to sticking a bunch of SSDs into a supercomputing system, and if this was all it took to make a burst buffer though, burst buffers wouldn't be very interesting.  Thus, there is another half of the burst buffer ecosystem: the software and middleware that transform a pile of flash into an I/O layer that applications can actually use productively.

    In the absolute simplest case, this software layer can just be an XFS file system atop RAIDed SSDs that is presented to user applications as node-local storage.  And indeed, this is what SDSC's Gordon system did; for many workloads such as file-per-process I/O, it is a suitable way to get great performance.  However, as commercial vendors have gotten into the burst buffer game, they have all started using this software layer to differentiate their burst buffer solutions from their competitors'.  This has resulted in modern burst buffers now having a lot of functionality that allow users to do interesting new things with their I/O.

    Because this burst buffer differentiation happens entirely in software, it should be no surprise that these burst buffer software solutions look a lot like the software-defined storage products being sold in the enterprise cloud space.  The difference is that burst buffer software can be optimized specifically for HPC workloads and technologies, resulting in much nicer and accessible ways in which they can be used by HPC applications.

    -

    Common Software Features

Before getting too far, it may be helpful to enumerate the features common to many burst buffer software solutions:
• Stage-in and stage-out - Burst buffers are designed to make a job's input data already be available on the burst buffer immediately when the job starts, and to allow the flushing of output data to the parallel file system after the job ends.  To make this happen, the burst buffer service must give users a way to indicate what files they want to be available on the burst buffer when they submit their job, and they must also have a way to indicate what files they want to flush back to the file system after the job ends (see the sketch after this list).
    • Background data movement - Burst buffers are also not designed to be long-term storage, so their reliability can be lower than the underlying parallel file system.  As such, users must also have a way to tell the burst buffer to flush intermediate data back to the parallel file system while the job is still running.  This should happen using server-to-server copying that doesn't involve the compute node at all.
    • POSIX I/O API compatibility - The vast majority of HPC applications rely on the POSIX I/O API (open/close/read/write) to perform I/O, and most job scripts rely on tools developed for the POSIX I/O API (cd, ls, cp, mkdir).  As such, all burst buffers provide the ability to interact with data through the POSIX I/O API so that they look like regular old file systems to user applications.  That said, the POSIX I/O semantics might not be fully supported; as will be described below, you may get an I/O error if you try to perform I/O in a fashion that is not supported by the burst buffer.
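To make the stage-in/stage-out item concrete: on systems with Cray DataWarp, for example, those requests take the form of #DW directives in the batch script, and the job sees its allocation through an environment variable. A sketch of what that looks like - the capacity, paths, and application name here are made up for illustration:

#!/bin/bash
#SBATCH --nodes=16
# Request a scratch burst buffer allocation for the life of this job
#DW jobdw capacity=10TiB access_mode=striped type=scratch
# Make the input available on the burst buffer before the job starts
#DW stage_in source=/lustre/myproject/input.dat destination=$DW_JOB_STRIPED/input.dat type=file
# Flush results back to the parallel file system after the job ends
#DW stage_out source=$DW_JOB_STRIPED/results destination=/lustre/myproject/results type=directory

srun ./my_app $DW_JOB_STRIPED/input.dat $DW_JOB_STRIPED/results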
    With all this being said, there are still a variety of ways in which these core features can be implemented into a complete burst buffer software solution.  Specifically, burst buffers can be accessed through one of several different modes, and each mode provides a different balance of peak performance and usability.

    -

    Transparent Caching Mode

The most user-friendly burst buffer mode uses flash to simply act as a giant cache for the parallel file system which I call transparent caching mode.  Applications see the burst buffer as a mount point on their compute nodes, and this mount point mirrors the contents of the parallel file system, and any changes I make to one will appear on the other.  For example,

    $ ls /mnt/lustre/glock
    bin project1 project2 public_html src

    ### Burst buffer mount point contains the same stuff as Lustre
    $ ls /mnt/burstbuffer/glock
    bin project1 project2 public_html src

    ### Create a file on Lustre...
    $ touch /mnt/lustre/glock/hello.txt

    $ ls /mnt/lustre/glock
    bin hello.txt project1 project2 public_html src

    ### ...and it automatically appears on the burst buffer.
    $ ls /mnt/burstbuffer/glock
    bin hello.txt project1 project2 public_html src

    ### However its contents are probably not on the burst buffer's flash
    ### yet since we haven't read its contents through the burst buffer
    ### mount point, which is what would cause it to be cached

    However, if I access a file through the burst buffer mount (/mnt/burstbuffer/glock) rather than the parallel file system mount (/mnt/lustre/glock),
    1. if hello.txt is already cached on the burst buffer's SSDs, it will be read directly from flash
    2. if hello.txt is not already cached on the SSDs, the burst buffer will read it from the parallel file system, cache its contents on the SSDs, and return its contents to me
    Similarly, if I write to hello.txt via the burst buffer mount, my data will be cached to the SSDs and will not immediately appear on the parallel file system.  It will eventually flush out to the parallel file system, or I could tell the burst buffer service to explicitly flush it myself.

    This transparent caching mode is by far the easiest, since it looks exactly like the parallel file system for all intents and purposes.  However if you know that your application will never read any data more than once, it's far less useful in this fully transparent mode.  As such, burst buffers that implement this mode provide proprietary APIs that allow you to stage-in data, control the caching heuristics, and explicitly flush data from the flash to the parallel file system.  
    @@ -54,9 +128,81 @@

    Transparent Caching Mode

    The most user-friendly burst buffer mode u

    Private PFS Mode

    Although the transparent caching mode is the easiest to use, it doesn't give users a lot of control over what data does or doesn't need to be staged into the burst buffer.  Another access mode involves creating a private parallel file system on-demand for jobs, which I will call private PFS mode.  It provides a new parallel file system that is only mounted on your job's compute nodes, and this mount point contains only the data you explicitly copy to it:

    ### Burst buffer mount point is empty; we haven't put anything there,
    ### and this file system is private to my job
    $ ls /mnt/burstbuffer

    ### Create a file on the burst buffer file system...
    $ dd if=/dev/urandom of=/mnt/burstbuffer/mydata.bin bs=1M count=10
    10+0 records in
    10+0 records out
    10485760 bytes (10 MB) copied, 0.776115 s, 13.5 MB/s

    ### ...it appears on the burst buffer file system...
    $ ls -l /mnt/burstbuffer
    -rw-r----- 1 glock glock 10485760 Jan 1 00:00 mydata.bin

    ### ...and Lustre remains entirely unaffected
    $ ls /mnt/lustre/glock
    bin project1 project2 public_html src

    This is a little more complicated than transparent caching mode because you must now manage two file system namespaces: the parallel file system and your private burst buffer file system.  However this gives you the option to target your I/O to one or the other, so that a tiny input deck can stay on Lustre while your checkpoints are written out to the burst buffer file system.

    In addition, the burst buffer private file system is strongly consistent; as soon as you write data out to it, you can read that data back from any other node in your compute job.  While this is true of transparent caching mode if you always access your data through the burst buffer mount point, you can run into trouble if you accidentally try to read a file from the original parallel file system mount point after writing out to the burst buffer mount.  Since private PFS mode provides a completely different file system and namespace, it's a bit harder to make this mistake.

    Cray's DataWarp implements private PFS mode, and the Tsubame 3.0 burst buffer will be implementing private PFS mode using on-demand BeeGFS.  This mode is most easily implemented on fabric/ION-attached flash architectures, but Tsubame 3.0 is demonstrating that it can also be done on CN-attached flash.

    Log-structured/Journaling Mode

    As probably the least user-friendly but highest-performing use mode, log-structured (or journaling) mode burst buffers present themselves to users like a file system, but they do not support the full extent of file system features.  Under the hood, writes are saved to the flash not as files, but as records that contain a timestamp, the data to be written, and the location in the file to which the data should be written.  These logs are continually appended as the application performs its writes, and when it comes time to flush the data to the parallel file system, the logs are replayed to effectively reconstruct the file that the application was trying to write.

    This can perform extremely well since even random I/O winds up being restructured as sequentially appended I/O.  Furthermore, there can be as many logs as there are writers; this allows writes to happen with zero lock contention, since contended writes are resolved out when the data is re-played and flushed.
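The mechanics are easy to see in a toy model. The sketch below is a deliberately simplified illustration (not any vendor's actual implementation): writes are appended to a log as (timestamp, offset, data) records, and the file only materializes when the log is replayed:

import time

log = []  # append-only write log of (timestamp, offset, data) records

def bb_write(offset, data):
    # Writes never seek and never contend for locks; they just append.
    log.append((time.time(), offset, data))

def replay(file_size):
    # Flushing (or reading) requires replaying the log; on overlapping
    # writes, the latest record wins.
    buf = bytearray(file_size)
    for _, offset, data in sorted(log):
        buf[offset:offset + len(data)] = data
    return bytes(buf)

bb_write(0, b"AAAA")
bb_write(8, b"BBBB")
bb_write(0, b"CCCC")   # overwrites the first record at replay time
print(replay(12))      # b'CCCC\x00\x00\x00\x00BBBB'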

    Unfortunately, log-structured writes make reading very difficult, since the read can no longer seek directly to a file offset to find the data it needs.  Instead, the log needs to be replayed to some degree, effectively forcing a flush to occur.  Furthermore, if the logs are spread out across different logical flash domains (as would happen in CN-attached flash architectures), read-back may require the logs to be centrally collected before the replay can happen, or it may require inter-node communication to coordinate who owns the different bytes that the application needs to read.

    What this amounts to is functionality that may present itself like a private parallel file system burst buffer, but behaves very differently on reads and writes.  For example, attempting to read the data that exists in a log that doesn't belong to the writer might generate an I/O error, so applications (or I/O middleware) probably need to have very well-behaved I/O to get the full performance benefits of this mode.  Most extreme-scale HPC applications already do this, so log-structured/journaling mode is a very attractive approach for very large applications that rely on extreme write performance to checkpoint their progress.

    Log-structured/journaling mode is well suited for CN-attached flash since logs do not need to live on a file system that presents a single shared namespace across all compute nodes.  In practice, the IBM CORAL systems will probably provide log-structured/journaling mode through IBM's burst buffer software.  Oak Ridge National Laboratory has also demonstrated a log-structured burst buffer system called BurstMem on a fabric-attached flash architecture.  Intel's CPPR library, to be deployed with the Argonne Aurora system, may also implement this functionality atop the 3D XPoint to be embedded in each compute node.

    Other Modes

    The above three modes are not the only ones that burst buffers may implement, and some burst buffers support more than one of the above modes.  For example, Cray's DataWarp, in addition to supporting private PFS and transparent caching modes, also has a swap mode that allows compute nodes to use the flash as swap space to prevent hard failures for data analysis applications that consume non-deterministic amounts of memory.  In addition, Intel's CPPR library is targeting byte-addressable nonvolatile memory which would expose a load/store interface, rather than the typical POSIX open/write/read/close interface, to applications.

    Outlook

    -
    +


    Burst buffers, practically speaking, remain in their infancy, and there is a lot of room for the landscape I've outlined here to change.  For example, the common software features I highlighted (staging, background data movement, and POSIX API support) are still largely implemented via proprietary, non-standard APIs at present.  There is effort to get burst buffer vendors to agree to a common API, and as this process proceeds, features may appear or disappear as customers define what is and isn't a worthwhile differentiating feature.

    On the hardware front, the burst buffer ecosystem is also in flux.  ION-attached flash is where burst buffers began, but as discussed above, they are likely to be replaced by dedicated fabric-attached flash servers.  In addition, the emergence of storage-class memory (that is, byte-addressable nonvolatile memory) will also add a new dimension to burst buffers that may make one architecture the clear winner over the others.  At present though, both fabric-attached and CN-attached burst buffers have their strengths and weaknesses, and neither is at risk of disappearing in the next five years.

    -
As more extreme-scale systems begin to hit the floor and users figure out what does and doesn't work across the diversity of burst buffer hardware and software features, the picture is certain to become clearer.  Once that happens, I'll be sure to post another update.
    + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git a/_posts/dursi/2017-5-28-julia-vs-chapelhtml.md b/2017/should-i-use-chapel-or-julia-for-my-next-project/index.html similarity index 93% rename from _posts/dursi/2017-5-28-julia-vs-chapelhtml.md rename to 2017/should-i-use-chapel-or-julia-for-my-next-project/index.html index adca77a..6c7dfc4 100644 --- a/_posts/dursi/2017-5-28-julia-vs-chapelhtml.md +++ b/2017/should-i-use-chapel-or-julia-for-my-next-project/index.html @@ -1,18 +1,87 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2017-05-28 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/julia-vs-chapel.html -slug: should-i-use-chapel-or-julia-for-my-next-project- -title: Should I use Chapel or Julia for my next project? ---- - -

    Julia and Chapel + + + + + + + Should I use Chapel or Julia for my next project? - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    +
    + +
    +

    hpc.social

    + + + + + + + + + + + +
    + High Performance Computing
    Practitioners
    and friends /#hpc +
    +
    + +
    +
    +
    + +
    + +
    + Share:  + +
    +
    + +
    + This is a crosspost from   Jonathan Dursi R&D computing at scale. See the original post here.
    + +
    +

    Should I use Chapel or Julia for my next project?

    +

Julia and Chapel are both newish languages aimed at productive scientific computing, with parallel computing capabilities baked in from the start. There’s lots of information about both online, but not much comparing @@ -20,7 +89,6 @@ and are willing to try something new, which should you choose? What are their strengths and weaknesses, and how do they compare?

    -

Here we walk through a comparison, focusing on distributed-memory parallelism of the sort one would want for HPC-style simulation. Both have strengths in largely disjoint areas. If you want matlab-like @@ -31,13 +99,11 @@ potential and room to grow; we’ll talk about future prospects of the two languages at the end.

    -

    Update: I’ve updated the timings - I hadn’t been using @inbounds in the Julia code, and I had misconfigured my Chapel install so that the compiles weren’t optimized; this makes a huge difference on the 2d advection problem. All timings now are on an AWS c4.8x instance.

    -
    • A quick overview of the two languages
      • Julia
      • @@ -93,14 +159,12 @@

        Julia

        language that gives performance less than but competitive with C or Fortran.

        -

        The project sees the language as more or less a matlab-killer, and so focusses on that sort of interface; interactive, through a REPL or Jupyter notebook (both available to try online), with integrated plotting; also, indexing begins at one, as God intended.1

        -
    @@ -154,7 +218,6 @@

    Julia

    puts user-written code on an equal footing with much “official” julia code.

    -

    The second way Julia blurs the line between user and developer is the package system which uses git and GitHub; this means that once you’ve installed @@ -164,13 +227,11 @@

    Julia

    contribute a package if you’re already using GitHub to develop the package.

    -

    Julia has support for remote function execution (“out of the box” using SSH + TCP/IP, but other transports are available through packages), and distributed rectangular arrays; thread support is still experimental, as is shared-memory on-node arrays.

    -

    Chapel

    While Julia is a scientific programming language with parallel @@ -182,7 +243,6 @@

    Chapel

    Fortran, UPC, or X10.

    -

    The first extension is to define all large data structures (arrays, associative arrays, graphs) as being defined over domains, and then definining a library of domain maps for distributing these @@ -194,7 +254,6 @@

    Chapel

    associative arrays (dictionaries) and unstructured meshes/graphs as well.

    -

    The second is to couple those domain maps with parallel iterators over the domains, meaning that one can loop over the data in parallel in one loop (think OpenMP) with a “global view” rather than expressing @@ -205,11 +264,9 @@

    Chapel

    the program, or…) doesn’t require rewriting the internals of the computation.

    -

    The distributions and layouts are written in Chapel, so that users can contribute new domain maps to the project.

    -
zTf_3QP+Tlsz3AQ`Z6(br*Z}85v^O+q4_KkH(EgGIkti@UeQ*q6?17O;4i%IW0^>%x zNrK#(4K29W+%^@MtF#-#=isCDwiO$y8O11)ly8x2Mm3DhUS30kd<|eWd|-r`3JLIx zPz9q4v&gdE@Ksb{6&)d(1imQyj3@6Rci+jMfTy$^fib2(H8#x=xKS!!ZxjA8J-4cp z$)cLn@Te_kfi)>tNq)kF$qIsyP9yM-Fg~W1lp_ib`0AZWZ&DVM&WPakPGtw1bNKZC zW)m9CXaFU{)!>*ExK#l&ZVX2j<)Vb_@fpBtrwl7X4nYkgQ1P=S(^|oyihA^*CB+;{ za4rkx&;;HEAi8Ys)ySb0G=C@0fk35vh)6a8c&5`ll!j`0^RzXRVNN05=KiO>zB zg3q-Wk@u|_hk%B_$g~?a)Qc<`>BsaT18{pWeD%%^Jh8uqQy7K{7REUTp||zaF1w3( zC6P>8glPP81`+@7qZ+<^U+_CI!ORtsW|C}4gIbC{Q9hR@)1`Dg{kftdI|)2&b)HV1 z1b?vxc9v~Jw1;n7^+bK{GSX>tF#=QXCWHm2YQ{})CXS0M@SY3z;ccEE%VE;d!Q$w3 zK~C6{Em0AX#qJngU!$xY>O@6U?6GjTnnT$&1}%guF3AG7BRn;3;5B~2rPyCO z-?bTIicN|N898&_H7z~$c?(7=vxdc{#w}S~bF#Nl^r`YmrbHZ=5{KiUkT@KNJoEH(c=@H5@Uf3PhEIO-Q-}!ceZ@MwIR>(10Urpg=jXV7 z^E$3xy$Um(5c+P;sKg-Yz2Qm?D5R9z1h4ohUOhXy+VM@p+ zK&~Q51%kcLG>5E4jp_yfri@g}#?7H{S?Eiqh=Wm^9`u5jxn9!~TPXqVDX)YJWDFKa zff1BjmYR+N+mC&l@XO8 zRuX>EfWj$Ik>)8>QxsGX49SK$l^A3_6hzZ2xrRd{$svwM`ih-CauLPwq|H0wmr`jc z#)3|*8w;1a<8winB?wo8miJo_W6uq7^ni=P>b#CMi}Sp;SGbR5I6&*<`X+=AQv4PNn!M4?XQo1)S~cnoln8tOrCG z48U7O@UxfRj_*Id48xk&IvcxPDV(ZVtKp*th5&10N&9|@8)O_jxkMR4hu zxUeNRs>Mvm#2w)flufAzZR1<%{<;dXo~zRIO3M*rYwrD zv2B!{F|$nYs3BD|QHE?LB|4id-Nb`(DAy5~J_U|7;HZI0#afISwhC9Ym0?NGb>N z*=x-UQnhARa?6$t5WzpArx8py?Afw zowh1kOoq?+RMrkA)*xW*)RY`v7+D`)0+^s0jCQz$UP}(^sT9qGA!{{RS+5dKThPrw zjmB!iWxQqzv{{!~U_d-A6&2&|9+?QHF{6CZVw?su=X@ov?KMDd2gjiX?74i|iQrfz z0jIIz`0Nz_%7sgK|8gJh8O+LXpUPN5LjA65oh50-ms6eR_56~#2&e>@uzJrkO=|-K z?Y~zzxDQOlRK%k@JJ>f4$6+9GI1a~m<#^xw-}ksJ?1@sF%rdP{0YT2<2nNg4ZC2X5cIh0lNCar~nX ze*{lI`!rM+>=xIa(+&?2zql?0}M{DqPaPACeoqDlD(-9T8dV}4_!DIAt0=y&?ueXi9&U0aBTMF)TS>L_sS%aH5xFN zh15zp^qS|b27Q%Mf|(|ww#>LTcVOsjd^{U%<>GT4>ukqF$>k_gf5K$Vvhi9Ytt1|U z4QrjNJdbN_mdtn$WI=3U1bz#`N@^6 z3z0|;glD2njEXm1v&+ z9o~wQ;;odIy7gOHRy-&0j>#HSBwY&N1NH#^%!R9XU=Lteeimf}*5rkL{ z4$0S0lJ-RhRVBF*9WfvwSk8gtu;S#UFP-;sP%1w7|1<9)Z#P4y*M!8I@Rs~%DJ(jd zy3dsen>8jTOfd4a!IWY{D$RS0u=?#L5dXe zrUs+IKRaQv!J(r^`V@mht>P+G3QdIevnf24O{U}%aEMXrDM;kA--A{ysvm{Ts;6Av zIs}WjVkTcjS;y)rB^e7nJSYrF6;P`<1AM=0FG8YhtKm$Z5-RXY*TNcrsz#UMqtH%J z&-Ib!hg2Tq08c7Dw^WQNPJuDWBe^Cq!M_@JD^fWiu7nb(U}4;N{qM_?U}+!0IP93V zv9fouLvs{d95aH5&k$dd{JV<EK;m#5jyL;w z_q*Tyg$pN_3FqTWtsq{TsIaE7SJT6-J~r9d46veqbh9mL`GF9ywuxQ?I?@kn>D3i7 zz-#5tf~x)Zve_F zITp(;cMPZ~%$5X6RR+Cr(@aTwdElk2945^XC^BUrFha=*h(ZZK%rYvGYL6&RNGOp8 z(hX`W7g$sj45}m42+vGK2)Zc*)iCL|FIGmj4bH|Qn{t60hc;rlucMz|%fZSGve%U* zmho)-9U=q;RS$9<*6;?z_3vZ`EZXu*p{&?6+MloV@_P*?D7O|$fQ3Vo=&cHe7rd?% zLdo`ETv@_C^Zca+r#3Iaq?i~S<_-QX^en@fQoKN@vp-P%VdJEd5ESpOzN%khJ4y&8BU~V{Xw=g)o}0BckoEPC0UJ_Ot{ly0vR? zXfc4r(-wBafZx9LD!#a0$FMP321?*;os$q+9V+Drt%@p>17sw`mL7Dc1YC6H0NoGZ zC+~R+9>EFp+_55)FnX&Tc^#}682gwhG`YH*(r8-2)KmGiBcwRm&Z9VX#X?I=fq5n2 zDs5+5`P^!I;Z{-bM{$(RiVO$g^I|?vrP(X)1*#nR%#)0Ol!E!Xv^PFMnA!%65O3#O z+pHw|Ua9Rt!F^MyMRg3{6BRjgQw-m{eG{)GzLM95V&B(BHq|a0i%Jb0@A{8VJBp9~ z(@s@h581)kUJedM-*0?8DJQL>2+b5H6r{226~!>}F_WV_Eq@(LEwvZV=R{c#g%8(? zylhA)SwHa6gOgUBNZKOArH5jwf<4J!7N_*7F=`bkJ2259edf=Fxd&B5>(t#))S9@L}< zZTT#BST4uAI2Tk!y|In~H_f=>Xp zP)RAyfv`+AleDtc3u%NAx`EafuV+)6NaC;WDH>03A3ro207+XfDd$~jbNMc-tN<=$j(iHRf_p~ znUMaj!r0Hyl-XCdwYCG4n{#QH-pjAOf@hw25h9B9Y{kd_@yGGuU-=ch^x{i7PZ*<{ zZ>Zqrty}oWhd+YPeCD$lD9P!@c0$mWgiINH`3!qZk%Nx20G=PGxV<<+4a+$R|FzW! 
z1y-jocFAFYC+WWmyUMvoTV{mOXo46{8M6REk2^4#7bUAz#aafD7+AYZWo_retw_S> zG9g2|uvMWf6@af<$fk@ui$OYr`Lo#!5A?>>v@19clV;qevrs7bmU0`G{DT4b%Ihv0 zSfi{|qneo_cmr!p`80T)x_cM>gDDwl!yg&_7FEn81W~zd54EOu8&w0Vmj_ldx@?y~ z$W$#@OO%zcx=eP+qRk*!Mn;=vpaE4J7%9{DfK{}g`du-CdCKT(p5gTv!-?#W{#<3~ z^_Z%pp#d!wkw$-Kprkz;Ic^QB=vAiKa7)N+Hw>M$ek|65kW!ntCp^-4vk4+ppDFVH9b7o>j`T9APsewj81yv29K2-}L zsWMqw3Y3wP+*WG@<O2};wYlUj1V!Nc= zx`3N#CPxm--~)Fq7@k<)z^`3<3fDBU5d#%Z9IU{n>{vEututx*TFQ%7tHFoP!KLRg z;5b&id%1)kIl6+QNOUVF)LD2z|fW+^-Xjt zEo#mLkpXm)P9_C$WEsFx)e3)q8FlGV4KecFGq9<5V(lT2HgJ*A+{OEGXM{M%*}-g$ zVp;hd&Iku2IgF!q6iWh{D{V@0xGWNBiy48L1I8#$2`na9AdD&nhcwd3JqsMB2^St! zi!IU#R8oq^kxvgRa5)5_gP_jJJ*ve<^m|RgrHLrcWnie{#CF)pf}sG0v$n8f| zJYIp6s-VK$9R1#vTQnb`lo*q;&7z&#xvk_Q@{JW_|$KF5;tyMZ>VH(A(Y^(!bwMw!P~+h-7HEtP651d=O(VLD?}Yfk_CW6 zpqi9z)oM|413A`xW|%klunnphY+PKUhXFo3N%Xn1tW(8;j=)LEK!rZZ0@?*P1_J|Q z@0=@5o`a*J-Ow8&4bVQ4HKG+rtCMb)8t01V^Hmp-_5R| zouV@aQy6FJ>qcErU^wBnbOT$oY%VL-amv@V7-s$NCN$Bf>VpH2AUM}36&5vMrL?!3 zRk8<00ap`t2d`^7sM@yvU5fEp(Qxg&_Q@MaN&B=(1lb3snf?+I{rE~p5-mtf9w=p+ z$IKNnQH%)eM_$+F11^eZbj7c}@+7{AGwiD&s+ieoAhpys8&DsFRD4R2>YFgwbolQ&~n)YuR$>_MFo%6We& zQz_h6?9a}y8*3`2m>vMHYt#9?=5tjyqJW%Y#UI;uZVq1gJ5+q3#8mJpHpwLDcN;d} zNmRq?vKcJZ;G!VLWsjS0tthR#7;nE|bh6+D1ipFu25wru|CTY|Gl;b#5^LtCR0Zi~ zyrsOC6%H{f05}5YK7%$pC&+Unk#}#Ff^(BEBVTe0W_+!?Z{AtnaP8PvtnCKW$_CY` za#Sf$^gRE?XV^ui5$V#@;WVfa)>?y|1aZf-gjpraM8hkSVIj1Mdf#Wx)SC(Moc2kf z!Y9Ie;UgZfvQ5aKB~Ip$NxdS$lF>wdN|CF|cIKr@q!k8(=^@*JE$cq_eGaxOF4n#O zzWl6di2;XFv8Kw$D3=`NFAC`-r5YD2axs#M&>dD4tV406iX&MvP3?T(!By~qOK-tL zOzRk2NJ&W^Vy34FW*SkYWkoM66sq38ht^AP!=I>OY=?cuwB1Ei4E-ifoX>d=dDQeU zlsFuRfyCiB9BKKsfPwb=*8VcPqyu$m;@E~#D{vdi5-_&xzHWo_y;4M z1JP6bf+R^bCUcK82OhObVh8!kE3e?=zy1k)`qQ7r@or%gmtNg<-Q$U`d>OC4`VyOZ zVU~4F=v$PxGkRDN^&-@{2@Ow-*KK#4Q&!xAm?~IQfOSBJaM%-AY?+z&b)IMkC~%{X zd0xvwqTI@^F_LhN^0e0$J$jNq7!{Tq&-~0b_0k7*sK!Qxt>|lx=<35ZR0Rt?n&l#+Wf|RU}7Pog7#LMoSW0*PbF4o-=-LUg-tg zrni4uH3tYXA%UQj-a{~y2@A@k5GQO8X}_`Buti3QRs?09=V)6-DPw}bMVQg|&-gYo z4q^gerkG)|1%m}(%KN<+ZK-BuydLc|VxIpEo(aTuG z@>cuw-2;rpeqqKPN*l%Elp7EX?A*neab6U4&`i?e;aFCgz0CcR-p}4sSe(Q}_ z@tJWAw~^9m@VV^-`EAoC&`jRAOXX4aho}Sxtt_C{fdQsCLEy*bGCpwmZMd@Ly*wmVWLSga(n@t?<#HUBO0um;8oRpzS zM6rlMZK2{N?RL3)n950YFoj2(%aBY9eii_c@}nNYr5&umzD(#Df$$YRDmObj^&Zh! 
z4P~;iAA}zhMVVYEzW>FR)N#?`k%RNmLong@=#o~H(CGJK094tQkOAmo+=?2-nGq&;T#fNFdNvnJhN{Pd9 z7)TtB!|`Sv_uhZ+<3~rkW<*z0Uotjq{&}F{GgAW@l=-%lc;oe+Tc2<5C?KXvLlAlt z@lO|Cg3U{X|HJ9&DgNO<{0I1-|Cj#>U;EnEaCUkdW1M3hXSj0ZGM3{D>}W^6U^Po7 z=T@coVc-iY3R30V$YB6h_7?i-7oksd=7SgXsif!H_KPB!_gXPk|k9}`aI%ZM=rW2l+ z5sunLby381;TAp|1N-T5#vCR|d0lIPQN<;Jc*VEK#1SAUV~(mdigzNDmOZ!GGbJU) zNVUl#U6>;rJ3`lj8r*8WrED^6+(Hu)ns7Omh#pWg6u1&fKeZR&lf#}x;bVm}-c@4B z%@k(Fy|h|LdI;kPIJY;5%CVo!gO!_f@frClv>Lg#5|v7a=;m~+fl=3)&THIPrf$%> z73vs1uqXN4nL1y-=DYm42#n!a_fBXg(?eMFmL04;(UP%&r4(vA2TC=g%~PnRW_Q6j ztl0wN2IbOBr(h#K)^)0i*3ny9)L_7sJloN6hA}f+b5KhNZ<7A$&xU650JqK*^!8KA zor54Xj2PL76t_YKQyOF=&F2u5&!Qge28|fQalsWs15fHXK5_GTJd40krJ^Rcw+ZRB z4h3p;R}IZDt#`8F1j*JzUP%c8$Z)&`z`t?-_u`#yI2tR?K^Zf@vsKdWRE=1~K+Qjk z?b3imkJ)%TpKwE&l#%I0N2}q1-3hM9g;x3?@_MSc7`|AAY-g)AMk;O3^p`YW-<@PD5`eX9tr%FwDxeQ zU&|4tddfKDL^za#RV^{y8W6&HGfgOm!j%G3YEN}^(w4-^SlyaR$c&p5htkflRCr= zr7$GtL!x4#`3!L7Im^$g126}C#Ks0zQQ*SpQ7LC&KF0|MO%+e6%p`xl0I}=(@J}p+I@H zQOR6wpd`?5K!VBKv)haih!-PvDxT0UwrWeJoeaQc>M9l z@#K?F;nmk)#naC{i|f~K;Q#uK-@r>RzR>>Lh^;-OMxWA6q4o=qn^RO)yWrsvy#K!M z!S}3!W$y?R7GODC7@9+iXz(Pru96MB56_LOPkFUkzW9}r(O{8j*eC<|0uEI|NsH7i zz2lVHe3lPXH+k&E_GY&iksF;3pJ4Q>c6hn=R=ddiGuADAnzflw%^^lMCt$KjV2C#F z0h`Ylvf<*fp~!iFQEsUlS$3B~?Gu(79`J8*vIGjawuokNSD(4kQc86|8^}^gDx&OH zb2FF?y{FZJV8&8d6f+|jk=g!#66ObOKxUZ)A_TgHr|(uHGPX`ip+UO+*yPP~FqT^! z$85bQfsIs9jFPoVDm#?QiryPD+OCWHO7JDFcaQo&Mk>6js8Kj4{;e&-c0hpSDG^n;*zR37i65?9@V#e=D4FW6HFA5mT z@q{<79~z!~5(Hj77JT%^i}>i-i@34529xBWY)bWRxMD>%V@59~*LLz4ibs&J&FF#e zy$Jl-qX+R9EIyPNi>uP>#JU81B&3v54bz zF+6_rH9VzzjE!y1V*-_DD}2}d=ntsK`~UCJ1B4nPGq$^9xr@mphSePpu7V%B@&N8R z&*9#>SXJ8^S~>%v zLd`UXItvuW>2R|{VfH zC9KlIaeRD?XW|SmtY=uIi5_jeC<|#+G8GE;ew{a7 zf{V31Rm`D=FfDXbWT!khD5V#p|5|e}Fk*5@(5bJa_cIkn zq*uWZEO1NT?&<-9QCY)n4Az1nS;IcK0>EMKa5LRJuw< z_Az}#1S_NX`o1VxZR`*_+PhrPv&NF@2BfNn;PA^p^>2(6HE1Q8WFhIHkm0mARpCBU zOh`{j*>L2&eG1%lPR_7i{mfTRbBFYe&@vK@!csSHDwpKJSMqxmdY73uyP+?2o z_Y0F;jFRmdVI(Le$?=O)Ns|MG2*)T;EA1}z71lO57`0bjM#!E6GbVCGg5d^|%adDU z=kHfa3TYLVNDf&#I@e=D+1T`%0;js*D{&kD=dI`PI($QjPpQ4QcSLY-7&WLq3YS@E zR|AF_-JGAtnRFun7VLb*pTvScbLAnt&391WdMZvd1Z>saMjr>_C2;o!?Eij0B zZy%k*q9p?7>u6?=M&P3EaM|)RoN5lRmSPm4m?F7F*T6=b^4)kB{fmA?58jKx5`?zG z1+c&^2!3aO9j}i)-k}O}#p;ClEws!0-1*4dhhYR_Ywit&zGiH}v2FIyzdsmMuzDG^a?4v z>>Rp`;$p;9T6w!q72)~WMU|En#^kQj3Jld`RK&=1uEFOf3eG$nVv4m&f?e8HBKA4l zF7^coX0G9YXRR^RAwy`ZvA#czT2~(q#R8EPI}wB~7=DH;?)aH|-iD`Mdjr2S9Q$3q zTcw`!Lnh$7hRs^vBkyDwNV^Gy%TRf`C zLivTmv*w_XI2?!LaC{rbFaQ02aO>&=SAXHT=bv9n=AF%^HBf6xMx_DgS{}uEP|J6z z03=adwi{l?G6uIBy?CU@wWR5lNi6`*&-b`<`zD@$?gf1Fo8Q3M`MzG8t;Ud;jctLR zO3EBltIDWSr93)t8Hyjfaux51BU~JTAv2%jH8IG^ypXWvt)HXw6_er`ULY}2-vQp=ToZE>vJ)Yk7N z%hE~&3j$Uh5c6!8Xuj$G=YQO@#B> ztGH&gf@eP0H!6eLre`z{9tQB2EP{p2dHoI8fqJxJSH*>r-Dk$gs&S#N^}pJB>W z6N&lr_L9%gyowV%xCF2`us_-1OQ$#RwQ&sT6Il&UsVrYD}CPJ|L7s1#9;ret_-ZLw3-|4nR+3K!xMuA$I*nH| zrj#}wDgH}j;?Tl@s7St}Bm_HCynepNGdRPk+FV9V9%>IEg;C(8Ozr1ktt|=|jZ$cC z|9dIzWDcSt(t=yV^(hVQdTicP3P65VMNT5(pp-ZaBo4>nI2_;3aq;4X|KQrSYnQbe zVc*cbReQTDnEJfheB52-eD|s-E3jgeRCE)G1~6lsF^Hj97Q?;wUctpn7qO_}!i5XC zc<~}0dH7-6d*xnSzkUN_U8{FV=`yNu+sGPA6kHYsEiFS?3j{}S{K(0D_>tWuTsTb* z#n@>w4wPvWOKxbKGIZ1sXE^2j+M5lflu_#Us`x!_wY4}B4Legd=|UMbQ!h4qOy}l6 z*ycp}aPMwMxG_z+t&+@*YK+0AE-E9vild4zbs%!cQiOM57JKvtmT3nYc|N$Yo(*`^ z5~wWzPLp_Y6ey)<@#3hNO48YPvT$ILBN35uv9bvBx#?Eg8hR2!;(Mi|comZ6>0h$F z9!kqVZEULYyE+31JY zP_4KZ0aIX%fn`y|8WRd9Bc;Zga@~lL+v)C93%2(Eu=lpVx~A8C&~L4EJ^SU%+u0tE z-%Y5L#7*3nU?^0nTiRk^(_I{rGT8j_sx7Ky+-`9Oz zYyH;mW$Tp$Nr^=_1C)9B7M#k{%YsOvf&hdrZx7X#+TyYRPK2egW(`5>xtSENCn?^R z&&+-6h&*LAy>eKhqi3Tc_R4#btfeSF9DWw%T2e-|R`DsMMH^Ex#Fbm$4p5VFUnfPn 
zsN%Zh#_!sheZxceA9M#qJO&Sv9dFDzbou73T|#k@4jc0*i@@Z6eOk z@U`t7yortGl-mAPl@=o)tFuA&DvnA|vcQ@GZc6g8!i1wieW_#*=q;nsOe4zm6D=-H zu^gqRpEl7uP~r}dxV!GIyXzn2a`%7x=6HI4YwW6M(KPf?#I2~k@>)>2t&9E*te!YA zSf5U0(YAX!zw;On4cvd}MOq+kUUDW|6y;U{eaE`$FDYC((l+GdYJ`!x|RTVaKe;ma=W3#Q5YDAThdG3EghLJfkX#({5@nEA1%9irI4o?4sk+o1H#92hh}HD@A`1DLZ!ZZHB?Bl2F0PnZYB78o=AZ$=0XTQcAV4LD0+Re)A{ zisW890F3E49M14y+wk?+@YDzvM6pM90JM-sqPEy_Ql`GX!2VcF2hqjC^Bri0-H=9` zEm58vG|Q$$RAk5401%;{7Sl`IffC;n*PVyN-F0_;2d^hj-+8D)y&W~~&ISeQE7o0> zHV>LE`5NB^wQlU@_`3=-HS?4-z`J;H$%gHlufKtBeDjT4KkD!Q@|SM|9Xwc~4zs38 zMw{!o%Wk4nh(Aa*3>*R6gks~=Cqgi{GTKb(LS|hnHLC%SQ<{TW{_Z*V=7B6aEZ{yd z&8VxU9|aRSZW(Zb4Zo81*-Q*F6k?gxd1bMyV}vC=A7CK#>ONu5*`I!FWPj zO{%cKtwJyu-T#Sk5>*3BrH4s+5izo*k1 zLX>_TU~@==#_}k!nqTJBr#@%7bA(6A0BTamm|B-LCEWO4HvQgdI~iFyd87-w+Q5kA z^$}90fjPNPP$Zh6<1y0{K&(?klntlQH^5c-{K~kDMW2%nT$6JM4$4$kQjBY`AB-l9 zmbXL;Qp^ZAa0;r3Hd-?=OqujG3529!Vrfi*v5*R2o@O@5x+aEgidl^i_L220e*3-C zW=e@MF=R@zPNd!Iu$>41nzZ5N#4?QShKKQsvN=Kn#~C;`lE+o?!~$Queu$qtzK!D$ zNLUi13ydITJi&;PdC`GiKtMAK|H%R!YR%vg_ht|Qf-5*ackd(kr|lB=P60nVWWl~| zFab8FU3~y%*__P@IJPX*$(a|;B%e$YEE!oa(IDj<;ei7e)8u|72An(a;c zpQM`f4Kvcm0j8NW#e|1Cm7oUQ(o{CM=S>y6Bbf)~=QR`Pai5S- zY%^tf$tV!UmqvWyH;4lk6lNd>+31%ooeAfQ^b;WgK+^EBL(Hz?|Ei zr^Fo~ad+Kach|3DTJkVz(pJv#qMd#Ktb_>Z{Bvgm@taJIEk;DwrqUlL;@9}sT?uhl z;shyqESjnj4IXAWZOuj%LvTC^zWMYq{G|j8G!#Qu^XLfpbi|O3DipQqfdMs`Cb=;S zhrG`-vT4kr1P@<=5d|8VXvS~}%g@Ku15e9IFKC{n!K7?GYNJ&>Cb`K6fzQM@pMR`Y-v#1l^@dN3^O)}D~4sV$%JCq z0p^Mqb>IaYAaMk2SZ z7dmh*iZdM?WX#+fT`(v+yL7>GQ*m}Y!-Z{76WnYY!Z$oW1K%$f_$TxnzwFoermh%p zqXB!dSX>qW-YFwY6bWBZ_U$po8gB_LwfAwMw$jPUYX1$VgtpAqSZ5gyRY+5utp%PN(=tkK(*t}qKGmSR69}NTofBp) z^RNg;Xq9f4papgSctD50jV1q#d&WjKb3@oDP`c0&iER9wQ|_FWTCx{3xxNvD43liY zeP;j=CXBcY=B(R#>ZH+3OiEay{}cSK6@iwX1eseNlaY1hi^Azw(U1{j3c1l3klOgt zky=Ryq>T`@rZ$|yI4J-=S9(VjmA%*qTS|TugWe4Zg2X8hI&b)nO_DPm(QGo9d=Ex8IS!fbb#<=zpV z+1!?6c`=Aoi)c$J(yS=I!L*CHi_2r~50OYA@}yBye}xWBMh#;Ga{`)a0cIsfN+2Ww zf|Yl#5!hH*2h%LvHT^gWkm9#VK#;5 zX5JctFTV2@{)RumwHX|W5lL@P=}wx*DcLQdmgz2mmAFAhP4LgRfeF`l=t_I}pcp=T z^^fBplS}C78nA4#WB`~WWi^-(8wtK?5D=42meH|1hr|-|eQ*@^L}^y4Y%)Jj@e45CW^xx4<~XngEw^Fe0RaR6UBKE_Wxc@y6{ zT;S-An;Rj8f0FrP@KZ9a5{RJmh|H!*u;Rui!t!UI#BoI6o#P3osC|9I6ktO%8*adH z6Pfm~(Gc<^2`rpq;zZ#7h`IRwlK9JFqV7>Le7$|013CcRuvV{oBH69gZ z%=DfbER+z=v5OXt$j`$){>0=cjfCURur%+|z-{z^K-gf#4Wl|E6Q_Wg`yn#o^&$iZ zvb>z~A86prm7hV|N8>pha6sS!2Y$B+5rvU)d4U7&iQ;{sc%Pi%N)Gtw+4Fei{0z@M z1D@YD__J&L$k|Ky;`KXt=z(WUKoc+jo-$UCLfQ~5$-Es$Uo{Q7mZ{*lildfJ#wwpJ ztpQLmS!nKUQ8C<$NgIq1XZ`SbQA5l-P~r}dxV!GIyX)6-JuF5*H?2Gropiv~veTR> zI-}yd;34r{V_J|Z>gBW?yBJio>0zE#vt8X!qIjx-YtO>y0L78x?CCT+qD%br6)~Bb zg7gvqr}82>ZAtKFa$>=WdL^Dz6N~&Dn<$hL5iJ_DmhS+k%bE?B$Y4{R=5!Sv;%r#5 z!{g_+MYT@~WjUn-LsIro32!+IZ92Fi3@mO zH5Z}?B_Lx0q?Dgi_>^S~nps%^X#^TH%ju+{vTSSvzGC2$m61{fW!8Ho30Ov>6-$V0 zoRXC!T}b=ig-XXZCaty7CbVEZcFp#9YnkD2@NfYI;92)Rf6l8k`uD0oc=NQ)$z~i zOZeRRecZb_;n_G~GBwpbsrm_A4l;G3%{r?g9a8B!(lWmdz+miX5L`mB@z}76PnZqF zM&K$ooY@&JS|*l~pGsE|%Dvi%l5&;nWhqR&RhN;1+s?BWCd2kE{cSF zXPPB@QuV#g<&Y*fx&%Fqa>AcrU2oL6w^#gdO~6O)N3~~2#~^dv6yQkYQ`FlxHhj|9 z(Axv1jb!U!E#ObffHh^6p_4VEGX!&#D=~Ym6XD{J0d1{N<&9Oc2f-*!Lrehf%LP(cq!S1sge27Bkgh*0IB}QdSQ8u#Tshn=;2_Ow> zOr=dMwPHT$ry*rb$xLlw^N;{VHeE85QCl^E6i&&NTF;_A&X$AJ!EKCNU2+L#lAGpt zB(Naj*aWaCkU>MUG2nA1#nJ^~P6kDm2o*E8?liq5HZ+xg(wx(!Xtii5kF)A7ux-lH zoq9Q*Q+w+t&&+f(1j;DS7zH*xuuM~O>K`I`|4Wa`muQxzII?NYdm!yPFD-A$!fnpU zL=C~#sWNG+=DrLZ#T45%smo0u36&H>S&h_sO%ws4TQW96sHt}PicoG+Yz=^>kb>nA z=9$>x{JT-k;u&XJ1)BDf;eBIlsf4uhCn)U^ku-WGpp}^4Os0mdQI@Htv}Dd)x0(!; z;YA-l^Gr&% z>>B@)F$Jf*5J@V#Nv6)Qn4P5~NBW+uxVkj6)Bs*T+wilG-oP*T+jv@uM;4i1FY6@^+f$s7;FHMKl>6!; 
z>V8J=BVxDmZeU!L-Z@D8-5hYF1{eLBo?Z_QZEJje3X zn=MZ&p9*CRC0x>6HwvgOFBm|JUDe)N{JriCc4for?WrS?@QHgxQ*)1HRC;;H1So5o zxj(r2lBZ!ee3*Bxl(&>$rXhO zP14U~2Po~pZj|w5*+CjI)UZ-If4Y}eiis~ypWq&WQB#J3gQ@i?dD&7ZFXAV%-E*|)8qWJ8KAI8fu zfD85s-)0LrK%}7SB`OeoM(?mJ>6k!9E zz7}PeS&^A}z*cw_qti?)9=s=-H7Ssh4S2;umJw7Krm zK9P-;@E#7e{Pm{0(w=@5G{&?x$6}#c->VDh*gYc}8`50WcfJJPF>0g{qwWm>2(5Lw zVk|etNgCs6iE6O1T&XIt#6nH|RS3Det0}LlFDf3Az=}w~#%LCgs=~y2U70PWhnjT@ zR8f$J`-WYtO}c&O5yO8AD;2v5I`+&4617Dd^Y%B?-vAUK=AHV*h@ zzs8r25Al@XjRY`vsYpUVRG9h^j2upePwbk{yX#k8T5=6R0?V@q1$;k(LWIC#=~X{z zl*38N5BnA?*P=?BG#6kHA*feMxW6i!drVy*ZvAJ>j@NA8;2G<)OYZw6U5mYhi7~ns z(eJ1Q-}tq4XBr45>3_v%OJwYNw2aWNHXgeDZzDf6mu0e`mOJpCjQxobNuP zNU@ABGd2^9>nzYExR8b79E!z0?16@YlGl*3H<%}4*dKa zFQ2`Dmu@y(O~?K7bG*Ji#T(dg8VBgy+AjyL9r3O7Knhv4K$`v5?#0zlBi$9<%=^+t zG-U{`D}BkaWHK2!#bl&nkgOz+_QgVcTGZ|$LGJ*GyX)?{yZ(WFxY_TJn13XC%GW~z zPTDwe+8OKB)S?QSTOz%8`^=~s4Xyc0bopJG)KyFb8>f$Y;19p>aeQEAZuzXN>mfMC zOx6VPhv@jm{INQKZ93heK?5oh33(tU08$i|EW)FrAv=jc+tDryhSYLb9Hf^kHR=08 z(8DCn$V@a1?l=Y0raXEVWKtkN&8IXQhzzQbjxnvzoX*#}^y}q)z=VUSro5)oJCoF? z+<7yCH8d?O6Ud0pIqy5OaYEnP2{Jf*uAFkso0h2vba_XIu#@4!W{Ym>u`5c;lOu{o z5XpowvyQwhO>j+{4t6NJztq?=S~Dl~O5i3Nbte#1?_Cu%D(S0`F_Jk&9wrMgKs}41 z_G*dlX<(+Qt*s8k88aEw*=p%%(*iVW(r9o;BiFW!O#i3?OhVgit>;9Qml_)D(!KI>) zrNMdki3&zv^qOLa+y-o*Y|1ObSTjPjVpk;`M!yTd8mt&WR)gfddK?qqJU+q!jt?I0 z;rVUiKF)D|{v5t`dI#V36Q&7dmM2DZya917AhZ~w(GgXPXeiNu znC#vkk@6QSc?a5@IHD~wc?2s2@o9lv2JS$KJ3!*@y1Rbs6$KX%c?FOU2;{?J_Jb<= ziDCA`HpV9phr@@?FD_oXckj8g%gf7muCK4ZT{eB7jE7foXkW}KGQ*l!mr9$gh z#y#Ragh%X(nf(bQg~a6iog;~8O7MqW@Y(wx#fLV9oB$uv=9o^@J&X;fcu!-52R({v zjKzxZnyhnj?=0vmPA@9@kaULheX#T-K){B8^I75wb*R#ykP}*$=xlYRL12)StuUrd zu;^=vPSU-tsIoOBt9<+LWQJ5h6FV@XruzK-VkJ#jO?9+JwHW!*fK8fHqKi}2)N5kn zM-`)~V~yUt8~&tj>XNe{CxgnyZSq5iFk+SWjNy`Lf*JeL&ROgzwwRPIFinA;8kVyw z$~UMLrpczLF})$>2qKwAk~#oy)3fHpk40-WEC45DC8?Ag#Hul!q_dZtA&MEoLm}x5s^r{TT-g51f>|{FBfKv!G9Ea(6Y!iR=<`Mq%lUMPY2HestH{m~1jrmU*sXlt`}ZQXcCno@oYf`9$;WB7gg0?tkoH$yU_kn%8BL8vufE`;)s z67TjTPSC%l&$_;mGR5JhcJYp?I1}@fP|$t z5rkkLeKOi)Sr+?7Di)IaCh{E$RRXfY=DuDTyebk0=$iG+|~es*+(>YXq_+9NKA4ShaGp z3qP$PFX{&i5o0j3Q(YTGgd>+`lYR+v;-G^CsWIb?#-7w_2c5=)re{GxK>~|eXiiRpyw+DnsI2LFpc`Dl@>9^7 zsuk^6VyCfn3{6e9$OgFJB${}kqAMeG z#St?>qG0ljin3rvr==v$ke`2sL!DDgm2}8wJZun9tBAtz4Kbz783mIv8DX?2+T>O+ z)2vjI6lyf>X7YKA>DW~8svhwdAH9lKWy@x6b7%ll zS>T3t#~tBqCR%%y(+em=d1mR8`0UCWGaI9G2Rk&=ujFUbGD%YlmSe# z3E5bq%&jSxZSvnTYb;~gl1v7nAEmETNf5GJne@I`Ni~)Bl^{A}Tpdg$5_%>1svRQt zOji;$H;2F_;UxGESHo7F}(}g(agS0nzZvi^ZAMAFhzE8$dDk* ztYWyy;1p?wR(colf76N&M*~h$K4hT?!@nQye;mX9GsXOqzR_v>?z#gc?ymR7bvQq} zaQCZ-xJ2MW0#}f@1mq$dR{$;raRDJP5V#PDD{;9D!36|Y5L^cQLME;PdI`Zx0@vDH zDjG5(qkVu4Q!-oIeIqSwrBHZV2}h1L$CcY4=e{F83n_m}0Lgk@Tqa8dXdqC*ky15AHNn90?JJh!)g#XC# zo4@tgD{aCSq(|2@)HDxa_iR)Tc8)V8bzRa8e@way*{4jh$xUPfC^BS?mdg(M(A zBf9;RS+Go=s;DeuDT`RsDKb1p>V2}9(`OYm&L==b2$X~3yuDFk4SNz*W+acj6UN^!045I(c=JBW;?Gt0@Jgu|8h zTr)`oZ8Avku2>jQh&g-1G~X1p77{=+Kc3Swrm%n5iYG%o(ud?2k@5*{q(UDM)$#>!$P652zHftj zXbusrtmdve#bn55r!TUM~`;M^@NAA5&hK0_2M-qGGK1{C| zFSeCW`fbUgmCj;hRD5?OLwQc)qnv%vlBAN*6qt_k)~U27)-B3?!YlTfejerB6ZE_& z&y_llk`Wk;_DwG8sAm3Q?KDE?SP{IPV3-kyr;7X|SJfqo(2 zKj|J{`G*cld_K(EmW4u1&tx6Ly&>ZkXhQ0vkR0gQ^toj8mI-iy{LYWwtpLTo>FDV) zCp0MStil{wh^ygr+|v9}bHaC;CqNCOqVb5H7oeaX9VJ+9&C_SzM_mPE(X6Lup)^p_ z-GJH1IJH^RB!?-R5Ea04+O$bdLtGuZ^h!(#YJO?{s1pvsoI%QYKn~XCg4s-G(I^07 ze;{XmJ4y1?tCwp?LIPF zrxPefC%xn|v!XsnV4NjOqCkTdRC=_>NHBFFBk3ZU3x)QfOjyyAC7A?l0tr4)n;;|O zIxFT85jj-kvq4#qig&u@q#M7?pae${j00^cO!-K#i1v+!k&vgNn}W4h(wJ1OzX6;77-c_ydXtG#p|X9hmQ8U})Ym^K5)|(nuFhV?D+2z!o`$JP!qC;7ZSNU+zKi 
z2!OZL3N=ZmSz=UV&?1VoCk}8I`?)|WdPYbZfQ-r)O}X9=YgL6DUaJIrW#A&MEnSWhp&fUF6N9W#XMuQq9eVCuv`v9;(~Q9cil z1h@+vajO`8)lkU}cxJqHNqfuNDntpGqK%?`a%e)7yP`LxSs#0EoILxc30_Cw z&m13M(}{on{60SD1OLK!A0D4J{3U;ehnjK?3nV8c^7*99j%x)=(&bpDz7>@!Q&)0R zf4+Pdf@?hYQ>JNPGQ}iPY1zqFWjwwNVVwg>iOA^WJez(<1baQL~w=BOATDO$W?@2 z0`byx*N)Js&41tU^{KhCrd1|Qht^aFq{gCX^H@~(kkv2{_{QGoKpLe)$Au*eUAxKl z1{rozghKtDheCCWqecPQO{QH`}zGzx%^sLbXS5!k?Y z(WfmNvJ(Urb4S^HccKz$P!w%Q$#G!f;+jp&HCZs{k_K=kbud*NBj9tQ{oVzbGhmwY zd*%9uB-M1AH213Rn+1icU|O(fpo??_Hcs8GJS1G$xUq;S0F04{$-qHV(nZ^;EK7rF zoSaW;csccFl-De&04L=Nf$88hg<0gSK$@I5Pxw}u>^rFk1TCc$f?5uyl6acpRkUI( z3UKuB(;zaNhM#Smrb!k397=wa6D4bUt{w;yMNRpmCqX77pO?-q;Iq(`23ypTpakPs zMU$Fa(wCaZY@Vi@5orQh>k8EB^Pk&0gFsY>!opIcERi&8Wn-Y8 ztQEDC7;Mya0}#vNyd*b@q&;gzzHu#4=#f({?vJ5T@i8o{lhL3GY+j9v0bK80z|rIm zSPY6$4X>q{Srvd;!A3U^#o$V5OafykN(Z97O>+I3=JVZLkYo%>S*9a2Wh0o#dj%m7 z=pE~+oPKIZ%jr~Q>7RK%l#6>e`!tCZ0fsA_TI(6!Oo_}urEWVW!UqH!#wG|DzA8uj zZ*P7Te^ZX|F(4aLC1g>wYdF7D_t=!Inlg>K-8Ox1gAtWXQ7K2& zWL4wDa;H3Il6t;NrD<7&CY4`zz`VE5Wg83T|5lNrn(s}}pQ8RxD1Y!`XcAy@#$895 zMMld2y*Afk9o1YtG^KlWWHO1xA#lc%D)*sgMbt2eribZzn>V5i52JG)6^Ka&X@X}6 z#U?vzVR|A|V$$-zNJf`90a)|)TF9Y%_MwdfGfVCj{nIY;kDBZe(d5Ed}7|izlsmwp1+R25YO<2DdsR>+b~36 zo~*VYl`PdN1fnfiGYAJDvJ$AsWQ%YDW73wMRfp7aDeoGi&ypZoBvNdYY$fPJCE3?B zOT`5u^D}_{90dRE)z$saKX~iGH}7_=J3!)H1RBUigj}lN!XZ~GxD1br0Iq7ZAA$#HG08pEq1$#s$>huVH6Ry#-REjupRGT3u%`U50isY1C(o zkE+R3aGrRtGFFYVbLU^ju69~flUL)aA*t&UFEghp0yD1d?zN^ zHJ1Gq%nTyH(I*b3@Q~E(Hw{_(nMw!@Mnr!NF|vOr@MTI71-Bg#i01NW;NhI5W z4|}L%b5dD-#hU_oqf>jFasqb;Vr#mmnqo!)BPsBjkxi$j}jYa)cCFl`k7zEiRJI@6nJEEp@CWsB4z3ya1VF+R)C6;t- zpG;UGOQa^~!H7P@q@;~I;ug@LABzK~#*_@;{E+uzZj;zZG)S9LqUDQ`%Pl1%eyZZK zSZUs7X2Ej=rQsHIOy*8)pMV}CXJ^GEqg>ka9wcqox|xeI{F{6yDK|KCKxD&%q4>*> zUdR6#Z{eEppIRP0Dh==zVA|=u!-~pQfg|h0h0rK&h-EA4HlZ9mHVQ?Z@HeVB~wW~&wXWLs^KWWG~lTyu1)cd zpYR~A@z@vlJ5xdCTb;LytRX}Oa)Q&=oGLQsdFgD=6fMK7Q9WYFpDV$Sy)?_iEVii7# zN|VS^hf$4=HYKjTT%;%$T8ZQI<0`5E&P1#NAQUK& zg*9ZEMR)B@{|Kn1*GC9}#QIEms=Io!D{C-FW7-m7gf7-^fe>|$w;3tZRrY4bYu-q| zCNEQB?_?m4wYG|QJgdO8O8hc9^xnG z1H3v~jjo?|dOI!;WWwm5eL`r8@*f;1QL32(iK!#^N zQ=Nj1LOs{u&COa(p#n9tfAi6!NB^>j{Fk5nouB+Ke(|sW;^Vt+_qTj|@a|$81aJk9 zOA)zli;zClMaOsE(1FmMsrHWrf;7S#j;d0?BSK_#Y#7mSl_e8Ra zQSlu`jMHnce@AL-?JPyZY8k?Ug!#|j(gg9HMy$2ai-znyPGSMzD{?7n@?U7SugJgL zdz40U8BOPN=iI-uTB_V@Cnc=qp|bRTvSUJ2WF5s7OQsM)b2EYrHGQOhP8biE6mQBU zB;CB74o|Y9bO=rBih|aMW3`Q%25Zq&KJ2K8vX~WgQI^((qjSCc`}(?Xi@si^G7<89 z#8puRg>sCQYLwe(?9g(-tk}2XdP1u4)nq8TxK{c>#6DBKq)@xEOqu<{)sFdZw*0l8*?VllJ@pKL& za#B7uSWr?zQ1OzE_E_kmmj;>OL=0teD##XPPKX+$P0t8Q#pt3IvE`Hy2`1&fk|6Hn zS&`eUBqYNwK}UMCl>Z7C=~{Yaw0x&Nt8=ecrqSo!mtE-Y)JS7$+)(#o2q zRXky{^V((J8*mQPRCxz1x;M4`VhfxE_$%@R|J9qngfHmCQ5D#>JeR0dkCJ!Qo=z{K zWl~ZgV#dN6zefQ@%6sZ#Kx5)}O~IeI{|WrFcn*i@xE2HR>L9Av#1U#Rv85O1i#|_| zgATYF!g7z;kni(J1=9e>z%eF{e!|l^ahenF%xgTpKH}|oiU$#RxLxC|?HX_5h(~a| zjfoQi&k#5QxB+mjij78d!M#k_P)FUDnW{99?Nu?u-B34}i{%x;x3SOjB;xubc}-mAFc znJ4Z%f3bWST9#0ACg|Rm&#C=0D&=d(Iu-(6u`t;-%q*!6M_nm_ zER*H>NS;hI6~)Ergipx8??1ePACP-^?r?^`ee)3i$Myz(NhaO~*@GUHQI8d7$3|S$3-RGSxeVm6FoQVyc)Y|N8r0 ze&Ij=+~+>`C;$8Z?oZtrUw*Fu5@zE9@JonZO!q4dxrD?;cw9l`5*imGdKEq|730DY zSEjg(z(oYE5Rztyh$}O>bjL+Vygz`m+@H9S=rWkXP8S|+2!wWmtxd%kZx&LhTt)Uo zZ(wci#5r2hn|jxb-;*i8l-2GSy{6ciR$h4P~iXy1J* zbfM8lVx!Yb3%Fc^ivqq1f}WJr?`RI@PSU~WUk0DKBG{matotOSc?%1SceUB2(1Rq` zvP@2y2XM9pe$U|&f8zT-g-<;JUbqq548aM3@C`l$n~bD$a~gdj0416d3juH{?IHEh zS~^)>#aO@)oA(45zT(Jgg0#8AxQ?ZoiK1*%2QDo7YRWVb?TgY=AQjA*Jvm1u!nDC- zHj!SaC#Op?6>d2Zs6##(8DX2>72}Q&0o4PJ+bQ+@0>F$K=46D-IWdMY zHqvRrn&5EnB&5-npT{;gSee~<%4FaR+>8(!wx%<`N&a_<<&3X8sa}CiL25B9=;~hO zTCyd=*r0xiu3&7J^^Kf1(MH2PIR< 
zO5uLTb)8FBcZn`pNc&8doLDl141)`K13YlHIn;sI&ISMVn|}*`ZhI4N4NKV$eJMg# z)=y8lE9!PP(JV{*W>av|mny2+hJ)v!_-&Z@H=g?#{_>iVH3qnc5r0Wd}_7Z_WwS zAffy!EDhONT&$X+Yry`Wq$NXZD725+ticnSAu64TgO2AFPS@3=XA3D?s}v%%v1Ks< zR12)oTM~|Do>r~K;<0>ov^)@E#~#?7rOg0B86~0_uggpu9TOSTbC0Xxkt|6`aDOnx zj?ux|qoj%47gQF;(dJ#&#ZjlWbX4`us8@<5NZDk8sj_1b^afUv7%jsPTrla}{Je}=t1S#}jdb_dx!<$z7Vs~8Wi2AvZhbHVS%IeypS zCEP!p9@hP0VG*F2fZ8pVnRrKPcB)vVWd)x-n> zo1M*AW~6a%v2vUeMlRNl()WG*WB>72zWTNQ^$r4f4@)*w^_SrI zq154L@) zD?)Y}?tDqKMSs*|3Np@*kYdO2q&p&FmMSZe;mn_>4qU1bWIiuazlKRYVcv`^uDMx- zm+7JJdHk4Vgyk8zMR;;k*#Hyu`V>_iB=*eNYNDJ#-np{ z*GSj?Nje}wl&O^KQ z$?03_yIDk)NvGa2Rm_QVQt<571Yq6IMdz(et%(dpE8Z2dL0WqILByhXw^Sx}?%CXM zr6#tFSxof5m>>ff>4=XuUE87e@2ZOvH15DyVI;I69jDoB87H4yaORG~ z20R2i5KgDb%BF2m}s_5B~R7WLM%TPiWqY2Jon)c(Lqf|0!+~Kb z^(=Nuo;!Jut5$W6))N5Q-AmsawB;F5UJzb;W!2o8HD0AxHLI{Ab=MHAnO|jiTSGLjuw7re5ZYMm{nFFO>|K4X-5PSmPom@f$+)=T&%b0oHw;gB`3v3J}5aDIg ztt}0#9X)E`(rjhvfmE?5s)tn7Pk!ZVU;FcF@)P&(U;PJfefzDyaR&vw2LTBP&iX~M zXx%*s4hSdSGRXqip@HsMSQVQoYSLZ6WG%hqdhMG1y@8{*Zm*{ES(hnBcs4Vwlaoea z4W+-Ob&42W(Fr8fl(WqtE4n;4F=KS&jw_V8y=+FM5qT$)y*5$TT?|kiz(f}XEBdMF ze6dj89Jpv_)jKDakfMocHeJ<#kX3R^-$%-CH5{VW2YO8*qOzzx2=Y{pw)Fv~Mu3J0 za+^C9ec1%zZ00PM0Z-JQ zULv)MZi$RLfiR_Jxsd!e{Nf^WepaR9eSL|Pvw5d|6cFAiA*Ej|s)a9PyL}aW{rl#S zR2>-`u%$bGM~V6ZOWt*mgiCAW6Uin(Cyq-M9Exh^4ikerE%*j#Qt7+rflf%6l4%YW zM#+w@y~0zYTC$paj@jI99xxf*Mtf1tS+K}P`Hz$rG(BIO+FsX=Zma0dZJU|okedt> zHZp}HrgwB>col7yrj0%^tkJGcu1U})4HaRONJYkYg1!|cGf|O_Rm|U|%@hg?#-Y;C zaXL){vvFy&DXDiRK})NcUPwz8jx*`(!94QRyrW}#jS>xIpP&ZJEp^hM*MsWL(cc8R zIi}F_to&_au82AMd`NjFFe)z_~G^Iq~^6O1)>Eu-A|IoZzrcEUX{!f`gUKmV$3_=}Iffv>1%G^kHT z0t)uo0%bT{Bj;qD85>7$|Dd#t$@fPI?4VhNzz4=Te&Br{!pR1{e6e9e;08y$vrW7a z&+zS=8@x3)ymMxFcsk;p?TBaLcn1?tV&WY*-Ue`kz!4KCA<3Njd<|eL;F9Nl8B~`p z95mfr=Kn~Xn=>({xf1)q~E7JZ@7=x8rav7G&NI+(l;k|T$Aqr_^>Z0 zOZ@=vrdr4@5KVcK`x<(M(j1aLTVAoKO>xc8&$}$rtdq9JLeUukh)w;2ge2{=>OAHE#C_s#9RK?P$WRWP@AP0T|r@ujY(3Ca4u zhK9VytaqU`p>>#+j=Hzr>7xAA?~Z{r8$MSS4mdHk^6#|y`I@MXWowzvP(GXWwdktNTQ9O!Ov|TpZ_AXH^5;SruuZ-8e!iHVLkMQ>*kELHf35oicb3BBor)p=@fjo6CgyAZ1Z2qRUeQ1vZdMT~4}D zn`N|lkcRbhaGxv+I0=b`o|nR~JsgOF1{R$#CwqqJvuRtoVQzcwCm=1NBrp&#Q*128 zE1k+D;{}tEGn}^w%=lEtke_i3kli%uHW*2 z1a;b|vjDuYw;KxlfOWL%rJtjYz;9F`M#WjYM@08-{* zWuk@d3h_PK*&5$H10ZSp^)vF*MHlSe(v?+fti10647}H>X-=(TSAa?9*c0R=(4nMz zDrF_IQy|snG3tM}xuMd;laLZCDy;S-St$q3qz{Dk#LPLlEGEZtU1!=4;~8s6gPDe8 zMo*DSpYYh|%57u>>~-0i)u|$#>Pa_s#)JCalZL_T37*gP1gZBgJr4+O3S&z{AcG*3 zuedi+P5A*6kCIxvB&}ss?9C|K03C~I$Gyp;42x5b?IIYG=^SdLWRF2<3>%r&c28)G zmN4ZcEkXD4LTcYaP72HFFcmkk(*uHz_mfd=yS?A6B}mdhxb?f5?7WiwSp!NYxb^cF zrv&`#X2My zS6V&+nJzN%(J}Bt<2hXU#O97?A$T&6xR!~>;dll>!W1I_03ZNKL_t*G8o*HnM^EXQ zFb3NzPFJ((Z9_WlD;kkndTPuMGO7EpKish; z+MKVxim$YbmI6~s*ggo;TXG@vyy1qqWDFvmkg@n4B@JFQ`dtJ?u74#1VD)e$TaY&! 
zwwNPiQ=p6qOMcigl%3Z`n~tDm2K{GiZ|J&|dy($ctzDlFs48cnuw-Q^G4Sv2~@VEEZ_$ekHGzK8pJBcgz3p!$ezD%vK z(mQfcwSkFd*b3gRr2rwI8B|78&m~u};g!B8ZTV@ogj~UARf~3#XW;80@^8HV!3Y2P z=byiP^ZjVx{|^ia)xW7O|A5OXiS@cTBtzyhw07B9cutb7zjr ziX)ZXo8;M;3Zk|mQ9xCZmN7!~AnRMew#>Utzg!k2D4U2)?8)YdDD%G zjGPlKhkQk+MPV8$5ifa~d&Hg1+Prq-l?OE#?UoWvF!Hg^GRD3Q#w;cxJ+tKF1UcvkC?uZ;&GZ#ECYtsg=cFH##W=xO zgnlaRS5w*a*2IeM5LE!WJlmj}Cu+U{1tScl3!V1Fa-fn?q=GKeu!3JJ7?ImBDUeod zy5r0;CZNbr*qn5ZaN9VfhR9Cqg&PMiuJADm*oeh@TC;BT=Fud)V6;ulrqyOpC~ruR zVx9#?b)IemGAFm>i6`gCM^QuPIIkK`j8gB#C%Er{1btqDts4t{{0v^_n_j2m)YUnC}zvwQ3xS9UxJ7D%F6oHSGYr+S=NT zVf%rSe(yE1QxV8i)4J=8&v5{GBQfBDu)ip)<17ln`yi#@Q3H_wAzCq^<#G2=nkWRr zfZOxLdE4;U^jrM7pZ{O@t9Zoy2sKC~Ql!#P8pf#Pfms(Iai~a0%&soM3^Zm$Z#Ejwf#^>m44o@hsa#gRitLa#8+iskHw$8C}W)sgbDSdIkA=IIB%Qs-(gqy7Fo^Y&|Es>IMW=9(TAf3Z(!sK$2X%YF+Xo-mx78a&PJzDzzc+Sh%yJoYRF$sCyN?M zn2cocYhbuwn-lNA@i7KIK0U#sJ3ieX@TE`O1K3l+lS32lL0?+`EEy(mjV_aqrxr@5 zO!3J&BZr6ieC73PWT=!Y`Q=yk z{fOZA_y3LG-RXw>OD_IFUbQiO)|wUO>P~#riBnCvCc}XbZPJ}Njm>Hy544<2L_`}K z3GJde3may$1erZz8 zv%t+sWyX2dW;~_CqD7OJq)cfaP?EAKuN8blo0Xkug68j$?!rcz}N{UhFaW&2IyTF={KJ8!%)G#fl0q?v!AX!oMjt zXh_?2um}~5F`}SWh05U1;y8{2bV&SMW8?57_yXHuU}*g3X$ky=1F{XsP#k6B z)-xJS)rgeS9NdUwjIF7}BfotK2|Qyi^JmUD%BH6k`?;Uh z%rz2i#fkLxC%b`^5atvjy@aKl$iF+Ey}uh%&;{<9cBGKt#^tcN;a6^7;Ex?{aC7E> zX((mOfNzSqe5R^f$!Rz34N1O}q84#=Luwd=A>7`A;8QOvlPvJTnC_#_2#pNIuo;lv z6o02un#z;{Wh`PAM%>0Y<&*jR=lh#ZQY*ehM^7fLx|EtqOGK-JtDqdN3kg4AnN}$w zE#@W9p|~*%V`FgOK&p7bd$riwaMe(27;P3<9y{>a)&{fYdy}wRM<(mg3TF);%|cC< zcsk{oKZLPt3CgT8E+T^m?}u4>y!hTR`}N5}MP&rnMte&tI%0qzJ%n&yih0`TR7Y?( zN=37HgtZ4a6*oMz)GIa(hi%jlAcw{)IlKla7C%!ethR(?x_0~|trsJ--0_{}Fa`;* zT#_AVvr$)w%`hYIz0k>bF|C6$8V`+BPN;W7r`%i*MQn~|@FUa#wTWSX>=OqEbT@3% zAalp?@R-Y-7#1pkv4<^Vo4~O6dp8Hv3_4LZJu#-7xZvo(;Sv_9zb!ld!*72Bzvi#; zO51}hI%2b&5MsC3ymOQ0(y-M^UCJk?K5MR0Y@xQZ$re%gh-YJF@_?q0Cz}ncxJ+?i zv-qxprDsbL1Ml%%CRB4;uFHVWF$E}O;iDSVOvw;RxAV=@;jn3~c<>y?Ey+P7;59EY zt%_3K`W$ANL@aWuFFkn$gJ)rFC}pvKN%eWvNag^IakrHRkxQMGrY}~ej)Vk*k&RV{ zdDzs-63gb6a+`t=y9Z09H~3t%SVzE$%awgXa$veLVz@<*zaMSSC{C8N^(3uXOjzq> zmUs6pXlQxb5u^t+R_Q-vy42P0b-pVt9#>?KlAU?04>H6}rI-?s=2$=nQt8~Aenyyj zF2wXoebF|tR9s~`k5ogQ%PbSA!EXW=v-TG;F4^L~O{SLAUFTH}Rjeje$V$so{%`V< z4Yl4V)>^SC@?j$jNH1kQAMsUDRCLCElW%>6|@QoVDX|F65eGsBrhHnRMidz5xh~wH}3e_ z&k%Dw8Am)BifMZc_a)dNsaz3Bt7t4J#pCBR!E!?J9k+~+wT^A^QL?riMpU8-;rf}9 zl*{S*ENd1M!>%FuZl%>dRbkReSip0L{PXt@5C5D%|0dwS`TdyS59mMwDt}5LA9j{a zZyWq7!QPk1#TpA>owuiKP`fKDQZQTGe51Z}}EACWBQD=8rCp z;n#HAoR-%X4q-76PCaUJOIp9P>8vrdbdg`b6AMyVY@wpwK4t0HB=)_?D4b}1mD`N? 
zcgcwVYmD%{juG_tZ75_MVi3U#XNY1uMAM%ey}^S-HHI0e#yz)fLF>;=-Jseck_Lp0 zB4JDUzagDDWrDJe8cU2psWp!%U0^waD1OFLY)KtqZ1#B=>BhrN2me#534#PQe~f_< zhJJ_&4r2>GhDH>hZUZs|V}w9fR57+lK^cqpjgTCY&LUG%w+Gct`CJr`RXRPTGrgNf;_Bp~ zppr9u$rNw0ViXI?hBW-U?G0{e!C=yX*Hi5D!zXh^BSs?{#F71ZQ>6=JRwe+gBFlU- z78cbYkE}ptG^nJc!kRgr6%Cg~NIW(H$y9D?MZ(u#TYVjXOC%R9mW0)XouIF>#wpk7 zoD0RGCWqC@&q=>1NggSMjayYY@OjzNd;oRk_Qb@XozsFl2ih@hL^;4|f5#61AM znn@Tb1(%SsY7MacI<}ZY40FRqYssKGEH=1Nq)%_RK1Fg=088#0#h`K|3uVF;k<7R<+hYmK6F=$W^HbD$2!pbCO zF-$On?v7!54AX5^RNU|&qz}a4K@l;N!a;^u3?Vj`8j@`McZ|s)hdVa+I2RoFOn3a{ zZ-0#c!oR{#Re~L%ixQ{W`i%G0QUi4g7Gp?Ns-eo1nb_8iVADWLedav7ba#x!hPlxe zr0(Q507GLlt|?L195i^T!&2a7j8L`Q2$rf$uD;8zgskt%YPJ$ty+lYR^MDK!QEp;0 zid?%+%kyMtw=%h*+|q8J4Oxe_))2i%>b)$9d&%kw%58Pb=tD=71NN?=P?5Us{gx$A-(Y2&JmSUEU+OL(z!04$3gItE&;%BcZova51Pp#f9PFH3SF zhh-s^DlU7=)yljHl!D%ZGt#vM76`Tz zWQ)bZmAn_hrf(^^n9ikSVR>3~9I-rrexl<4!WiS95S72-=3jh2F8G6{kP!V-?)V5S z1}oDTx#dq5M3)4BL&pMkl)ik|0D;_`2~RCy0TJK>yNXB5RhVRCPo^uBR6iwbV~Jeq z_h0L>jgLpi65;9n5@Ufdv6p~(d$&|aDeCAtq%UU>B~H!OS(v<1mrurX zDF+5nsqLjwx(i7N9o|v`peKq&&6a9xpi;4J;X@8tc+un)iysXvT(#qcQYz?DTZfu5 zBQM6cHVWQENF9>NX-x0pK906Hh7#Kt~OhMEZH$l+D|gq~wky#Hin- zhX$gxJ(EQ~nV_v4ROG0qlenZawpqMoF_g`<;;kb0_hLxF0>xz+jK~TuH0Fgn?%eT* z-+6(%2^`#G%j&c|kPE;vmC&_TGFU9*B#p3$e0={usO zxDY2mGtWFI*!oc8T>N(Y4bk|=aE13Y3`ci}Pi!9V zr$R8cNCWcWu%WQA;UbDTHe4iPCwB$xptz`DABxKuu%Xz8L^=qEs6{aHP!kLnjLXEB zz|kj;dpMo{*0$p>zWOcvCHWG+NlTr0ZjF~H=~N9Rq7~5nCb| zQ%xz|#7QZ_WU*bMBgdBrNh}K!sh>}-F7au;USpm?^@h*YDSA!g;E9l_osjZr5TvxuE zp+0FiZ6*bvRKl9JQx05}i3e+PK;HQYlD(p`mco``%Yv&f$dyzf=SDwMMn-?{Bb7o( zge9+L?PzPKiZl%?E0hLJiMG-%eOkue zGi}EhHT)eMuL)~5kGrjgwyLcxXJmLnHC(#yE4!L|q()`3ldPWb9{$I<8Hydr``-bS)!U26MWC)4}I@8u5z@-L2|zvS@$369s_j}88SFeF6ve+TdnX0x76 zsniAC64n&!xYDl=tAnET3|5oAemRo9Pp{M!`4=T|&OF<&VQTrn99W&u!)_!m_+_MgknjgjVIc39+ z2H*tBAmoo*6_*{Grz(8eYwQBs8j2 zU?X@AOJPE1S6zaUQ4jctLtp4e9tq@PgcK|wIGUY=#Slfu;AQXh>3dttRmo-0itP&D z>ZHHTq7mD@)f^RDWGL%+mRc6y!<3K}J3Z+k4JDhxQA4EIB=3Yu8CJN$4v;HfEYTrni&;R9k(u@5DRn9MJzV%q)~+OC^vk%#M{PqP+jZd z<4wegxy3W>&e+ia-qm@L((1Bzpi*ICn`>-drMMC<;iXNms(6#)bacE{76@Yt9z$1o zDd1&8E*T?-Y64TG5Dnt2h}%3sOn}WbV8;YFLbixyM*pn&ynA5gc?KCa@)e9;3@O4H zi?QWnEz-6a=c57Y6qjIy@a!4l#T$r7&1MONFLhwJ;LMHm7y`Sbl_jNXapuiE2klix zR}*U~EoO{e4o_w_OW5kC#>zObh~b>wS^#_8S1O|{2^*vQOf0jISz|Q_%(Z1Zk+uuH%ZF(5K<{VyOk9(VpI(`<4y2|2dZ=*-U7!4w?smC9$HSrTJc2+BN!| zE@^u4MV2OmDx(0M8pLVqaa-2VSfJPoRMz@4Nw6NcP4}E$P?ezBd~nDkvNZo0&AM>w zQu$VfxwM&?IE_vTrgP_QCY{hJ0m#xR|eU(7(H=4 z%~}lDnsStbKJs#kCF9-POSR#1v3By^D(evKhsmBPrp*E*B?C$7PFn^oi#^lx@1Yge ze6f@kza!XT&e%#{gww6o^XgTE{Yk3?7tfToV$>@7Ijjm{;q*%7ieN!+HLWxqqHG@4 zyb0$4*Mo-z05+#c^l?Ni@J^4+`_Aqg-qd4{+0W#zE)AFJpq z7ymcsw0{le58saue(?tq|Es9~icjf=)JO3UT)9)2v(g#b|IU|67f)bL5J_s60yd(2 zg8g^!!0=XP*pLzWqQXFdO zO?nnV{59tEMP3)pNDl&XTQG{c48%5a5+teRmLWlXa4W{kH)!xuv3_c!8WdvoitA87 zOr^Y%w~x5gwz6YMI|0ISyGc@V#=!EC$wO~i z9>S=DY3KqIgu^E~8PG>@$yYU#D83!8rWIm7jD9u2Z*<}%|Pk&${9 zncwBxD>08V`mzt(Ob1C~aajr=#mo+vme=-4y#4N8_Mz*9cr9vp!l3${2>oeq6>B@h z$+pTBsqFt5O?`2ccIt0l$2@!{2*)f?qn`;6pv& z&J<7Q*6=BhoYjtQZUi#3=xBJcGz+cV39jI6#q6(YKbr#}auK z-d=?bk~U$Gjl;y5yc!SoP5yf`?I$;fPWiS-F4J#HA>h%d&VrRWxAayUS5>K4&^W8Twk}nWWub%y2j`Jp0Jps8X_C*8vS})7EPjJ+=mMCv0%FGdgxW%9Lo}IKftgANZ}6B2b~kLBES`IT z??H?cUs5=Do=1Fa1^{zNfH)fhA&D9il*W`N;RaYlB7T3Bg@E6+@_ zJSaKEv^Ynz#~HLond4wN2oE@#5%w1MlVO()u8KzhyEx7=aXD;wZH}+}jMoGBVm{#I zJmcm0HNKlWzMVV1+jrb!Vz(U^@fe`EV2I(Q2OM}_MilIhvq!$ofMNr%L9mJ9D8LDd zgKT&z8}7D&r+&cQcEo9HIJ)2`mwWuR^H=yk@PO|`V|X}|lbo7*U7%(txo*J~#e=V5 z%N-=jCl(NVMl`VzMfh>hX^X*g|rjZVG)u`v6t6JU`tCo~9 z&gm;pfNL3LeJRlMJZE}9Db!XaozSY{DW&gZulyTAaVEgNs!e+dLNqo?6r#G3ZX9uJll2iA>h?9!xeW;#Ue)tSLhYJd|Z{#WT9( 
zZwY>+(yIousgA1i#w$riK$Z;~Mal1NUC?2tovc-*^*ko4!TQ`rK_Xd8suB#`?6e%3 zucFwn#{h z+5MGt(YR^M38qPG!py9JfQxOy)+TD{wmO^-rm!cn=R0rjQh$26L0__ z1@D$1JTJaK$rwdT1q2TS3n-{Ea!uIu4>qi`Y=M)$vy3Pm<);Hep^`LrYF-F9vTDj; z!K(l-ze!kI!a}oV5gi@VS8;4RpQ};w!IHuelCxgS!SqnfNfp8soTxWZGcQs$M8<;6 z7>8L&B5nnkOATX2I4ZZ@szKVW%^y03Hn-g+UoyJDAZoVrkau~x7cuusu2J{001BWNkl3AxRCl0&-@XI*h@7HJe zrNa$=^yC>nK0Lwu$0Lr|a8k#ODD>>G#~G32Gv5D^TjB}W#)!$G@ZB*BPS(JujCP#b zv~~9h(Lv>n5jdE3lXypr{&VL$ILS8H;IKJyIy8$ZO!hE13|T+@=Ca0ZiKytMJoM_m*PS+K1W@ijU@)O>~smfWUQxI=BWog zJB!Cn6|)j4_yLnmQ&7@MI1Mh0&z*%ey{Lln0Kz>v1}ynwT%i6K|GH~4%+l=v|=Tz zEA5IQEuSlsfE&xHlRTFAzSRQ|r>@vwz#h$A1PWasW8hH-9><37WXE?p@dXSoW#Xsj zdwek;@YUsv&)?kR8|?Vb&-fY_+zYUaVj|Kd%C4XpxWFNa#2um2uNMO#i(nH%2Zcxm%CpKn{_JYHVrx`n;@F6-HvjjwAf5Mof(8}5b+Q0d52 z3ttuO^><+@^50TfEV0%MX^-E}jNIS;^GvGjdv0Y)WW7Z@nwWGdh%gOAh>E z6twKH2L-g>h&dbOMLFC0`LLV9s`gA)m(*&g+V_gsEPGgfx$afp#V{fj8_fq;xlV1v zlNwT)+f^K7Dqo{13cE+y!RlR+V)x`AAb);C4!5nBS9-N6W3A*ouY`kLtEESCd<-HR zj44i9F_Tu$I>fFLCqn4VNK2*|G*ElV4^zfUemYDINy$zRmXro8e#v;7RdrEZte_ow5Pg=63lK!i_CgSccDA&&EL&RNGGRlV$n>+KO^Cu(XB%Gkb|+Ji$pmja0->)u*3$IY7h5PGw9fs zB$o8_<%Zd=4%AAj7(G2szuz?ejM>bS930X`ExL6|%sn@v)52R$JA)60@9pBT-C)DG zOT5G&$2(9A+`qafvt!nO>n;^5mvKjBrNPp88m@|;+yd7WKcQ&(T$i5knhBA>K+iG0 zUgh*IsCVsz2rORw{2N7=zn1Ujj7IHN1W(=Zz5u_36F$;Ad~~?MAG+D_@$nWfe8aQD z33sZv1=%2zgP0jI{gFOIQMiRy-!s;hDjOTzut{vZhZ$_lh@Hz}kAy!U@`A-8&FLP6^gZx50)^!ffM!q?$xQnir+JoYw>JsjAgwnS@gm!ty;I* zB8tiMjX`g8rF0aJK$+xb*a*&Z347Qkkv!#d@(ybmw%Xt$s4y|?wnHqedPtQ9Z4Rmm zoALYGVc_C|vrjyZfqejH7+%|spYD%%dA`SI=Wp@SF8K8F3SYzV5<6at;8B3x4U>uu z2Oyh-5Qz7YiejoUGPMVnAy2o3QPJ?XUiWpXO4_%=A*D>$m|<^I4TWR!4qzN4{P>BS=v-i`3F_}cr-%tAclD+~5s5s$4z@FFCk zElso%qTbz(oD`}>VZHECAvBQBm@$7s(8h0MD#LWND&$=UICqv;`xOTU0D& z%FB9JEMPvLw4+uZD3!Y5o)(QoG2z)f^g<->YvsbCBPA(~ug$raJ630J;9JZ+qhZ_P zyB6uQBO2aW_cHRnr;k0{0<>?Se|Lz)_P6jnqhgvxvew<3E0Z=c$%xMdW}OqEWtf+_ zaSeZLuf11V#VloQerXy1m^GMLgO}s#_EOU+c{&(Wg<06V8fseYeOp$#5$iM9u9Zls z;OSlC3iVvoO5R}QtIwe!k;rr4%C5q@kUvw}9eRj|Qa_cAS-{1nSE7p7VltZSoH%MO z4&PF?#pr4FKXDGpX~7YpI$(&_F&Ih zdcmjJ@8uaQJ=Bcf>OWV(pPu{tzrP5ZCs;CdaqRH=Gn zMUqCltXkYg#E%(0_MG(Pb~u5SG2t5ZeC40*tYzcyBBg&0wTzt$+Hh~O+w)lB3Wc7bX>4f7-5)f;KRp%pue=G zTW;2ebZOcKpI4M$IwEKdbH;GWseVvyl#Nl6c{$|wIK5#-DaV{{75_LdliR>Jq$l|% zmUbMErq=l1fsn$8qm7@@N|I`2AAoud+CcAR!Ol`#Dh-;dJB}B#jkQ&KwnM-v1oEVzzOM<&<(#K({c9OX)1&%atMN&P1Y%0^P;>Coh;V~8v zT*{-_RL!@}6}N;dw#?itA!R5Td!>rxqb3=l8AXu0N4zkMpv4#njB3p~RVL1v3HJ@d zV>6nYMfhkEefocMYNAam&r$O36Wb!`SArhnS>|t>EE?V)1rp7{sw~o6PftlOA<}v# zDW?_0z}9z4!5s$0n;_$R=RW3LRV(vE09*KZoT-Tb+z#&lX8I7>(=gD&#z zhU9sH=W;+$_pBJrLAgI8EDZ8pKAnsCgxey>e_3{{nkCJr(Q#55|r zg0W&`M1|@w*e-)h79e#+2}+hyadnd?NKkA;Mup%%hp>*#^nq)`dT)%gPI7Xuz$7Or z(R@c6E=#}wS8!WxrLN^lu9zeRVb&PTlwt=QkxhI4#l1 z<#3)6CH`B7jjQ8z0jE0Q?uJlfVx)aR$_kct5ToTXV|U$>-4*qhR#P4So*{x=oT_iu z&mKyV;*Krq0TbHtDn@|MxMy_yXyE=b%xm_!P|3U)v}NH1>#`V2`!Et{%Ua2t^~K5o zl(w#re*_P8Za&GIMuM8ULbezv$9v?JI%8brRMc-u+!!&}O3_xICSRrAO(4`j3)e1R zfMf#6T-7vOdQLLqhC@s*Q2a%K|CxQa?-MzG5e5JY>hrBUSLbc`gR zyacrXq^HNQ!b(h+)-EZa(JTq9nOkFXtAulA)F$>B+eA%l zxdVeRI*}`~LQc8!(8z4-Oy-!@F@me)+6kN#;bAc}isPNl=xC^ycov*X{j zcu41E>&+Em<(hK%v@yuy;~$PigxGSu6&C7QU`o6P8Gm;J-~`|u2cE<60XBSiIN=k$ z!w+ts;zxH+@!|1^7rNor29B;c%!nBr24J_^T zom5mQ8+4VjjnwcG{0=;Zj=Ioal;=XiNNoX%7JYxlfVqHY+Kq0&LidQx++$#uLz71Z zb_gEFhBqR3RL8e|!PjQ^8fScVeudw7^%Y+3XMBMNd7ehEi`9 z)|F`PXWIoG0QLn)s0)iTez(rRTJPFX#j7#4APPPwbUD5!V`2+rvdQ#PicHhGB27b$ zhHVssr4&~=q~$cUy6?^DXvhSttSZg3lxGybMn$K4y#o=n>S{MQT{>2p$g0(p~DRC?nWHaN6 zU1*QVx3Gthg^?SwRQ4aA?TyAcsS~z$LYSp6E2P8d&xq zDfu3n45Yk&GftI5leCy-GwliBM{n3gn!i?vIta$-9zwYwv}JFbKcnF)8VZmR$}Lc_ 
zD!MR{uB}+IEX*)QNB`y7l3zauF8u>^jo*nFiYo_2vNNpxF4rL2rJuX%fS3WmfpAP} zn)z~_UdTvxYvq;YDon$MQDOen^8ZvK}9`sb(ZzyAG*;qRtpL-enT;7^o5i z>J@=Ghorgb?vxzrEZ)Y}Bul$X%DK=1-RH6b=% zj3R0?Vr^JVHdO?+r`+Fa>OV$YVlk;pMF%G%+yvP=rOr{UHdw8SzeSSRS=@^kOnXY( zSSIf&xb$#FM@qCNhe3lC;Dk$3xCN7PSfOOtRp9%ie@^SWk|oYE5B+KM;#KZzyx>Y$ z+t>n9Xl3Kzd{n#mQP1y-*De3I&g6(e0(2@`O1=g_Wn&XRA$o{{+oLh?A?@8(si-Z> z0+JgfF2lk$QWoL1CCC#0N1qDT+9?z+F^rg*cRiHJ5Q3eb?`{D}R00`O(iSm(u@N!a ziIm&KjDU-a(Fxw<_r2uQ#=dz%ZB&8dl#`1}(TB^{ZEPaC;s&=@#*3`yNUa1BOKLBsSw21z?O*Gl^zoY7FUicgS{###fvi zOoiDF!GT~Xht?E+!$m`}V%pSuH--5Q8Cx>BV>o6le>S9aH>@c-S|eoHM7dKT!2(N^ z9#dz^#Y*&#_hid$L|xvw7n||fnl^aO0pAod+gg`kviPtJKKV{A=@*i)HP7m>hk-e? zN~pMa=a9=LuzkYKFt!boO?}$VQU+byT93wgGAhr^3-m^Pbqp2wUe}hm*hpPC1xQ%S z!(4%mq>ra1ZmZ{AGkU$Ykx61tjo`r@W??-XGQiHDfuS0}&s`M{04_(x!`$(uJ>utZ ze5Zy_&yV=>{x!b1+~ae9z~?yQ6%6+r0qa~kmOUL1jarho4P4&qwC#d zYr9f<>m#7ltAI-v_B*AzX+3i=J;rjFVoT?#6`!e)QsS))gk&w`!osTIPQYV#D#?Wy zLY1kn-goKYkfP$Fi#VUDKD2At!t+wGpyhkV|Ha*pN=FOE5#tpz-62`{ZuNgJ(3b|4 zce~$;=xc^vfh6l4^yDgqsxc+fnz62nA`4f+W>mJNor9p)8eWW$Xy?#i;VD;Bm+3+7 ztIs|fnMyull9EYV8q{0nzEByd=+K5j=Kff&NY8+BNm1-4he}a&wo;(j|xU zEtZqi!Y^XQ$>xv-vzH1lV}=P);4jz_+jNalSlqSp&H3INjdFP=DWJ5n~3G1FdcRL`3&Tnb$l5@b1;RnIa#5{?ouwsbJ%r!US*+LtvVq$Lxm2+a5L z*;>EgX*}`6)fau%w!RHvn!HU<<1CT^B%(TuFU$=MX*uP zkz9~e)raze=SQ8Ppyg64eX1<2gp6Z!7v&4%Hw69iYa;l!;rPqb=H>fw!|yvoLjDJc z{9{RzuIXe-d#2Z695Iyi2vtjLi?6V!)IGhD#e>P|sR6oC>ZHDS!!jBVOUAR+p>NQV zzNfD;FYY4xCh0zbYqCI7JQM(C%Tws-ftQY*@}+uUwzl*0DBBV*|12JC;Ucv#fl7~N zq4n%A<=azg>J6n8q@q_F-j+Yo6fa0yH#L3HD&?u3NdP6K?4$&n5y_dg=}XxxWni@{u3`xmP2l9gJyPMrC%g_)j>(M?NUAMo^URcoav0)4 zk?(RTHsKbjGKA`PEF_FDNh|;!D<(_+ZwwB*c#{TbJ8{bNc9~2ViKZck9#|lcT?~6| z$_0%k5Boz;`<0uw_2iKSc2;uC)e0sxF1v~smutRRYl1D?qt;n!#=0_10JKLPJm~(n z0GwnQjh z`n+#iehn-vpJ)Co8EKk?Jvf*+V_(lz1;*6K_ zh%W^Ajc1VwdP*h4;Po*8OtlisnQ09koswBe z;2{S)O6{`K_kC5~dQV>Bxn`tg^{ctC@T$-X^{I5;r3HVIFZb3eka6oC+7QAhky_xJy zW9~*%6`D{?l)cOAU6nctoXnoK?ZxjEmaDX)dMCUO6Bz^U(#>DT+)}AL(UF&9!w`WU6q^p*-rix;PzVkT+}xaS zJZ;zx2W%PXbH_FYPNy3kO ziFU`F6T9zt^X3tAUQ#9-XQo}lzO#`{!a~3;TS*`VM!hmMRFwEN8%3%&DJ~oza^pWM zMNz6=mtpD>bG6uVYChigujO8J0kP&HR&`lOE$yVW;!+xITFgBp-wn#MK^iQdit9}C z^R6*X7M|F0hSy-116C}Sr-Wo$MT2&2n5tHwtk($yK{g!Xc4b{Ns=c&$6xM${ zqe|na{RtHRfjj=ZPy6)yvBU2hLqdMdA^*56g++QNrQ)`NA`uTa55xiZPRSdzsIabf zwJ9yi|5c19>cB>2FjxF^CRTu5E+XIN$L>D30#ub z#kICDiuT~Ou6@E8-mMrg%<{d?g@lXx8@MSU+_88E-jceM_m?+Z2fbf04YrwJyS<=% zDih`!(s&`EdNwB^XTmKF)J38hzn57SSh7rOQ5*U2(^AI4X(I z`!ZZ(0W=)&QDn1_dTZ!nWAv~LOeBG%LJ7ac&udNV53jO!EYUDv&(F}yE9}(}2zDc% zB@5LV1*uIhwne5BWNelpWE+z(S@G~j`UvHQW-M6EAr|%oH#sEtT@_QQYJ$zBIBykH z6|uf%Wn=M~FT=62AS{wiEU6~m`>-5d<*;)szF!f+fjVjRp!&ayffs(jN82qv+HUZn z9`VsO@bS$Pe4t0XXB(dD0k?Vxp2Y-o8q{};3uTXlSk4jt^0~!O^dj-8>xeCl`hbYW zCc+mwJS{R-(5L|$1hX>zCW`?hTL);+U0vX8z6S)(>^AWnhIKN>l9F6o+bPD`K>#+V z`_e##uiBU^-@{>C`G!}Un_H$NS#2-Xway@=KUgwxMiHqZa5#3jC=O8Ul=dGq&Q7P- z)+{0vWklf#r7S%o>Qo?m4B#g160($5WwBvKO6IMJDv+vVdX3X>8?lxv4~FluTQ@2b z&@r(=aWH}Hh9euHDS``teJIXH!37&0e8-zP@#TELSLa83dEW8a{0_e{U*R)6;Ae3B z91{<4JZgOYm$XHaqhWhpI}7kvDnSKmw7Yrv+>`1*>kbcY5E9<0X|LfxrXbAHQHmZZ z42|=Y^6sbiSUPTG;azsnu}v^4G?OM6;oQ>&HjJp~yh^aC1qM$SMJLOYEPf%l=ed|M zU zZL4GUB1jjf1z87L7Aju~|F;Hzf{FsHV2J`KCfmX)3eqKc8~tY!AlwNSaUKI&1K3tH zCcHIcpS3pEQrw(18BZd#001BWNklqH-d|jpikw72 z6*DFpOwtfjY)pRJjt6o~;`uxr4>%r<7&>rsdxPW65y!&`+v$Y6+gse8PPn^$f}6Wr zoDN4kef|tj?{0BC93eU|M*Q!F4v0G5fAKzGf``ik=Dr6j&48QJF>JLqVbiet4!DG6 zgf)`w6BCC0e8&0w2oc5o{VTk_e}v6T471Jg0CPZ$zs1Wh@Y$!I;pgA{9NXal)q%&y zdptfohP~J>c)VON_lbSKgc5^i(|PWUX!dLCLm8pSh#-j6M@P6;4}xnoVp*H7Y#8L& z$<{*)bYB+vL*R0)$@xL2urlESuGt`D^yzi;{0oymNAV_%Swal zJso})k^cDyt7FHq%AC=!;}t 
z<(i`Gymc#!IEm!xl2)tqhw^G7DCst>qa~$>JfK%NL7C=Dm4T%L&DGNI%Kdg8O!@mj z0=l+Fuvta=0Rslv4h`ss$DjyYp$KE@WRo3z@LC%X^m$-HUx;z9|NwRvH6ZjFfKrLPMxH=f@yy#^fzRos~4PJkx>p38>kIO2ot1|J=6 z@Zs$fytui+2eRRZIO6>|aA$^-D7Fi55Dkf=%>e0olKYwHL2&EbHa4alK<4{fK*S}x zTe8Q^Xk`NUHmM@SM1VQTtqA|4(pHu5++y&gil|85t)N)r^Ng`s$(w;_z=$flm{p8Ai84f!AQwG2UAkc z^_fzQ8ulfRQOgCvQLsV22os^q!Eo9bRLsi-2lW{Eatk9CYKOEmOjvw=hhxNL!eOd7 zyWv3;uk?a%%<$X(fS2b7e0qL`uO46HrJwQje8jhyc!b~#;0@J&Q=N(fVake>?7Y1C zcCA9fNQh>q9f%Ijwwq#}DjQVn_8KcJOUJ?gyMT0I^8ph^#H{aRs!OskZJXty{*$N` zDgqG8f`ZRUTaG+qXO@M(w6$zmBDm1nK^92GHA1e`3qm_0mOFan{aA#vgr!Jhv*r9Sv)1QcfA0*SD@^+ zyJ#l3IjC`$Vl36*s356c31}%tx5Zmu!1V>H>`Sy_@{)QeA{_ejPOV97s>}wH1Gt6MDkAOz(RBjr3gXQu3+%_EaF)HjLN0m^i~030SmgUtSBiyRNB*gNw&*G zA*uT!W%r|a7HNl0OLIg?N1c+xka$06RZ}dfBK*87+SMzCC3sF8l0YFtaX1`sI393w zJmGXa;pXNB@4oX6-v96ey#MaII6Zp`?1tmvhL8j>jY3eg3@8?90AGRIqIu z6oT_*XRrwG=F0^tg0USiVVLJVzPp_^0vWhGT=3@c4eOv?%zD%d^zL% zaK=|(e}zpHx@~y*`RDlYr$5I1{R1`~7$dk`uU>zLhldATE<4WWb6AjNmc7!iLSZ=s zPivpF<-8Sxb_IbQQr`{-hbYdO_vR-C?i?CKJ;%jo@-DO^v?Z4(Ws%cqF*j)mAz36w8C7ZA%qcqV$}DnsGY)5@Bq-yC#2b>R1U>Q4cV;r@iy=wr)?Vah zBqoN$#pE526$f-Q<5rNTCB*~n=`%(GRZM4PVCR(DuLI1b2M{GOrs~)y>98sFW&u?z z4eJK2H4KawHHU98fzNG~E{O&io0L<|EV6?18=KTA#tCUlB-1OF>kYU3``NG*RBDL_ zqxwab42&dsd%^;<3iOm+^B|&svU0sS_4SO4Tg@yIQrNcJyd}$2U$(SfQGW>{ggOTt z(%$HV)z#o)A;?QNw_ZTj_&mlK)lf|r3~6!m&_M`LF8!q=WmPc}ot7SNhAc$S7XZSH zXd0@iM6Llz%&NTont^~1Mh2ztmJAneM|O{xwe5B!hHtmZGz}H`4L* z5BD?9;!z1D1kVQkotpx;({T&nT^V>Gg7tS(R;~T@8Gkv^Y!u6_EfZ*VmG+m*giY;% zV)ge+v61l5v|dSX8nf^gvP_!#&+Vpg2&hf(ZnhTfT51s^Y8|_w+*&uhcOlCAgp4io zF=d{u|-q1@{D;vUIb7UTSRM-trm8Do*uOImDfvf=OK z4xN#PF;by2VrcWIipLFjC5~4|;LFR7&o2-7$-^6basCcJ+b{UiUgL9I@Jb9{tK%LT zT%JKGuCm$Oldq8598`0WO+wx|mUk*Q+X+}*lWPL5O1Pd!Y{d{S1)&!p&Y4irfVtEI zCTAlCSJK;C&LJT5AS0p%XUgp%+{9|Bpp-@F6mQ0aEb(rOy!dybDrJcnTwsK0Rj>x3 zNoVYl6O}fk8AF;S9<)`9i)XvkgFUphWiWCjtLBg}`Aafqfl-`88kdF!oAq5i#4F`D zA&6cv%55_d3`}O4$y@e_jDD=2nRQiyTssMg_orl93EzDVcyiF5wT2~#dZg_i?$YgZFM!oq18VX+3C5n}w%}~O)lMf|YjjUy&3*L%@ts9s1 zoQ=go;Kk9&#ZA=rafvCjdZW&1xO{$JLeQ>FLXy*N2f^uh3XYOGZccZ2@A-SUxxK;D zr%&%lP%o$mXw>iK}Tef9--cn%Y z;Gq7^1%hUMHvpNwD35eed7OGBB_Tl|v2N|X1CpiAXOZBNsZLGGed;0|KjaGpZAsE9ZiY#~9wWtIve9m6 zmF2dLACjk>7Cp2nA8&4gH1n2oXck#d3Pye|{x;n+fL6*;Au+!}6GHJ!!_^L<@ zC>YZgmJeo8^H9_=u#J?#kJ=7eS~qr&4MY0+Er(qeO%;@&VJ}rmq{n3XN2ntAO;Vf&$j0V#+dV*a^yh{2HJX+%n*XX65OO%U`ea8YEe&gL9}cV z(RQRta$&R6b3HF+OVIzCUr;=j1%Mb^@ZR#P14j{@TyYD>Ee76$;5`}mNC!SXJ;Nu5 zC-~&}1TVG|-W@l1V!(}T7(wR3aW`!H#31Vhh=Is10lc!&(BGX6l1XNw%-T>{sW4*X z;)CoGZX7LTx|PZzREbCNewog}qPM)i&glH!3cFR3XZKDuaG5;g1@0T1(pYQ>+m`Z2 zPe!KYb~K+A(C%lsDdQ`;F>k$M2lLO$;(;wny%zRPB<18ABlrmCnJh?D7QnVw`pu>T zBb9E+y+ID>y6E!b0Xr>KXLn3hyfMS8`G}u9JmBT|9-r;6@%em>&;5dza=|xlcm=^j z2LlKGf>^7V;;8SynNxx!^mBvbQ^@(r@35I4ncOhAOo$=5 z7xXMPYAgh?#6)wwTsE)8yYC#R%D5(@JBPpVgj$Xg8P%9tK0^I8heJ|DwCDZkJwVPy zLRmyDBcyYXMP>rCUUQI<0Ib~Y=TI1{kGh5}tCGMMfMLtkGdZAmQK?b0jgj6pw_I_8W0r4<84zJ?^sJL@c}Fno6=rM6w5qC>gdSwp7M29VE0 z?aDKX)FX~n&D*wP4r~Dgg*vY22qs-KRhKO_d}&+2BBBHVvPcOyDzeYrOA*$0K!Z!A z#$l7tOr!OJi;CQqlJ_*-2+>~ExL$*qw2NpCi@X;N&L**ccCWK@67re_M<;c3?5Ueo z%0><%#J!X_Yl<9kN>Fi#4Z=%vBi(WYN3+kIv5G>=s=Un^(S%udiiNY5u4R~Yid_P) zc`JqGV+dWET1z>r4p_Z26+D-RVLg1E#cv@;>3;AkJ>9nSwOT4Gc5{}xlbwjHD}urog5k2E;TZaLp`S2rS`~;tr;J<;0`#JJn<_yA9JQPCYWh^ zN6O1xF$OT_8vN{6!vM|8)v<(xwR}zr6zEs7(d=q7sl-<)D?0uX>#$}i0ty0|6H#V* zYpO2L@l}!*lZwK@mCPz@iY&@fGm%UWqu04OgloPy6&J9O>lyhIf(D6}w;Q$w2J?Gn z+-x3DwP{~qPMTZ?!S2h{RlGxoI8_Cw@RJvD$=e-J9SvvoL9ehipfaOZ;TU5hrJwcq z7#XZxa=}WlU{iD$hZ>ILFeRjf@yn?Uj3JAyq_CUucNl%|GCJqGsN|D1KM(tT0zg;7_0s)e+NT6EKagSLE&62-n$%jybc)aua< zPZYQ@!Fv#VG){Q2-QWk?6MS_01V1<(@u6<`NRIg7W#GmXgF_lQqoXegf88}c-^m6) 
zQedro-tpYbXiDin-|3wcjg37Jk)n0L{7~_UsSt||Dk}X*mT16y2GS~>vKJwcSS#w0Tf}WJxh5GAv)DT& z{g0eZ4zaE7pGf1iw?LEu_!JA^B;a@;hi4AKc_5 z=XS==>>l4qfNCFT8=r__s3DaYglx@tLz8?j1?UQ8L2X85;f)9u2r;3W1?i)r&;1H) zxZVX`)+xmx>*(t5tSCgUL9<^~6to@~Qz}Z<&GR#r0p5tVc=x%5PG*ldb`C646{40* zi?mOq3a0BwFD8rSGPsgbur`g79UUWOGeWC$zp~t&OUjhDTpBeLm1R)V5X-U-P#$7# zwMgl-hdxts604L&8vHuFLOS*`QgOVJt%OqTilOAcGeO^2CG?AuYGu7FMyUE73+$bZ zi#OcSCS$s5V3Wl<>lN)N#Dpd zV1zd-0bD>8lry9J7>lS247-7#lh)G3a*_%g4!hizcu(Xzn{{pLCyh1=C;YVD%_VlR zHri@bFz~Zop@}Oyga3|VplI_1jPc;b$*^7;X8i7C{T2g`81RHzCznf z+0+FaEvOd{4v_0|%mi&8-{8Afvm;)V8b zGH_Bwq_W|7bHvT*gqu@vDNeUHxVt%F98S2qJ>dr*{}4}Z?(p5~*Ld&wJNWpMPcX(I z63zhZ^McbMlo|K0Ug3N`W4k#r(%Eo04Cn~m>*M*1+tUftC+3_5aDVmc9-n>oDZcsk zTYUZXH~9MNukiSI!QqmNbw#LvZ-X(aM;VgD@NhfPE)$M@S z_QcU~S(#IvnMypwvXGHw$6$ILdn9O5bu}w7keDtQBypESNLHJ|>V5JhA0VMC!nmBB z9(XL7$KBXe-a5PhN8YR4&JjoCIp=_+w}IL8v-I!e^zv7HSQch0$7~371TNpEO5TjRTVggB3`0W(t6pBBhyU)=--Tl2^d3WL*(z_%`PR zW~e4m_MGr5CS=nxPs+T=-7-cfhkRDkV$)z2x#MP5yYcfrG4f~7_-D3z*bI14?bEI{ zfXQm`V$S52n%lJ;q}2sul`fGKYg4JsXPzI{6R`GPU&V#yfO8qtt?DLwi#mN(M2Fng zu55%_YN=kKY8fkIfG2?OTxSErL&-1zJaOQ;54?cjhldS6+MeL=J3PfFchB(g=?*Xa zfMptb2k=X|Hl+E@d>45ABz_OC{`}}-3}cMC+0W~?#e;e?T|lA7 z)gWdLEi8nI>wGg4e7&NYG(oq#&WeHMv0}S|p)&|rlcWuIk$gAd3K@_MC!uMuMu^bY4Vj2Ro@Z{2VVmp|g0)y|C5es1 zE3Qhg_`Ts+S&xSsO_|bAmm^0syS}@z4$WrI7Fz8VM^EZ zRA58kkPEkE7*tLEM$Qb|n6p_rQ|x|BxEkc8mzesJkRD2Aps z@`I!H_KZ|LgMr=tF>1OS+GOCwiX!2(nbQm>`+K9?p_;$m&;`nxuA({vW@Xam#oJqy z4M%oN%ROs8VGmfF!Lsly72Th7`t zOn4O+-==`n{=j1DHgsetIgLs7qo?VD!|z076s~6W5&9yP=9b&eBht3jF$KzS;;c<# z-U=l`{xSF=?itIHPM%z0K1LwKmSY%{qP5ai?vwes5^26M__96K5}w%NFISV~s$;NP zB3tViaLp&Q3NFPQT){fZ2`@bThB2VFyxm1AFwY;6IF%Q!jpj&6rYov2luGCm!NvKc z9F(_w^-xu#efQbI;}h!S2QhRRyC6qlB~sM<+p-rup8*4Kubd)d0RKBwdX`*awMa&){7q1#KS@P6 zqURskJG0M`-v@erz&D}cc3&Hb2%B6lSihGbvh1wQM6ksa&0L?Z6K+jF;J^ZhTf<}; zFzJ+!itjam{tv=c%6iR?_8^tE{ts7l<_y<@Z=}KXBx}inP}Tw^<9qqS_TdxfKV@lz zi;NUj!Le*xQqV!yMFA9cUv>LjF+n)%`=Y>X?%@C`s|`zRI|@H-)Wp$J%0;u}$Z)}U zb7H$a)5p0Bcsxxz0rl|M3~RCVKpf#MF%j3OD`H8Z#FNi3T`3+!STkiS)g+nHMQ|**40s z-csc`WFp4}bH*L4EMQX2yW0n(d@E?;=fnzEp1Sv}G3v|X?AJF_9n<`26gF(O?g7AhO^**NmE50moZ2z^4`^E=ok>!zI z#PeqbBadPFr?bXbeOsG7CwjrbeUcXoI}&TQE$Qx#T|Q~k_S<=%Ux_0&op>ggELt>r z6IV`Ul;X)<6Z>JqnBk+Hb0jCTLaC~bA>rIt-~a63@I~+RMIos zmKER8-2FM^eWmiZ876Rk^tT1eRtVw^WvW-`A2Ow1#6S5~;Y7vE&_Y0+s>9|cpHw{0 zvg;J@m<3ZUCD+G=G8|}2DDszj)6uMFi?)zg7OZ8e2B0{*()-~&J7}T&UV;!d1$n4H zjTf4{uLs(NI5=N0kTJ)VHLRZ2jZs!PKv{5d!zmAI0G5pW2++-T;5HB$jAHC69+kX>Y$2! zd`(a2nF`gZ*iocs>e@${YE#Y9_6r6txgbW#;tewsQeiNLomcn2849NM_UT}@Mkc0J zf*ym*gE1YP71ju=H0+?*mK2@*LFtGP;+O2^9#WnlK?T23N_wm9+4Z|Cguv=hB=JSm zei`CXZAgnVX-T}{l&PeUhz%i+nZ^cWmC;sEfdV)Glv!GM621z7_bP8$38L>(ygEJo zCC9N?(v?>liWqo=q~ndWGzuK`Gcs<|7nj9_b2@0L(fNu`@XtVe@RO7PIjuU?&klW3 zmvAt>|HBZ?>s@injya?>k79OoL!Bo5^#_`;f+jJnLbxRtzE5r*g};^ja|pUPMHM&Y zlYrWE<-x)u;3GiNZ$gZ!C^H#lBKe_UEV+DH6)opJDX6{X*FU1ir@RlZ*B>9}erOu~ z=RBLX5l>`yyaEFKTOY+Hna7M>#N=V+gr7&+3F}T*J~EW>UgYotfwemeg{xS&6vFpm zNvYG*7p{(4vN=BnCYCA6BAM32OGOG5j~I24t+3nct&*@tCeTv|-8s4lMI3(jPe2(< zXQ~kSDJX)8w^wagaY@y!SZlsk?l#i$Qjrdo$WQB?FGKVILR+r59|2pZ7Tk(0%D+`3 zGv>g$3UR4?vXgUzhfCOo5!|0%ZN_3&8i6;2)1uGMR<1! zj^^JLarX*@jS#q0sRYJa9{V`N#Q+t`!0wwWM=!97BV)Z!ncpR+do;*3i${XB+*vc1 zVz@N&a6^tzA;IC(qq=|tZ$S^6K{#d70f94k_9fEJaz#exK!!pM$xd~Zbl-jPVrf0w zz+;+=YQtkdje2-8itfnP92C-UA@Ki=01_gkt(O6G`MfpKo%$}a0Z-WaiKhFo!G?j! 
z!x*UxNe^sVXk168y;~S+Y@>7ZC6e>I+}A2nzyH|wIu)4A1~Dxye+|vz`Lp}j_a|!d ziplWcPWY$%A41(!D#3?W_s5~6o*=sJXB@$ovsYpS{_GSD)u+`DoJo=f>uv0RRHw8}1m`Mr{vZ$DXCS?;b3P_3po&r(qj6n$z0>Mv0HIIG-iT`a1{#||a(i(dp zN*!V}9WA&fN1dE}AWkaILz78%fR>)QvyeFIl@!ooS3Kb8NeSkbqakY87JIaT)P;kh za-!a-kY~pHRh%azXA^2i!^Rc3uP#-qeo|>)ZfJ^~pGIvo3v)~Uru4JH0?=32q+1bD zA45uAbII0PFc*D*%(aH)Gqk9O9xFXV38z8WSe4Etsx8ho1U5C3L(j3vjDS?tb6Xz@ zDZBc~+#NMmp8$WWRi$OPj2;$*k*o92%i9s%H|YSS=jbp2sW4N~tFDf71r(+tZwHyV z0&xYiAFNovb7AJU%ieJE;uG!oGq#L#TE+Z+B^nScjMk#4o4AVs23O8ASV7Fp%&s82 z(3JI+L#nc2Q<^46d-Ix8lhI*Gm5oJ!Kx^5VB@+$D$}ZZ)QkECSoFj$lv=AOLaXjzL+2T%!k5fGKxxNO#7NHnjg5GSyW)(DymFW!Jk71Z!5D zI`&5jGZ&vy7Z5MC{nD6Utt=d<>Ap0=&lF{j2SBRtRnSi0F~r|H$p3Bqv~9sLGh|)c zPn3gozY}dM1Lwe?UNL*F=y1m+BADVb-E#%?eqi!J^azIg;O_o6G0!U^@X{QL5hrwy zFZ@XMA+sQx(B$7~9b`|6O-4%#_CSiFl?{uK{LqAsUQKG6HpXUo;>#5f>3^}ib?79oRx2AT?dNRk z5VvbP|D^OdT>WKEurtvgD^L&gqTZFmHY+BKC*?-`!6>az6O1`0hh}QYc|u?L1yD%I zCsj*VieS!sJF2q-CGdB5TlcDlL@4+&0(bRF4|kGqR{i(EB}$p^1{zr78~eR|lindf zYEMW|Ukt}h$<4Y?DkS-1+(mZz+}@yE`LD}Gc6NLGseL2HJ+t)A0 zUM}rV!e}fYO?g|6CA5Y%_`@L#_$|kn)?)qf9SsQ1e4Zoce_Df@ld7p({efy9KF58! zZ2Kt;%O1I>M;^jhK~%>tW?3?RHDRkOW5My?B0ud_gJx?XHbtg#@@C@RVii2~N7^G} z)s-jFvKHBTI7NNB9J(~lGs>Jdd0EVVCsg4fq6rF@vg(9Zaf&g;X~sNV@&*u`ERgO0 zhW_@y%;~n9oJ>yA*!G(GyR1wLF8^&ql(7+LPhL{wY07CC-(>WhIbKmb$M#j=6CQ9BPYd^R$4XOEh*y6CE2u63b%>ZhwbLVG@8i(-u0CK-90G(r!PU+Yjw8_Rkk zd|ySmWuI6vnceK3T^J=5Y@IlD2GV%uf6)3+xE4h#T`s1HpW%644LEFth;*Vi71E()APtSb^Fq1~h~4S4GUyL(G^Tig zqI+>vJS~FE@dGN=66$auk6GcBQ(@9PdSj=T266CGVQ3X@!sHPOrUJ;}^Z*n(m@~Du zjPVMwSsZdJ5p&QM}AQfU@M}ilVhUc=)hWQ z&;XKMx-nRT;I!FXO4gZmM%25nMUcJP9~X9!CJGgov6qthQ^J5eB&zGYVHLkK)vBGe z5hwv;ANXy?k0mf1XY&_4k(ySzve(rWPPd6WQT>BVL5fD(YXF3y_pS7*daTBua>_qys`UxZuSJAq%QRyG%H#ix) zo+Ymv{owwBGb=D2xfiu}V9ebXDTXrvIx<5GylM)OhZ&_5K1=A8--WMLZ zQ#I6u4Yh%)qd|~0fP!rmWU%_fv{xlZFkEH_z8!W+-4aFoR~RcPj$?f*$vrt;*!nJE zX1ax12@2p+hd#-W_uU*5YsD~ecRaKf+s9{F(u~q)ExITdGGDyv^#6Q z{gCZ4Y4)o}yzbzhcb*T}_bI>kAJ^5T-67vz8>qzol9ZN~KF+0e2meQXH8A}qATn$- zW8}W?*#0e$xu2&wM%#7bvUCSHPMJ@LBc1$!zJC@QsYlXE6F}$E6Uu{MWACHlUxk3N z`?tl0GTy`8hynJPa~JG~UN!<5a;+Edj_aaDCF$Nu@Li`KZh@7O%a1US(cII4{?Bu) z(od*dg_M0wyd|%-*--M7vc3XyuIm{)s+241sS1HF7qMSQzw=@Yj$UI-;CAykz1ioEt!8z*f>nI)#}mRJuL`qdBwv zXljYL2B(GtL&_3MFVb`24x(L<_+txAAUp8$-_YH#n>+r{c{;WF)T5QdJO7kOK#nur zwu+pZ%|d&{(vA?JgpEx!e!h_VE%%uk<$lXxT?BW%A;YOxN^Q1XbaxF27$|ZrArk)S z$$+~~t<+iNB=T#tk`SUTrQG=H+dLfeNg&6#_>kniY#d&_-cDQiHUIXr-A{W!E2ZZ@ z088=b3SV9Pm3mzX@pgThz^LH02u>B}peUSDl5t95T%X77kCkH#WNMADH?d)GYgpB} z5wxdQH!!r5YrAh#+7W)Mie9ShGl=?P9Mhq?w1rZvXctn~+5mWn(dT?_Y^TR;&rp?U zbI7NL2%}HJ!rWe6psZ!$PLe?G+GbqI)>R&Eg!smk(o~_u5CSDMo>Fr$NN2`mN6?s% z?1}^8dc#rYDk{#jV%G#@ZVzYm0BgAR8&;uVH#YSD$zH&WjQ{mPW_< zMnu4Y&k(Ua!IA=}q3^tBxuBFa=qWbjP&OnV3u-FFQ3K-J0y0 zGt}nZVXE&RO;H`nbCVc4?~wPZTvq|y*7M|R-^hk$J-lLV@jmXm;=l!5m(zdC#b zgFjXD|C|sgx4Vr|dfc$jTcZXPX>!W8*P52E9eVChACHDf`*pfs0r#h*%YRIUa|C`| z-HfyBhTs{44uv21e2yf@`b=IPJ_v~Ort(sz&#Ls9nX+qY|c2;W>j}bF1xao-@!S8b#lxTAYu4Qmm`tYD+JF^GHTO3^Q;C3LTi90(FMpuASjp;5Lxjpb5n zAqfW<+P7_kX<9Xr$n@E_F+tU_l^wkPS#N@Hg93XZX$)&$RZ2wj#4%R%cascTv4bDU> zL>tes4hBKP;NBvRCUdUKKR4uy)5vln1)?NG-<`M}@1{z9<$@UFI)>lBXM|YYmlhZ~ zgoncVTPkm+Ijd_hxh4(Ax?(NoTnA1*NbL%Hr~}o>n+8k{$@=@7RY6;)PABkh8RuCP zb!Ljn7uw5YGr%{&9L`XrNGg;fETlH%ic|y+n6j2j)kQ=zd$nmy_V;x-(kkjmp;!`5 z@tte*P8LDo16t&7;d${Z3#bChoA=18cqvMb#E#0iTe^859BABCw+EepZ(D5q(rmqj zW7}KA{t>zr%X0n;@JF(+!ZKpvkHnsAUPG~I!RXJV zW-<*Q%V5Q_FV38mRGxo;P|dLzP78Tz`SFwJLM`<%k6HoI2xJqI^t;wRx5HF5A7vx@ zKYX4POoq~W9%){6{4RTf&v94kLCfyhmvDOgqci2HPQoqr=%gezG(=it<`ukwAF(O~ z>hAyPi)yh1$j-uZ~mql z1DA|0br^DzOF{m2f%%}{*fzxNinuCm-}R_{?3 zmknE6wibv#qPyaxm;Mzeu}irFcOXTN8vR_^CRH7GcWIm{ 
z-T_i2YTdpMJ4W#VW_(k{I9K_XqTXL)5g#XmE3{9qeSidl=mAwjFLU18Y_`W^F!HFX z+5KleHSVs!!Jbi0%-M|0gl}pd?dkfyn_NrLvr9f?_e^6qU4>@w(3XIdmVjIIGeE1U zKz1)16I{nb_T{xk8YxJ}*=RUz7ug$iK1KpyW1p(pT#2Kk=w0@2Q}QQ07R(FC}M2jo$yVr1U{eN~3KRh@5KHl z-wo58HwaIHQ4FB=5R>im1<&;Al#8JwDgVAqzuV}yU1^d3Jg~qcAP(+r(rl{gbi89R zZ1=D6`*-@dP%0Ag_Rc91RqE=(7IdCU>$z^eLPD~=_lR#F`SCL9vUB$;7{!`5x6hE! zd|xJ?6~o2D9E~Zoj9p~_AMqX-;xQU;=TI&tZj~BW_Zia8s(Lyb2*a`+g({{A5{;Ep5ZeQ_v@-Qw-t&!3{?|&*APHNF28exH$)RGVj^9X zqGl9#>;>8-u^Q11=JK8OB`!&nFiYa}^@bp#Bki9o&LkyKtZgUm3CiBWNK=Q``)7#L zilHu~zWRpbx($_9Zxj3@a2Rq??}FYWnqiuWd60Zeu5R8J#mAvbxO=)e74YBA5d59e zlY5NW_6BKAq8mSoPT+U+2`qslQfE=MoihOH9AZr&KL}T0&QsK?K3Dp>(FW5Sku2kh zb$_;J5`03wDV0b{2e6ZJ5^p=?x@TcDkuP>(OZH7RBwYyivB|3&AVXI+%c)(*L^i_e zt=bA&Kv5tl{0{(>Kx@Cu$UF|E2woe(XNPc;8-X#>=boQ=Lg<{=XQr^!_s^KzKo<0C zHi~WM$p_vheI-Bts5sk#uG0S0%`d9rO1=O9AOJ~3K~$rA%Px>xK=jZh^XdS%zzVdL z3P>4y%8I2GLfYE%JfGP>t+=fNzkPlKAG-6cv}#zTBNg8Qttb)Mz-Lp!H9t?;7V4o; z$`IbyE`kdPyl;0O-h1&uT!6tf;dmtxcNuUstYj2ml-GSU;%@Bn-V7xxB0Ps>qBzWLD&(3k@jaxWBT|4Sg%^K_)OxfWCYHpwN*8*9l$5=yoe{xZsFV%yKq>{A=*Mqk5gqNA=5tTXp5QC;$$1k*6{gr zg32Vo;p@m$3Zb1!ZL(yLAluyt+`D(Y9ftdy1GO4hRCKsbTjZ#O1&LP5Wldweo@${o zQ;g>K$6*{4$55Qu5sp*JfQS_%Qh$&U%QL##%U@9z(x_aAm1!`F(bO4j^#-STc`mBv z%1eSngh8VRA;}($ES9N63Az#L-dHw6EoefSRYAudrr(ftmkiF<^3Sa?6Fr8{)D)w= z2H{M_B52w3J6_l5v^ zJCJC0lgq4ABc^gsDf`OkU#dnVa(Igk_`2R1)R0K21Oyyb0g z!w>wxyKr*xa(ak0r~k3x%fSP|Sg6oBFsQjp$iR@mt##m3fWb|diD9t>a}kK2%@Yd_ z*UB0!@8mBU!4R~F**R!cV<1@}_g8Y>!ApSkIxmIrfoED4xJF+}g1;#q^A`okJOg7m3j>}3>t5v2? zm|3fz5KPnv%?oGGmfp1a8cP2`o;yqVurS`PIwWplI|ZUH3fdTIlUWc?C|MD@oq;o&EU_cDnJV^Co}yf0`82~ zE$bNLC~KHia_s#6X?H1bY<&i=EpA2>hH-Gqm{DahEs?1-HK`SMEHJVcvdR|Tm!#D2 z()EVd=5W*T#*yM1a*qG)+B5iCI94d3rc;71wS_M}MzCP-LCwY@SC8&Z6H8q=fWTD% zKX&0>Jc1)E>%d+E1Kd6{sV2A3^*nco58l$;-ZC{}Wde)1V~oJw2h0SQmmQAB?8;-C zQ;Gr=5^_3oXnTO1uOFQasr+7V!}@)rMw)%CmSw|Z+9kTVl#+i*E3f(`!Ny$F`0k8u zFpIC$gLqZkMS#_qsy*ek(Szb@&NjoO+61>Qy^@mV{~nQXjiH1l2XJPFuj3qF++WA- z;~b>hz~m2PW-UGYDKCr#AM|5;CCb|MN(7?l2fD{uvgqBsQ#&QR6`&a0wXn3#ayEU%!L$Z0boleS1AF> z0|O=sqw+%$xV#ViYtVvI>#vG%o~b=}bXgAR8;9ffJ`N0t!*Mv?gyX^cAN)7(oZfj?#dwy7UUe2# zwFQ#St=|ykWy^ceo=8i0Z+;q1R|Og)y3&R~Z@W%NSJW2YcrRp23CVV?6klx8m4#nKV(uh~Uf14Yu3i)*El&m;Z-f#!D}}%vi8ZF=^#X zE^lngEV=7#4|Rm#5jXtw!v2%Kxe8f(x%R3Qt3IoVHOA(zFL(|Id! 
    @@ -256,7 +313,6 @@

    Chapel

that one can write an MPI-like SIMD program by explicitly launching a function on each core:

    -
    coforall loc in Locales do 
         on loc do
             coforall tid in 0..#here.maxTaskPar do
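
For comparison, a rough single-node Python sketch of the same explicit task-per-core pattern (my addition, not from the original post, using only the standard library):

# A hedged analogue: explicitly launch one task per local core and
# have each report in, SIMD-style.
import multiprocessing as mp

def work(tid):
    return "task %d of %d reporting in" % (tid, mp.cpu_count())

if __name__ == "__main__":
    ncores = mp.cpu_count()
    with mp.Pool(processes=ncores) as pool:
        for line in pool.map(work, range(ncores)):
            print(line)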
    @@ -276,14 +332,12 @@ 

    Chapel

    an upcoming package manager will likely lower the bar to future contributions.

    -

    Chapel also lacks a REPL, which makes experimentation and testing somewhat harder — there’s no equivalent of JuliaBox where one can play with the language at a console or in a notebook. There is an effort in that direction now which may be made easier by ongoing work on the underlying compiler architecture.

    -

    Similarities and differences

    Standard library

    @@ -292,7 +346,6 @@

    Standard library

    have good documentation, and the basic modules or capabilities one would expect from languages aimed at technical computing:

    -
    • Complex numbers
    • Mathematical function libraries
@@ -315,13 +368,11 @@

      Standard library

concise syntax and “do the right thing” approach are particularly helpful for interactive use2, which is a primary use-case of Julia.

      -

On profiling, the Julia support is primarily for serial profiling and text-based; Chapel has a very nice tool called chplvis for visualizing parallel performance.

      -

      Other packages

Julia’s early adoption of a package management framework and very

@@ -342,14 +393,12 @@

      Other packages

      so that these high-quality tools were more readily visible to new users.

      -

      On the other hand, there are almost no packages available for Chapel outside of the main project. There are efforts to develop a package manager inspired by cargo (Rust) and glide (Go); this would be an important and needed development, almost certainly necessary to grow the Chapel community.

      -

      Language features

The biggest language feature difference is undoubtedly Julia’s

@@ -363,7 +412,6 @@

      Language features

      even compared to other compilers, makes the development cycle much slower than it would be with Julia or Python.

      -

Beyond that, Julia and Chapel are both quite new and have functionality one might expect in a modern language: first-class functions, lambda functions, comprehensions, keyword/optional parameters, type

@@ -372,7 +420,6 @@

      Language features

      generating online documentation from source code and embedded comments.

      -

More minor, but something that quickly comes up: there’s a difference in command-line argument handling, which reflects the use cases each team finds important. Both give access to an argv-like array of

@@ -384,7 +431,6 @@

      Language features

      on the command line by prefixing the const with config and running the program with --n 20.
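
A hedged Python sketch of the same idea (my addition, with argparse standing in for Chapel’s config mechanism; the flag name n is just illustrative):

# Assumption: this loosely mirrors running a Chapel program with --n 20.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--n", type=int, default=10, help="problem size")
args = parser.parse_args()
print("n =", args.n)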

      -

      Simple computational tasks

Here we take a look at a couple of common single-node scientific

@@ -392,13 +438,11 @@

      Simple computational tasks

to compare the language features. Full code for the examples is available on GitHub.

      -

      Linear algebra

For linear algebra operations, Julia’s MATLAB lineage and interactivity really shine:
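
(For reference, and as my addition rather than part of the benchmark, the dense solve Julia writes as x = A \ b looks like this in Python/NumPy:)

# NumPy equivalent of Julia's backslash solve; shapes and seed are arbitrary.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((100, 100))
b = rng.standard_normal(100)
x = np.linalg.solve(A, b)       # direct solve, like A \ b
assert np.allclose(A @ x, b)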

-

@@ -489,13 +533,11 @@

      Linear algebra

      specifying a specific solver (which Julia also allows) is probably advantageous.

      -

      Stencil calculation

Below we take a look at a simple 1-d explicit heat diffusion equation, requiring a small stencil, and see how it compares across the languages.
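
As a reference point for what the kernel does, here is my own NumPy sketch of the update (not the benchmarked code; the timed Python version ran a similar kernel under numba):

# One explicit 1-d heat diffusion step per iteration; boundaries held fixed.
# u[i] += kappa*dt/dx**2 * (u[i-1] - 2*u[i] + u[i+1])
import numpy as np

def diffuse(u, kappa, dx, dt, nsteps):
    coeff = kappa * dt / dx**2
    for _ in range(nsteps):
        u[1:-1] += coeff * (u[:-2] - 2.0 * u[1:-1] + u[2:])
    return u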

      -
      Julia
      @@ -586,7 +628,6 @@

      Stencil calculation

      calculated as total run time minus the final time spent running the calculation)

      -
      Julia
      @@ -600,7 +641,6 @@

      Stencil calculation

      Julia wins this test, edging out Chapel by 16%; Python with numba is surprisingly (to me) fast, coming within a factor of two.

      -

      Kmer counting

Fields like bioinformatics or digital humanities push research

@@ -616,7 +656,6 @@

      Kmer counting

      handling and simple string slicing. Here we’re using pure Python for the Python implementation:
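
The heart of that pure-Python version is just a dictionary and string slices; a minimal sketch (mine, simplified from the benchmarked code):

# Count every k-length substring of a sequence.
from collections import defaultdict

def count_kmers(sequence, k):
    counts = defaultdict(int)
    for i in range(len(sequence) - k + 1):
        counts[sequence[i:i + k]] += 1
    return counts

print(count_kmers("ACGTACGT", 4))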

      -
time    Julia    Chapel    Python + Numpy + Numba    Python + Numpy
      @@ -683,7 +722,6 @@

      Kmer counting

      FASTA file for the reference genome of a strain of E. coli, we get timings as below

      -
      Julia
      @@ -698,27 +736,23 @@

      Kmer counting

      actually a given, even for a compiled language, as those features are heavily optimized in Python implementations.

      -

(One caveat about the timings: pairwise string concatenation in Julia is slow; when reading in the file, concatenating the sequence data in Julia as it was done in the other languages resulted in a runtime of 54 seconds! Instead, all sequence fragments were read in and the result put together at once with join().)

      -

      Parallel primitives

      Since we’re interested in large-scale computation, parallel features are of particular interest to us; here we walk through the parallel primitives available to the languages and compare them.

      -

      Remote function execution

      Both Julia and Chapel make it easy to explicitly launch tasks on other processors:
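
As a language-neutral illustration (my addition, not from the post), the same explicit launch-and-fetch pattern with Python’s concurrent.futures:

# Submit work to other processes and fetch the results back.
from concurrent.futures import ProcessPoolExecutor
import os

def whoami(tag):
    return "task %d ran in process %d" % (tag, os.getpid())

if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=4) as ex:
        futures = [ex.submit(whoami, i) for i in range(4)]
        for f in futures:
            print(f.result())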

      -
      time Julia Chapel Python
      @@ -765,14 +799,12 @@

      Remote function execution

      sensible (as determined by the extremely useful hwloc library).

      -

As seen above, Chapel distinguishes between starting up local and remote tasks; this is intrinsic to its “multiresolution” approach to parallelism, so that it can take advantage of within-NUMA-node, across-NUMA-node, and across-the-network parallelism in different ways.
      -

      Futures, atomics and synchronization

Once one can have tasks running asynchronously, synchronization

@@ -782,15 +814,12 @@

Futures, atomics and synchronization

blocking until the future has been “filled”. Futures can only be filled once.

      -

In fact, in the above, Julia’s remotecall_fetch performs the remote call and then fetches, mimicking a blocking call; the begin blocks in Chapel do not block.
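
The fill-once, block-on-read behaviour can be sketched outside either language; here with Python’s concurrent.futures.Future (an assumption of mine, not the post’s code):

# A future is filled exactly once; readers block until it is.
from concurrent.futures import Future
import threading, time

fut = Future()

def producer():
    time.sleep(0.1)
    fut.set_result(42)   # fills the future; this can happen only once

threading.Thread(target=producer).start()
print(fut.result())      # blocks until the future has been filled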

      -

      Futures work the following way in Julia and Chapel:

      -

      Julia
      @@ -819,13 +848,11 @@

Futures, atomics and synchronization

variables, and sync blocks for joining tasks launched within them before proceeding.

      -

      Parallel loops, reductions, and maps

Both languages make parallel looping, and reduction over those parallel loops, straightforward:
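
A minimal Python stand-in for a parallel loop with a reduction over it (my sketch, not from the post):

# Sum a million loop-iteration results computed across a process pool.
from multiprocessing import Pool

def term(i):
    return 1.0 / (i * i)

if __name__ == "__main__":
    with Pool() as pool:
        total = sum(pool.map(term, range(1, 1_000_001)))
    print(total)   # approaches pi**2 / 6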

      -
      Julia
      @@ -871,7 +898,6 @@

      Threading

      experimental feature in Julia, not quite ready to use for production work yet.

      -

      Distributed data

Julia has a

@@ -880,7 +906,6 @@

      Distributed data

      at any index, but only the local part can be written to. Chapel is built around its PGAS distributions and iterators atop them.

      -

Julia’s DistributedArrays are known not to perform particularly well, and have been taken out of the base language since 0.4. They have been worked on since in preparation for the 0.6 release; however,

@@ -888,7 +913,6 @@

      Distributed data

      at least I couldn’t get it working. This section then mostly covers the previous version of DistributedArrays.

      -

Accessing remote values over DistributedArrays is quite slow. As such, DistributedArrays performs quite badly for the sort of thing one might want to use Chapel distributed arrays for; they’re really

@@ -906,12 +930,10 @@

      Distributed data

      array for rather than just having each task have its own local array.

      -

      However, for largely local computation (such as coordinator-worker type operations), the distributed arrays work well. Here we have a STREAM calculation:
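
(The STREAM triad kernel referred to here, sketched as my own addition with Dask arrays, so that each chunk’s update runs where its data lives:)

# a = b + scalar*c over block-distributed arrays; sizes are illustrative.
import dask.array as da

n, scalar = 10_000_000, 3.0
b = da.ones(n, chunks=n // 8)
c = da.ones(n, chunks=n // 8)
a = (b + scalar * c).compute()
print(a[:3])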

      -
      Julia
      @@ -972,7 +994,6 @@

      Communications

channels, like Go, which are something like a cross between queues and futures; they can keep being written to from multiple tasks:

      -
      @everywhere function putmsg(pid)
           mypid = myid()
           msg = "Hi from $mypid"
      @@ -994,7 +1015,6 @@ 

      Communications

      is done implicitly through remote data access or remote code invocation.
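
To make the channel pattern above concrete outside Julia, a small Python analogue (my assumption, not the post’s code) using multiprocessing.Queue, which several producers can keep writing into:

# Four producers write greetings into one shared, channel-like queue.
from multiprocessing import Process, Queue
import os

def putmsg(q):
    q.put("Hi from %d" % os.getpid())

if __name__ == "__main__":
    q = Queue()
    procs = [Process(target=putmsg, args=(q,)) for _ in range(4)]
    for p in procs:
        p.start()
    for _ in procs:
        print(q.get())   # one message per producer
    for p in procs:
        p.join()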

      -

      A 2d advection problem

Having seen the parallel computing tools available in each language,

@@ -1005,14 +1025,11 @@

      A 2d advection problem

      velocity field; shown below is the initial condition, the blob moved slightly after a few timesteps, and the difference.

      -

      2D Advection Plot

      -

      We do this in Julia using DistributedArrays, in Chapel using Stencil-distributed arrays, and in Python using Dask arrays. The relevant code snippets follow below.
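
For a feel of the Dask side, here is my simplified sketch (not the benchmarked snippet) of one upwind step on a block-distributed grid, with one ring of ghost cells exchanged per step:

# map_overlap handles the ghost-cell exchange; the stencil itself is local.
import dask.array as da
import numpy as np

def step(u, cfl=0.1):
    return (u - cfl * (u - np.roll(u, 1, axis=0))
              - cfl * (u - np.roll(u, 1, axis=1)))

u = da.random.random((1000, 1000), chunks=(250, 250))
u = da.map_overlap(step, u, depth=1, boundary="periodic").compute()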

      -
      Julia
      @@ -1146,7 +1163,6 @@

      A 2d advection problem

      require a lot of bookkeeping to use; both Chapel and Dask are much more straightforward.

      -

The one-node timings here aren’t even close. By forcing Chapel to run on each core separately, the performance isn’t that different than Julia. But when informed that there is one “locale” and letting

@@ -1155,7 +1171,6 @@

      A 2d advection problem

      on a single 8-processor node, running a 1000x1000 grid with all cores takes the following amount of time:

      -
      Julia
      @@ -1176,13 +1191,11 @@

      A 2d advection problem

      overhead dominates; Julia seems to suffer that overhead even with just one process.

      -

Another interesting thing here is that Python+Numpy+Dask (numba didn’t help here) is competitive even with Chapel if you force Chapel to not use threading on-node, and writing the program in either was much easier than in Julia.

      -

      Strengths, Weaknesses, and Future Prospects

Both Julia and Chapel are perfectly usable today for problems that

@@ -1191,7 +1204,6 @@

Strengths, Weaknesses, and Future Prospects

both have significant potential and “room to grow” beyond their current capabilities; but both face challenges as well.

      -

      Julia

Julia’s great flexibility - the metaprogramming and the type system

@@ -1209,7 +1221,6 @@

      Julia

      Julia’s right now, the basic pieces are there and it certainly could be in the future.

      -

Many of Julia’s disadvantages are inevitable flip sides of some of those advantages. Because of the dynamic nature of the language and its reliance on JIT and type inference, it is

@@ -1224,7 +1235,6 @@

      Julia

      ecosystem means that the package listing is littered with abandoned and broken packages.

      -

But some of the disadvantages seem more self-inflicted. While the language has been public and actively developed for over five years,

@@ -1250,11 +1260,9 @@

      Julia

      breaks the ParallelAccelerator, and Julia 0.6 is needed for the @simd feature with DistributedArrays.

      -

      So Julia living up to its potential is not a given. If I were on Julia’s project team, things that would concern me would include:

      -
      Peak Julia?
Julia grew very quickly early on, but since then seems to have topped out;

@@ -1335,12 +1343,10 @@

      Julia

      developers and users, and onboarding more people into core internals development would help the underlying technology.

      -

      Chapel

      If I were on the Chapel team, my concerns would be different:

      -
      Adoption
It’s hard to escape the fact that Chapel’s user base is very

@@ -1398,13 +1404,11 @@

      Chapel

      the space of those programs by leveraging early adopters into writing packages.

      -

      My conclusions

      This is entitled “My conclusions” because my takeaways might reasonably be different than yours. Here’s my take.

      -

Both projects are strong and usable, right now, at different things

I’d have no qualms about recommending Chapel to someone who wanted

@@ -1415,7 +1419,6 @@

Both projects are strong and usable, right now, at different things

concurrency than parallelism). Julia also seems like a good choice for prototyping a DSL for specific scientific problems.

      -

Neither project is really a competitor for the other; for Julia the nearest competitor is likely the Python ecosystem, and for Chapel it would be status quo (X + MPI + OpenMP/OpenACC) or that people

@@ -1423,7 +1426,6 @@

Both projects are strong and usable, right now, at different things

Spark (which is good at a lot of things, but not really scientific simulation work).

      -

Scientific computing communities are very wary of new technologies (it took 10+ years for Python to start getting any traction), with the usual, self-fulfilling, fear being “what if it goes away”. I

@@ -1440,7 +1442,6 @@

Both projects are strong and usable, right now, at different things

case, there are clear paths to follow (porting or upgrading) to keep your code working.

      -

      Both projects have as-yet untapped potential

What’s exciting about both of these projects is how far they could

@@ -1452,7 +1453,6 @@

Both projects have as-yet untapped potential

it could make large-scale scientific computation accessible to a much broader community of scientists (and thus science).

      -

Julia has the same potential to broaden computational science on the desktop, and (at least in the near term) for computations requiring only minimal communication like coordinator-worker computations.

@@ -1460,8 +1460,8 @@

Both projects have as-yet untapped potential

the distributed-memory computing front, and there will be something of a race to see which gets there first.

      -
      +
      1. @@ -1473,4 +1473,76 @@

        Both projects have as-yet

      -
      \ No newline at end of file + + + +
      + +
      + + + + + + + + + + + + + + + + diff --git a/_posts/gaborsamu/2017-8-31-spectrumlsf_armv8.md b/2017/standing-up-a-ibm-spectrum-lsf-community-edition-cluster-on-arm-v8/index.html similarity index 78% rename from _posts/gaborsamu/2017-8-31-spectrumlsf_armv8.md rename to 2017/standing-up-a-ibm-spectrum-lsf-community-edition-cluster-on-arm-v8/index.html index ae69c9c..a79afd5 100644 --- a/_posts/gaborsamu/2017-8-31-spectrumlsf_armv8.md +++ b/2017/standing-up-a-ibm-spectrum-lsf-community-edition-cluster-on-arm-v8/index.html @@ -1,18 +1,87 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2017-08-31 18:01:46' -layout: post -original_url: https://www.gaborsamu.com/blog/spectrumlsf_armv8/ -slug: standing-up-a-ibm-spectrum-lsf-community-edition-cluster-on-arm-v8 -title: Standing up a IBM Spectrum LSF Community Edition cluster on Arm v8 ---- - -

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      Standing up a IBM Spectrum LSF Community Edition cluster on Arm v8

      +

So, you’ve got yourself a shiny new (or maybe not) system based upon a 64-bit Arm (Arm v8) processor that you want to put through its paces. For me, this happens to be a MACCHIATObin board powered by a Marvell ARMADA 8040 based on ARM Cortex A-72 cores and installed with Ubuntu 16.04.3 LTS. You can read about my shenanigans running HPL on my system with a passively cooled CPU and running up against some overheating conditions - much like the head gasket failure in my car this past summer - but I digress!

@@ -442,4 +511,76 @@ # All processes entering MPI_Finalize

There you have it. If you’re after more information about IBM Spectrum LSF, visit here.

\ No newline at end of file
      + +
      + + + + + + + + + + + + + + + + diff --git a/2017/stashcache/index.html b/2017/stashcache/index.html new file mode 100644 index 0000000..ab5e667 --- /dev/null +++ b/2017/stashcache/index.html @@ -0,0 +1,221 @@ + + + + + + + StashCache - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
This is a crosspost from Derek’s Web (Thoughts from Derek). See the original post here.

      StashCache

      +

      StashCache is a framework to distribute data across the Open Science Grid. It is designed to help opportunistic users to transfer data without the need for dedicated storage or frameworks of their own, like CMS and ATLAS have deployed. StashCache has several regional caches and a small set of origin servers. Caches have fast network connections, and sizable disk storage to quickly distribute data to the execution hosts in the OSG.

      + +

      StashCache is named for the Stash filesystem located at the University of Chicago’s OSG-Connect service. It is primarily intended to be used to cache data from the Stash filesystem, though, data origins exist for other experiments.

      + +
      + +Regional Caches + +
      Regional Caches
      +
      + +

      Components

      +

      The worker nodes are where the user jobs will run. The transfer tools are used on the worker nodes to download data from StashCache caches. Worker nodes are geographically distributed across the US, and will select the nearest cache based upon a GeoIP database.
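
A hypothetical sketch of that nearest-cache choice (hostnames and coordinates invented purely for illustration):

# Pick the geographically closest regional cache for a worker node.
import math

CACHES = {
    "cache-east.example.org":    (40.7,  -74.0),
    "cache-central.example.org": (41.9,  -87.6),
    "cache-west.example.org":    (37.8, -122.3),
}

def nearest_cache(lat, lon):
    def dist(p):
        # small-angle approximation; fine at continental scale
        return math.hypot(p[0] - lat, (p[1] - lon) * math.cos(math.radians(lat)))
    return min(CACHES, key=lambda host: dist(CACHES[host]))

print(nearest_cache(29.7, -95.3))   # a Texas worker picks the central cache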

      + +
      + StashCache Architecture +
      StashCache Architecture
      +
      + +

The caches are distributed to computing sites across the U.S. They are running the XRootD software. The worker nodes connect directly to the regional caches, which in turn download from the Origin servers. The caching proxies discover the data origin by querying the Redirectors. The caching algorithm used is Least Recently Used (LRU). In this algorithm, the cache will only delete cached data when storage space is near capacity, and will delete the least recently used data first.
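
A minimal sketch of that eviction policy (my addition, not the XRootD implementation):

# Least Recently Used: evict only when over capacity, oldest access first.
from collections import OrderedDict

class LRUCache:
    def __init__(self, capacity):
        self.capacity = capacity
        self.store = OrderedDict()

    def get(self, key):
        value = self.store.pop(key)   # KeyError means a cache miss
        self.store[key] = value       # re-insert as most recently used
        return value

    def put(self, key, value):
        self.store[key] = value
        self.store.move_to_end(key)
        while len(self.store) > self.capacity:
            self.store.popitem(last=False)   # drop the least recently used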

      + +

      The origin servers are the primary source of data for the StashCache framework. StashCache was named after the Stash data store at the University of Chicago’s OSG-Connect service, but other origins also utilize the framework. The origin is the initial source of data, but once the data is stored on the Caches, the origin is no longer used. Updates to data on the origin are not reflected in the caches automatically. The caches treat the data from the origin as immutable, and therefore do not check for updates. If a user requires new data to be pulled into the cache, the name or location of the data on the origin must be changed.

      + +

      Redirectors are used to discover the location of data. They are run only at the Indiana Grid Operations Center (GOC). The redirectors help in the discovery of the origin for data. Only the caching proxies communicate with the redirectors.

      + +

      Tools to transfer

      +

Two tools exist to download data from StashCache: CVMFS and StashCP. With either of these tools, the first step for users is to copy the data to the Stash filesystem. Once the user has an OSG-Connect account, they may copy their data to the /stash//public directory. Once there, both of the tools can view and download the files.

      + +

      CVMFS (CERN Virtual Machine File System) is a mountable filesystem that appears to the user as a regular directory. CVMFS provides transparent access for users to data in the Stash filesystem. The namespace, such as the size and name of files, and the data are separate in the Stash CVMFS. CVMFS distributes the namespace information for the Stash filesystem over a series of HTTP Forward Proxies that are separate from the StashCache federation. Data is retrieved through the Stash proxies.

      + +

      In order to map the Stash filesystem into CVMFS, a process is constantly scanning the Stash filesystem checking for new files. When new files are discovered, they are checksummed and the meta-data is stored in the CVMFS namespace. Since this scanning can take a while for a filesystem the size of Stash, it may take several hours for a file placed in Stash to be available through CVMFS.

      + +

      Using CVMFS, copying files is as easy as copying files with any other filesystem:

      + +
      $ cp /cvmfs/stash.osgstorage.org/user/<username>/public/… dest/
      +
      +
      + +

      CVMFS access also has other features that are beneficial for Stash access. CVMFS will cache files locally so that multiple accesses to the same file on the same node will be very fast. Also, CVMFS can fallback to other nearby caches if the first fails.

      + +

StashCP is the second tool that can download data from StashCache. StashCP uses the CVMFS mount described above, falling back to the caching proxies and eventually the origin. The order of operations that StashCP performs is:

      + +
        +
1. Check for the file in the CVMFS mount under /cvmfs/stash.osgstorage.org/…
2. If the CVMFS copy fails, connect directly to the nearest proxy and attempt to download the file.
3. If the proxy fails, then connect directly to the origin server.
      + +

      Since StashCP doesn’t rely on the CVMFS mount only, files are immediately available to transfer with StashCP.
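
That fallback order is easy to sketch; here is a hedged Python outline (function and variable names invented, not the actual stashcp code):

# 1. try the CVMFS mount; 2. the nearest cache; 3. the origin server.
import os, shutil, urllib.request

def stash_fetch(path, dest, cache_url, origin_url):
    cvmfs_path = "/cvmfs/stash.osgstorage.org" + path
    if os.path.exists(cvmfs_path):
        shutil.copy(cvmfs_path, dest)
        return
    for base in (cache_url, origin_url):
        try:
            urllib.request.urlretrieve(base + path, dest)
            return
        except OSError:
            continue
    raise IOError("could not fetch " + path)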

      + +

      StashCP is distributed with OSG-Connect’s module system. Using StashCP is nearly as simple as using the cp command:

$ module load stashcp
$ stashcp /user/<username>/public/… dest/

      Conclusions


The StashCache framework is very useful for downloading data to execution hosts across the OSG. It was designed to help opportunistic users transfer data without the need for dedicated storage or data-distribution frameworks of their own, such as those CMS and ATLAS have deployed.


      StashCache has been used to transfer over 3 PB of data this year. Check out some of the papers written about using StashCache:

• Derek Weitzel, Brian Bockelman, Duncan A. Brown, Peter Couvares, Frank Würthwein, and Edgar Fajardo Hernandez. 2017. Data Access for LIGO on the OSG. In Proceedings of PEARC17, New Orleans, LA, USA, July 09–13, 2017, 6 pages. DOI: 10.1145/3093338.3093363
• Derek Weitzel, Brian Bockelman, Dave Dykstra, Jakob Blomer, and René Meusel. 2017. Accessing Data Federations with CVMFS. In Journal of Physics: Conference Series.
diff --git a/_posts/gaborsamu/2017-8-29-turning_up_heat_armv8.md b/2017/turning-up-the-heat-on-my-armada-8040/index.html

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
      Turning up the heat...on my Armada 8040


Although I took delivery of a shiny new SolidRun Marvell macchiatoBIN a few months back (end of May), I’ve not really had a chance to put it through its paces until now. For those of you who are not familiar with the board, it’s a high-performance 64-bit Arm (v8) board designed primarily for networking. It’s based on the Marvell ARMADA 8040 processor, for those who like to keep track. For those looking for more information about the board, there is a community page here.
[…] I should probably stop abusing this poor board and wait until my Noctua industrial fan arrives :)

diff --git a/_posts/gaborsamu/2018-9-14-spectrumlsf_gpu_autoconfig.md b/2018/a-hands-on-look-at-gpu-autoconfig-in-ibm-spectrum-lsf/index.html

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      A hands-on look at GPU "autoconfig" in IBM Spectrum LSF


It’s been a long time since I’ve posted to my goulash blog. I’ve not disappeared; rather, I’ve been writing articles for the IBM Accelerated Insights solution channel on HPCWire. Since then, I’ve been fortunate enough to have access to a POWER9 based developer system equipped with an NVIDIA Tesla V100 PCIe card to put through its paces. This is very timely for me, as there is some exciting new functionality in IBM Spectrum LSF known as GPU auto detect, which […]


Back in the Dark Ages (no, not literally), administrators of HPC clusters had to specify in the configuration of the workload scheduler which nodes were equipped with GPUs, the model of the GPUs, and so on. This was relatively straightforward when nodes were equipped with single GPUs and clusters were smaller. With the proliferation of GPUs, nodes are frequently equipped with multiple GPUs and […]
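As a hedged sketch of what this means in practice (the parameter and commands below are recalled from the IBM Spectrum LSF 10.1 documentation; treat the exact syntax as an assumption rather than a reference), enabling the auto detect behaviour is a single lsf.conf switch, after which jobs can request GPUs directly:

# lsf.conf: let LSF detect installed GPUs and build the GPU resource definitions itself
LSF_GPU_AUTOCONFIG=Y

# pick up the change, then request a GPU at submission time
$ lsadmin reconfig && badmin mbdrestart
$ bsub -gpu "num=1:mode=exclusive_process" ./my_gpu_app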

This has only been a teaser of the GPU support capabilities in Spectrum LSF. Spectrum LSF also includes support for NVIDIA DCGM, which is used to collect GPU resource utilization per job. But that’s a topic for another blog :). À la prochaine fois!

diff --git a/_posts/dursi/2018-7-16-incrementalism-for-scientific-developmenthtml.md b/2018/a-killer-feature-for-scientific-development-frameworks-an-incremental-path-to-maturity/index.html

This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

A Killer Feature for Scientific Development Frameworks: An Incremental Path To Maturity


      ( Note: This is a bit of a work in progress; even more so than usual, comments/criticisms/additions welcome )

      The Stages of Research Software Development

      Research software development covers a lot of ground — it’s the development of software for research, and research is a broad endeavour that covers a lot of use cases.


      The part of research software development that I find the most interesting is the part that is a research effort itself; the creation of new simulation methods, new data analysis techniques, new ways to combining different sorts of approaches. Like any new tools, this work can enable people to ask entirely new questions, or answer old questions in new ways, pushing scholarship forward along previously unexplored paths.


      But for new methods to live up to their potential and have that impact, they have to be developed and disseminated. As a community, we’re still developing the training and tool chains that make this routine; without them, there are still too many bottlenecks in the method development pipeline that mean good ideas for new tools get delayed, sometimes indefinitely, before adoption.


      Computational tools for science and scholarship go through stages of development like any experimental technique:

1. Will this even work? Testing the core ideas out, usually interactively
2. Will this answer my question? Developing a very early prototype on your own data set/conditions
3. […]

[…] that are used to describe the maturity of technologies and tools, now often used when talking about commercialization.


        Not every idea has to go through all four stages to be successful; sometimes a tool will be a ‘one-off’ or nearly so, used for one or two projects and that’s it. This isn’t at all a bad thing, if it served its one purpose well.


But each transition between stages represents a potential barrier for ideas becoming new tools: a jump in the level of development skills and effort required. Every tool that stalls between stages solely because there isn’t training or tooling to allow incremental progress along the pipeline is a tool that is unnecessarily lost to researchers who might have made use of it.


        Training Research Software Developers To Tackle all Stages

The set of techniques that we mean when we talk about “Software Engineering” […]

[…] “well-engineered” software for tools at stage 1 or 2, where the answers will often turn out to be “No”.


It was understood fairly early that the lifecycle for scientific projects differed a great deal from conventional software development. Realizing that correspondingly different training was needed, in the late 90s […]

[…] but those issues will vary from research project to research project, and the goal is to get the students to the point where they can learn additional material themselves.


There still isn’t a lot of training for researchers to make the next big jump, from prototype-for-self to tool-some-others-can-use. However, authors are beginning to write resources for students wanting to learn how to proceed [1,2,3,4].


The second-biggest transition in that list, that from 3 to 4, is the one I worry the least about. It’s at that stage that existing software engineering teaching, tooling, and resources become the most helpful. And while the effort to learn those techniques […]

[…] useful enough that it is much easier to find the time, people, and resources to complete a “research infrastructure”-grade implementation.


Of course, once the set of ideas is implemented as research infrastructure, it’s much harder for most practicing researchers to get under the hood and start tinkering with it by making changes or incorporating additional ideas. And so the cycle starts again.


        The Best Scientific Development Frameworks will Allow an Incremental Path Towards Maturity

        While the research computing community has made great progress in creating development training specific to their needs, there’s been much less success with programming languages, tools, or frameworks which reflect the path of research programs.


        Arguably the best programming language for science, and certainly one of the most successful, has been a general purpose programming language, Python. I think the reasons for this include the relatively smooth path scientific software development can take towards maturity in the Python ecosystem:

• One can easily and rapidly test out ideas at the REPL and in a notebook. (Stage 1)
• The large standard library and even larger ecosystem lets you quickly implement a lot of functionality (Stages 1/2)
• […]


To their credit, the Julia community has come closest, but they are focussed on a narrow piece of the issue; the need for a framework for incremental adoption becomes “one […]

          The Developing Field of Research Software Engineering

It’s been fascinating to watch from the sidelines over the past two decades […]

[…] challenges of developing software that itself is research into methods development.


          I’m still somewhat pessimistic, however, on the state of development frameworks for research computing. My current work with web services development just drives home the point of how scarce the tooling is for building research software.


The history of research computing since Fortran’s dominance has been that research software engineering has grafted itself on to a set of existing general purpose programming languages like C++ […]

[…] computing. There are exciting experiments here and there with new languages, but none are yet particularly compelling.


As Data Science/Data Engineering becomes more and more common in commercial enterprises and as a computing use case, we may yet end up finding frameworks which, if not actually designed for science, […]

[…] work has led to a generation of research software developers who are ready to take the plunge.

1. […]
diff --git a/2018/a-week-in-the-life-of-an-sc-attendee/index.html b/2018/a-week-in-the-life-of-an-sc-attendee/index.html
This is a crosspost from Glenn K. Lockwood (Personal thoughts and opinions of a supercomputing enthusiast). See the original post here.

      A week in the life of an SC attendee


      Last week was the annual Supercomputing conference, held this year in Dallas, and it was as busy as they always are.  Every year I take plenty of photos and post plenty of tweets throughout the week, but this year I thought it might be fun to share some of those photos (and the related things I learned) now that the dust has settled.  Since some people might also be interested in how someone might approach the conference from a technical and philosophical perspective, I figured I’d write a more general piece documenting my entire SC experience this year.

      This post wound up being a massive, meandering, chronological documentary of a week in my life that includes both technical and non-technical commentary.  For anyone who is only interested in the technical insights I gained during SC, check out the items prefixed with (tech) in this table of contents:
• Before the Conference
• Saturday
• Sunday
• Monday
  • (tech) PDSW-DISCS 2018 Highlights
  • SC Exhibition Gala
  • The Beowulf Bash
• Tuesday
  • (tech) Technical Program, Data and Storage Paper Track Highlights
  • Interlude of Meetings
  • (tech) Cray and Fujitsu’s Exascale System Hardware on the Expo Floor
  • (tech) Analyzing Parallel I/O BOF Highlights
  • The Cray Celebration
• Wednesday
  • SC Student Career Fair and a Booth Talk
  • (tech) Flash, Disk, and Tape Technologies on the Expo Floor
  • (tech) Recap of the IO-500/VI4IO BOF
• Thursday
  • (tech) WekaIO and Micron at the Exhibitor Forum
  • NSF Future Directions BOF
  • My SC Paper
  • SC Technical Program Reception at the Perot Museum
• Friday
• After the Conference
      Everything that’s not labeled (tech) is part diary and part career development perspective.  Hopefully someone will find something in here that’s of some value.

      Finally, disclosures:
• I omitted some names in the interests of respecting the privacy of the folks who took the time to talk to me one-on-one. If you’re part of this story and don’t mind having your name out there, I’d be happy to include it.
• Everything I paraphrase here is public information or conjecture on my part. Nothing in this post is either confidential or sensitive. That said, check your references before citing anything here. I don’t know what I’m talking about.
• Everything here is my personal opinion and does not necessarily reflect the viewpoint of my employer or its funding agency. I attended the conference as part of the regular course of business in which I am employed. However, I took all photos for personal purposes, and the entirety of this post was written on my own personal time.
Before the conference

Everyone’s SC experience is different because it draws such a diverse range of professionals. There are plenty of activities for everyone ranging from students and early-career staff to senior management and leadership, and people on different career tracks (e.g., facilities staff, computer science researchers, program managers, product sales) are likely to be drawn to very different parts of the conference agenda. My priorities during the week of SC are definitely shaped by where I am in my career, so when filling out my calendar a few weeks ahead of the conference, I considered the following:

      My job is half research and half facilities staff.  50% of my time is funded by grant money to do applied research in characterizing parallel I/O systems.  The other half of my time is spent staying current on emerging technologies in computing and storage.  These two responsibilities mean that my SC is usually a mix of attending technical program sessions (to see what my peers in research are doing and see what research ideas might turn up in future technologies) and engaging with vendors.

      I work in advanced technologies.  This means I am generally not in the trenches directly feeling the pains of operating HPCs today; instead, my job is to identify technologies that will cause less problems tomorrow.  This also means that I don’t have purchasing authority, and I am less likely to be involved with anything that’s going to hit the floor in the next year.  As such, I generally don’t do vendor sales meetings or briefings at SC because they are generally focused on nearer-term products and sales.

      I did not get to where I am by myself.  I first heard about SC in 2010 when I was a graduate student, and it sounded almost infinitely more exciting than the materials science conferences I was attending.  I had no experience in HPC at the time, but it made me realize what I really wanted to pursue as a career.  I relied heavily on the good will of the online HPC community to learn enough to get my first HPC job at SDSC, and after that, the faith of a great many more to get me to where I am now.  SC is often the only time I get to see people who have helped me out in my early career, and I always make time connect with them.

      The net result of these goals was a pretty full schedule this year:



My SC'18 schedule. Note that the time zone is PST, or two hours behind Dallas time.



      I mark everything that I must attend (usually because I’m a speaker) in red to know the immovable obligations. Blue items are things I will attend unless an emergency comes up, and grey things are events I should attend because they sound interesting.

      White space is very important to me too; between 10am and 6pm, white spaces are when I can walk the expo floor.  A lot of people write off the expo as a waste of time, but I actually feel that it’s one of the most valuable parts of SC.  Since my job is to understand emerging technology (and the market trends that drive them), accosting a pre-sales engineer or product manager in a strategically important technology provider can yield an invaluable peek into the markets they’re serving.  White space in the evenings are equally important for engagements of opportunity or working on slides that have to be presented the next day.


      Saturday, November 10


I always fly to SC on the Saturday before the conference starts. I have historically opted to do workshops on both Sunday and Monday, as I really enjoy attending both PMBS and PDSW-DISCS. I bring a suitcase that has extra room for conference swag, and doing so this year was critically important because I opted to bring along a pair of cowboy boots that I knew I would not want to wear on the flight home.

My brown kicks. Also Harriet the cat.
      On just about every work flight I’m on, I’ve got PowerPoint slides to review; this trip was no different, and I spent the 3.5-hour flight time reviewing the slides I had to present the next day. Once in Dallas and at my hotel, I carried out my usual work-travel night-of-arrival ritual: order the specialty pizza from a local pizza joint, text home saying I arrived safely, and iron my clothes while watching Forensic Files.

Sunday, November 11

This year I had the honor of presenting one part of the famed Parallel I/O in Practice tutorial at SC along with Rob Ross, Brent Welch, and Rob Latham. This tutorial has been running for over fifteen years now, and at some point over those years, it picked up the curious ritual of being kicked off with some juggling:

Brent leading up to the tutorial start time with some juggling. He brought the pins with him.
      The tutorial itself is really comprehensive and includes everything from device-level performance behavior to parallel file systems architecture and I/O middleware.  Even though I can proudly say that I knew 95% of the material being presented throughout the day (as I probably should since I was a presenter!), I found this particular slide that Rob Latham presented particularly insightful:

The ease and portability of using I/O middleware comes without sacrificing performance! Sorry for the odd angle; this is the screen as we presenters were able to view it.
      It makes the case that there is no significant performance penalty for using higher-level I/O libraries (like PnetCDF or parallel HDF5) despite how much easier they are to use than raw MPI-IO.  One of the biggest take-home messages of the entire tutorial is to use I/O middleware wherever possible; doing so means that understanding parallel file system architecture isn’t prerequisite to getting good I/O performance.

Monday, November 12

Monday was the official first day of SC. Workshops and tutorials went on throughout the day, and the opening keynote and exhibition hall opening gala started in the evening.



      PDSW-DISCS 2018


      The 3rd Joint International Workshop on Parallel Data Storage & Data Intensive Scalable Computing Systems (PDSW-DISCS) was on Monday, and I had the honor of being asked to serve as its Publicity Chair this year.

The PDSW-DISCS full-day workshop agenda
      It’s a really great workshop for people working in I/O, storage, and data and always draws a large crowd:

      For researchers, it’s a great venue for short papers that IEEE or ACM publishes, and it also has a really nice Work-in-Progress track where a page-long abstract gives you a seven minute spot to pitch your work.  For attendees, it’s always chock full of good talks that range from pure research to applied development.

      This year’s keynote speaker was Rangan Sukumar, Cray’s analytics guru.  His talk was interesting in that it approached the oft-mentioned convergence between HPC and AI (which has become an over-used trope by itself) from the perspective of a system architect (which is where the rubber meets the road):

As many great keynote speakers do, Rangan used hyperbole at times to contrast HPC and “Big Data” workloads, and this stimulated some discussion online. Although the slides alone tell only part of the story, you can download them from the PDSW-DISCS’18 website.

      Later in the morning, Margaret Lawson (University of Illinois, Sandia Labs) presented a follow-on to the EMPRESS metadata system she presented last year:

      Last year, EMPRESS seemed a little too researchy for me (as a facilities person) to sink my teeth into.  This year though, the picture seems a lot more complete and I quite like the architectural framework.  Although EMPRESS may not ever be a household name, the concept of separating data streams and metadata streams underneath some sort of I/O middleware is really solid.  I think that storing data and metadata in different, architecturally distinct storage systems that map to the unique access patterns of data and metadata is ultimately the right way to approach large-scale data and metadata management in HPC, and I expect to see this design pattern proliferate as scientific data analysis becomes a bigger part of large-scale HPC workloads.

In the afternoon, researchers from OSU offered a rare peek into Alibaba through a high-level analysis of SSD failure data provided by the Chinese hyperscaler:


      The most alarming finding to me was that 20% of SSD failures were caused by humans yanking the wrong SSD.  This immediately made me wonder who Alibaba is hiring to do routine operational support at their data centers; if people are causing a significant fraction of storage faults, either they aren’t hiring with the same standards as their US counterparts, or their data centers are a mess.  The speaker’s proposed remedy was to use a different SSD form factor for each logical use case for SSDs so that operators could visually identify an SSD reserved for metadata versus one reserved for data.  I personally think a label maker, a barcode scanner, and a decent salary is an easier, standards-based solution.

      Other highlights included
• Characterizing Deep-Learning I/O Workloads in TensorFlow, presented by Stefano Markidis of KTH. The first time I’ve seen an I/O-centric evaluation of how deep learning workflows will affect storage requirements of future systems. I learned a lot.
• Toward Understanding I/O Behavior in HPC Workflows, presented by Jakob Lüttgau of DKRZ/ANL. Rather than analyze the I/O pattern of a single MPI job, this paper began examining the I/O patterns of related jobs that all work towards a single scientific objective. Again, one of the first research papers I’ve seen that takes a critical look at end-to-end workflows from an I/O perspective.
• Methodology for the Rapid Development of Scalable HPC Data Services, presented by Matthieu Dorier of ANL. I think this paper is intended to be the canonical reference for the Mochi project, which I was glad to finally see. The idea of enabling quickly composable, purpose-built I/O services that are optimized for next-generation media and interconnects is a brilliant one, and I am a huge believer that this approach will be what demonstrates the earliest scientific successes that rely on storage-class memory at scale.
      There were a number of really promising ideas presented at the WIP sessions as well, and recapping the entirety of the workshop is a blog post in and of itself.  Fortunately, all the papers and slides are openly available on the PDSW-DISCS website.

SC Opening Keynote and Gala

I’ve actually stopped going to the SC keynotes over the last year since they’re increasingly focused on the societal impacts enabled by HPC rather than HPC itself. While I’m definitely not knocking that theme (it’s a great way to inspire early-career individuals, big-picture program management types, and disenchanted technical folks in the trenches), it’s just not why I attend SC. Instead, I make use of my exhibitor badge and head into the expo floor before it opens to the public; this is the only time during the conference where I seem to be able to reliably find the people I want to meet at their booths.

      This year I visited a few small businesses with whom I’ve fostered good will over the last few years to say hello, then dropped in on the SDSC booth to catch up with the latest news from my former coworkers.  They also happen to have free beer on the opening night.

      Once the expo floor opens to the public following the opening keynote, booth activity goes from zero to eleven really quickly.  Every booth has a big splash during the gala which makes it hard to choose just one, but my decision this year was made easier by Cray choosing to unveil its new exascale HPC platform, Shasta, and celebrate its first sale of a Shasta system to NERSC.

Cray CEO Pete Ungaro at the Shasta unveiling ceremony
      This new system, named Perlmutter, will be delivered in 2020 and has a bunch of really slick new technologies incorporated into it.

      After Cray CEO Pete Ungaro unveiled the prototype Shasta blades, there was a celebratory toast and both NERSC and Cray staff donned their “ASK ME ABOUT SAUL” pins:

NERSC and Cray staff got these VIP pins to promote NERSC’s next system, named after astrophysicist, Nobel laureate, and Berkeley Lab scientist Saul Perlmutter.
      I stuck around to shake hands with my colleagues at Cray (including the CEO himself!  Haven’t washed my hand since) and catch up with some of my counterparts in storage R&D there.

The Beowulf Bash

The gala shut down at 9 PM, at which time I headed over to the Beowulf Bash to try to find some other colleagues who said they would be there. I generally don’t prioritize parties at SC for a couple reasons:

1. Shouting over music all night is a great way to burn out one’s voice. This is not good when I have to present something the next day.
2. The crowds and lines often undercut my enjoyment of catching up with old colleagues (and meeting new ones).
3. I almost always have slides that need to be finished by the end of the night.

I make an exception for the Bash because I personally value many of the people behind organizing and sponsoring it, and it captures the scrappier side of the HPC community which helped me get my foot in the door of the industry. This year I specifically went to catch up with my colleagues at The Next Platform; Nicole and Tim are uncommonly insightful and talented writers and editors, and they always have wacky anecdotes to share about some of the more public figures in our industry.

More generally and self-servingly though, maintaining a good relationship with members of the HPC trade press at large has tremendous value over time regardless of your affiliation or job title. Behind every interesting HPC news article is an editor with incomparable access to a broad network of people in the industry. Despite this though, they still are subject to the same haters as anyone else who puts something out in the spotlight, so I have to imagine that putting in a kind word in person is always worth it.

At around midnight, only the die-hards were still around.

Late night Beowulf Bash at Eddie Deen's Ranch.

Regrettably, I barely had any time to catch up with my colleagues from the FreeNode HPC community at the Bash (or at all). Maybe at ISC.

After getting back to the hotel, I realized I hadn't eaten anything since lunch. I also learned that absolutely nothing that delivers food in the downtown Dallas area is open after midnight. After waiting an hour for a food delivery that wound up going to a restaurant that wasn't even open, I had to settle for a hearty dinner of Hot Pockets from the hotel lobby.

I hadn't eaten a Hot Pocket since graduate school. Still taste the same.

Fortunately my Tuesday was relatively light on hard obligations.


Tuesday, November 13

Tuesday was the first day in which the SC technical program and expo were both in full swing. I split the day between paper talks, meetings, and the expo floor.

      Technical Program, Part 1 - Data and Storage


      My Tuesday morning began at 10:30 AM with the Data and Storage paper presentation session in the technical program.  Of note, the first two papers presented were about cloud-centric storage paradigms, and only the third one was clearly focused on scientific HPC workloads.

• SP-Cache: Load-Balanced, Redundancy-Free Cluster Caching with Selective Partition by Yu et al was a paper squarely aimed at reducing tail latency of reads. Very important if you want to load an old GMail message without waiting more than a few seconds for it to load. Less useful for most scientific HPC workloads.
• BESPOKV: Application Tailored Scale-Out Key-Value Stores by Anwar et al was a paper presenting a framework that is uncannily similar to the Mochi paper presented at PDSW on the day before. The premise was to allow people to compose their own Cassandra-like KV store with specific consistency and durability balance without having to reinvent the basic building blocks.
• Scaling Embedded In Situ Indexing with DeltaFS by Zheng et al was the talk I really wanted to hear but I had to miss on account of a conflicting meeting. The DeltaFS work being done by CMU and LANL is a really innovative way to deal with the scalability challenges of parallel file system metadata, and I think it’s going to ultimately be where many of the nascent software-defined storage technologies aimed at HPC will converge.

Unfortunately I had to cut out of the session early to meet with a vendor partner at a nearby hotel.
Interlude of Meetings

The first of my two vendor meetings at this year’s SC was less a sales call and more about continuing a long-running discussion about technology futures in the five-to-ten year timeframe. No sane vendor will commit to any roadmap that far out, especially given the uncertainty surrounding post-Moore’s Law technologies, but they are receptive to input from customers who are formulating their own strategic directions for the same time period. Maintaining these sorts of ongoing conversations is a major part of what falls under my job title in “advanced technologies.”

      Unfortunately that vendor meeting overlapped with the Lustre BOF, but other staff from my institution were able to attend and ensure that our interests were represented.  I was also able to attend the Lustre Lunch that followed the BOF which was very fruitful; in addition to simply being present to remind the Lustre community that I (and the institution I represent) am a part of it, I happened to connect in-person with someone I’ve known for a few years via Twitter and make a valuable connection.  Unfortunately I had to leave the Lustre Lunch early to make another meeting, unrelated to SC, that allowed a geographically distributed committee to meet face-to-face.

      After that committee meeting, I seized the free hour I had to visit the show room floor.

Expo Floor, Part 1

The first photo-worthy tech I saw was the Shasta blade at the Cray booth. Because the booth was mobbed with people during the previous night’s gala, this was actually my first time seeing Shasta hardware up close. Here’s the compute blade:

Part of a Cray Shasta compute blade up-close
      Unlike the Cray XC blade of today’s systems which uses a combination of forced-air convection and heat exchangers to enable liquid cooling, these Shasta blades have direct liquid cooling which is rapidly becoming a de facto minimum requirement for an exascale-capable rack and node design.  I had some questions, so I struck up a conversation with a Cray employee at the booth and learned some neat things about the Shasta packaging.

      For the sake of clarity, here is a hand-drawn, annotated version of the same photo:

Part of a Cray Shasta compute blade up-close with my annotations
      What stood out to me immediately was the interesting way in which the DIMMs were direct-liquid cooled.  Unlike IBM’s attempt at this with the POWER 775 system (the PERCS system of Blue Waters infamy) where cold plates were attached to every DIMM, Cray has opted to use what looks like a heat-conductive foam that wraps copper cooling lines.  To service the DIMMs, the entire copper cooling complex that runs between the two rows of two DIMMs unfastens and lifts up.  There’s enough slack in the liquid cooling lines (highlighted in purple) so that DIMMs (and presumably every other field-replaceable part in the blade) can be serviced without draining the coolant from the blade.

The NIC is also pretty interesting; it is a commercial high-end data center Ethernet NIC that’s manufactured in a custom form factor to fit this blade. It looks like a second CPU is housed underneath the NIC, so it may be the case that the NIC and one of the CPUs share a common cooling block. The NIC is also positioned perpendicular to the long edge of the blade, meaning that there are probably some pretty good cable runs going from the front-most NIC all the way to the rear of the blade. Finally, because the NIC is on a discrete mezzanine card, the networking technology is no longer soldered to the compute as it is with Aries on today’s XC.

      The network switch (which I did not photograph, but others did) is another blade that slots into the rear of the Shasta cabinet and mates perpendicularly with a row of compute blades such that a single switch blade can service a fully populated compute chassis.  The engineer with whom I spoke said that these Shasta cabinets have no actual midplane; the compute blades connect directly to the switch blades through a bunch of holes cut out of the sheet metal that separates the front of the cabinet from the rear.  Without a midplane there is presumably one less single point of failure; at the same time though, it wasn’t clear to me how out-of-band management works without a centralized controller somewhere in the chassis.

      At this point I should point out that all of the above information is what I learned by talking to a Cray booth employee at SC without any special privilege; although I’m sure that more details are available under non-disclosure, I frankly don’t remember any of it because I don’t work on the compute side of the system.

      My next big stop on the show room floor was at the Fujitsu booth, where they had their post-K prototype hardware on display.  Of particular note was their A64FX engineering sample:

      If you look very carefully, you can see the four stacks of high-bandwidth memory (HBM) on-die along with the ARM, which is fantastically historic in that it’s the first general-purpose CPU (of which I am aware) that has integrated HBM2.  What’s not present is any indication of how the on-chip Tofu NIC is broken out; I guess I was expecting something like Intel’s -F series KNLs with on-package OmniPath.

      A sample node of the post-K system was also on display:

      Seeing as how both this post-K system and Cray Shasta are exascale-capable system architectures, it’s interesting to compare and contrast them.  Both have direct liquid cooling, but the post-K compute blade does not appear to have any field-replaceable units.  Instead, the entire board seems to be a single FRU, so CPUs must be serviced in pairs.  I think the A64FX lacks any cache coherence bus, meaning that two CPUs correspond to two nodes per FRU.

      That all said, the post-K design does not appear to have any DDR DRAM, and the NIC is integrated directly into the CPU.  With those two components out of the picture, the rate of a single component failure is probably a lot lower in post-K than it would be in Shasta.  Hopefully the post-K HBM has ECC though!

While chatting about the post-K node architecture at the Fujitsu booth, I also met an engineer who just happened to be developing LLIO, the post-K system’s burst buffer service:

LLIO burst buffer slide shown at the Fujitsu booth
      It sounds a lot like DataWarp in terms of features, and given that Fujitsu is also developing a new Lustre-based file system (FEFS 2.0?) for post-K, we might see a tighter integration between the LLIO burst buffer layer and the FEFS back-end disk storage.  This is definitely a technology that wasn’t on my radar before SC but is definitely worth keeping an eye on as 2021 approaches.

      As I was racing between a few other booths, I also happened upon my boss (and NERSC-9 chief architect) presenting the Perlmutter system architecture at the NVIDIA booth:

NERSC’s Nick Wright, chief architect of the Perlmutter system, describing its architecture at the NVIDIA booth

The talk drew a crowd; I’m glad to see people as jazzed about the new system as I am.

Analyzing Parallel I/O BOF

The Analyzing Parallel I/O BOF is a must-attend event for anyone in the parallel I/O business, and this year’s BOF was especially good. Andreas Dilger (of Lustre fame; now CTO of Whamcloud) gave a brief but insightful retrospective on understanding I/O performance:

      Unfortunately I did not take a picture of Andreas’ second slide (available on the Analyzing Parallel I/O BOF’s website) which is a “what is needed?” slide which largely revolves around better integration between storage system software (like Lustre) and user applications.  I/O middleware seems to be at the center of most of the bullets that called for increased development which bodes well for scientific application developers who attended the Parallel I/O in Practice tutorial on Sunday–recall that this was my key takeaway.  It’s good to know that the lead of Lustre development agrees with this vision of the future, and I hope Whamcloud moves Lustre in this direction so users and middleware developers can meet the storage system software somewhere in the middle.

      The BOF took a darker turn after this, starting with a presentation from Si Liu of TACC about the Optimal Overloaded IO Protection System, or OOOPS.  It’s a library that wraps the standard POSIX I/O calls:

OOOPS operates by hijacking standard I/O calls and lagging them.

      But in addition to passively monitoring how an application performs I/O, it purposely injects latency to throttle the rate at which I/O operations get issued by an application.  That is, it purposely slows down I/O from clients to reduce server-side load and, by extension, the effects of a single bad actor on the I/O performance of all the other users.

      Ideologically, I have a lot of problems with an HPC facility inserting itself into the user’s workflow and reducing the efficiency with which he or she can accomplish their science relative to the peak capability of the HPC resource.  If a storage system allows a single user to accidentally deny service to other users in pursuit of peak performance, that is a problem with the storage system and it should be addressed at the system level.  And as Andreas pointed out in the BOF, tools exist to allow storage systems to accomplish fair sharing, which is distinctly different from explicitly penalizing users.  Granted, TACC is also the facility where one of its staff went on record as saying that the R language should not be used by anyone since it is a waste of energy.  Perhaps they have an institutionally different relationship with their user community.

      Fortunately, anything that relies on LD_PRELOAD can be circumvented by users, so OOOPS is unlikely to be used to enforce any kind of resource usage policy as it was pitched during the BOF.  I do see a lot of value in using it to fence data analysis workflows that may hit a pathological condition as a result of their inputs, and being able to trigger changes in application behavior by tracking I/O rates is a technique that could be useful in auto-tuning I/O middleware.
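For anyone who hasn’t played with library interposition, a quick sketch of why LD_PRELOAD-based tooling is inherently opt-in (the library path is hypothetical):

$ LD_PRELOAD=/opt/ooops/lib/libooops.so ./my_io_heavy_app   # POSIX I/O calls are intercepted
$ ./my_io_heavy_app                                         # same binary, no interposition
# Statically linked binaries never consult LD_PRELOAD at all.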

      Rosemary Francis, CEO of Ellexus, also spoke at the BOF and spoke for the need to make I/O performance analysis a little more accessible for the end users.  I was quite delighted by the visualizations she presented (presumably from her company’s Breeze product) which used both color and human-readable “bad” I/O patterns to create a pie graph that quickly shows how much time an application spent doing I/O in various good, bad, and neutral ways.  Darshan, the tried-and-true open source I/O profiling library, operates at a slightly lower level and assumes a slightly higher level of user sophistication by comparison.
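For comparison, a hedged sketch of the usual Darshan workflow (the module name, job launcher, and log location all vary by site):

$ module load darshan                         # interpose on MPI-IO and POSIX calls at run time
$ srun ./my_mpi_app                           # each instrumented job writes a compressed log
$ darshan-parser --total <logfile>.darshan    # dump aggregate I/O counters from that log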

      The discussion half of the BOF was packed with engagement from the audience–so much so that I didn’t find any moments of silence to seize the opportunity to stump for my own view of the world.  The combination of OOOPS and Rosemary’s I/O war stories did steer the discussion towards ways to punish bad users though.  I can appreciate HPC operators’ frustration in novice users causing system-wide problems, but I don’t think shaming users who do bad I/O is a great solution.  Rather, something between OOOPS’ automatic identification of bad I/O at runtime and Ellexus’ user-centric reporting and feedback, combined with storage systems capable of enforcing QOS, is where we need to go.

The Cray Celebration

I wrote earlier that I normally don’t do the SC vendor party circuit, but the Cray party this year was another exception for two reasons: (1) we had just announced Perlmutter along with Cray’s Shasta unveiling which is worth celebrating, and (2) there were specific Cray staff with whom I wanted to confer sometime during the week. So after the Parallel I/O BOF, I headed over to the event venue:

      The event was quite nice in that it was not held at a loud bar (which made conversation much easier), it had plenty of food (no need for 2 AM Hot Pockets), and the format was conducive to moving around and meeting a lot of different people.  The event was awash with representatives from all the major Cray customers including the DOE labs, the big oil & gas companies, and the regional leadership computing centers in EMEA including CSCS and KAUST, as well as alumni of all those employers and Cray itself.  I’ve only worked at a Cray customer site for three years now, but I couldn’t walk ten feet without running into someone I knew; in that sense, it felt a little like an event at the annual Cray User Group meeting but with a broader range of attendees.

      I don’t know what this event would’ve been like if I was a student or otherwise didn’t already know many of the regular faces within the Cray user community and instead had to start conversations cold.  That said, I was busy the entire evening getting to know the people behind all the conference calls I’m on; I find that getting to know my industry counterparts as people rather than just vendor reps really pays dividends when surprises happen and conflicts need to be resolved.  Events like this at SC are invaluable for building and maintaining these sorts of relationships.

Wednesday, November 14

My Wednesday began bright and early with a quick run-around of the expo floor to figure out who I needed to visit before the end of the week.

      The expo floor was awkwardly laid out this year, so I really needed to do this to make sure I didn’t spin my tires trying to find certain booths once the crowd showed up.  Incidentally, I did witness a sales person violate the unwritten rule of keeping everything friendly until the expo floor opened to the public–a sales rep selling “the world’s fastest storage system” tried to stir up cold sales leads at my employer’s booth at 8 AM while we were all still drinking our coffee and catching up on e-mail.  If you do this, shame on you!  Respect the exhibitor access and don’t put your game face on until the public is allowed in.

SC Student Career Fair and Booth Talk

My first meeting was a chat over coffee with VAST Data, a storage technology company that has some really innovative and exciting ideas in the pipeline, to keep up to date with the latest news as they approach public launch.

      My second obligation was volunteering at my employer’s booth at the SC Career Fair.  I generally enjoy booth duty and talking to students, and this year I was doubly motivated by my desire to fill some career and student job openings related to my responsibilities.  A diverse cross section of students dropped by our booth looking for both summer internships and full-time jobs; many seemed very well rehearsed in their cold pitch, while some others were a little more casual or cautious.  Although I’m not particularly qualified to give career advice, I will say that knowing how to sell yourself cold can be a valuable skill in your early career.  If you are seeking employment, be prepared to respond to a request to “tell me about yourself” in a way that makes you stand out.

      After the Career Fair, I wound up hunkering down at the SDSC booth to have lunch with my former coworkers and review the slides I volunteered to present at the adjacent DDN booth.

      At 2 PM I took the stage (booth?) and one of my colleagues was not only kind enough to sit in on this booth talk, but also share this photo he took right before I started:

Beginning of my talk at the DDN booth. Photo credit goes to Suhaib Khan via Twitter.
I continue to be humbled that anyone would go out of their way to come hear what I have to say, especially when my talk is as unvetted as booth talks tend to be. Talking at booths rarely goes well for me; the audio is always a wildcard, the audience is often unwitting, and auditory and visual distractions are literally everywhere. The DDN booth was my sole booth talk of this year and it went about as well as I would have expected. On the up side, quite a few attendees seemed genuinely interested to hear what I had to say about the variety of ways one can deploy flash in an HPC system. Unfortunately, I ran a few minutes long and got derailed by external distractions several times during the presentation. Flubbing presentations happens, and none of the audience members seemed to mind.

      Shortly after the booth talk, I had to find a quiet spot to jump on a telecon.  This was no easy task; since cell phones killed the public phone booth, there are very few places to take a call on the expo floor.

      <h3 id="wednesdayexpo">Expo Floor, Part 2</h3>The afternoon afforded me two more hours to race around the expo floor.  Despite my planning earlier in the morning, I wound up spinning my tires looking for a few key vendors who simply didn’t show up to SC this year, including

<ul><li>Samsung and SK Hynix, two of the top three DRAM vendors and the sole manufacturers of HBM2</li>
<li>Seagate, one of the world’s major hard disk drive manufacturers</li>
<li>Broadcom/Avago, the company manufacturing most of the serdes used in the upcoming 200G and 400G network devices</li>
<li>Juniper, one of the major players in the 400 GbE space</li>
<li>AdvancedHPC, one of the few US integrators selling BeeGFS</li></ul>
      I’m not really sure why so many vendors didn’t show up this year, but it made getting a holistic view of the storage and networking technologies markets impossible.  That said, I still saw a few noteworthy things.

      One of the big open questions in high-performance storage revolves around the battle between the NF1 (formerly NGSFF, promoted by Samsung) and EDSFF (promoted by Intel) form factors for NVMe.  It’s clear that these long-and-skinny NVMe designs are going to have to replace the thermally inefficient 2.5” U.2 and unserviceable HHHL PCIe form factors, but the dust is far from being settled.  On the one hand, Samsung leads flash storage sales worldwide, but their NF1 form factor caps the power consumption (and therefore performance) of its devices to levels that are squarely aimed at cheaper data center flash.  On the other, the EDSFF form factor being pushed by Intel has a short version (competing directly with NF1) and a longer version that allows higher power.

      The Supermicro booth had actual EDSFF drives on display, and this was the first time I could actually see one up-close:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">A long-type EDSFF NVMe drive at the Supermicro booth.  The aluminum casing is actually required to meet the thermals.</td></tr></tbody></table>

      What I didn’t realize is that the higher thermal specification enabled by the long-version EDSFF drives requires that the entire SSD circuit board be enclosed in the aluminum casing shown to enable better heat dissipation.  This has the nasty side effect of reducing density; while a standard 19” 1U chassis can fit up to 36 NF1 SSDs, the aluminum casing on long EDSFFs reduces the equivalent density to 32 SSDs.  Although long EDSFF drives can compensate for this by packing more NAND dies on the physically longer EDSFF board, supporting these longer SSDs requires more engineering on the chassis design to fit the same amount of compute into a smaller area.

      Similarly but differently, the Lenovo booth was showcasing their D3284 JBOD which packs 84x 3.5” HDDs into a double-decker 5U chassis.  I had naively assumed that all of these super-dense 84-drive enclosures were top-loading such that each drive mates to a backplane that is mounted to the floor of the chassis, but it turns out that’s not the case:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">Lenovo’s 5U84 JBOD</td></tr></tbody></table>
Instead, each 3.5” drive goes into its 2.5U shelf on its side, and each drive attaches to a carrier that has to be slid slightly toward the front of the JBOD to release the drive, and then slid towards the back of the JBOD to secure it.  This seems a little harder to service than a simple top-load JBOD, but I assume there are thermal efficiencies to be gained by this layout.

The Western Digital booth had a pretty broad portfolio of data center products on display.  Their newest gadget seems to be a planar NAND-based U.2 device that can present itself as DRAM through a custom hypervisor.  This sounds like a direct competitor to Intel’s Memory Drive offering, which uses ScaleMP’s hypervisor to expose flash as DRAM to a guest VM.  The combination of exposing flash as very slow memory and relying on software virtualization to do so makes this a technology not really meant for HPC, and the engineer with whom I spoke confirmed as much.  Virtualized big-and-slow memory is much more appealing to in-memory databases such as SAP HANA.

Perhaps more interesting was the lack of any mention of Western Digital’s investment in storage-class memory and microwave-assisted magnetic recording (MAMR) disk drives.  When I prodded about the state of MAMR, I was assured that the technology will work because there is no future for hard drives without some form of energy-assisted magnetic recording.  However, product announcements are still 18-24 months away, and these drives will enter the market at the rather underwhelming capacity point of ~20 TB.  Conveniently, this matches Seagate’s recent cry of wolf that they will launch HAMR drives in 2020 at a 20 TB capacity point.  Western Digital also made no mention of multi-actuator drives, and asking about them only got me a sly grin; this suggests that Western Digital is either playing slow and steady so as not to over-promise, or that Seagate has a slight technological lead.

My last substantive stop of the afternoon was at the IBM booth, where they had one of their new TS4500 tape libraries operating in demo mode.  The window was too reflective to take a video of the robotics, but I will say that there was a perceptible difference between the robotics in IBM’s enterprise tape library and the robotics in another vendor’s LTO tape library.  The IBM enterprise robotics are downright savage in how forcefully they slam tapes around, and I now fully believe IBM’s claims that their enterprise cartridges are constructed to be more physically durable than standard LTO.  I’m sure there’s some latency benefit to being able to ram tapes into drives and library slots at full speed, but it’s unnerving to watch.

      IBM also had this cheeky infographic on display that was worth a photo:

      <div class="separator" style="clear: both; text-align: center;"></div> +
      If I built a tape drive that was still operating after forty years in outer space, I’d want to brag about it too.  But there are a couple of factual issues with this marketing material that probably made every physical scientist who saw it roll their eyes.

Over at the compute side of the IBM booth, I learned that the Summit and Sierra systems sitting at the #1 and #2 positions on Top500 are built using node architectures that IBM is selling commercially.  There are 2 CPU + 6 GPU nodes (which is what Summit at OLCF has) which require liquid cooling, and 2 CPU + 4 GPU nodes (which is what Sierra at LLNL has) which can be air- or liquid-cooled.  I asked an IBM technologist which configuration is more commercially popular and learned that the Sierra configuration is currently leading sales due to the relative lack of infrastructure to support direct liquid cooling in commercial data centers.

This has interesting implications for the exascale technologies I looked at on Tuesday; given that the exascale-capable system designs presented by both Fujitsu and Cray rely on direct liquid cooling, the gap between achieving exascale-level performance and delivering a commercially viable product is pretty wide from a facilities perspective.  Fortunately, the Fujitsu A64FX chip usually runs below 200 W and can feasibly be air-cooled with lower-density packaging, and Cray’s Shasta will support standard air-cooled 19” racks via lower-density nodes.

      <h3 id="io500bof">The IO-500/VI4IO BOF</h3>The second must-attend BOF for people working in I/O is the IO-500 and Virtual Institute for I/O BOF.  It’s a very pragmatic BOF where people discuss system architecture, benchmarking, and various related community efforts, and since 2017, also began to include the semiannual unveiling of the IO-500 list.

This year was exciting in that the top system, a DDN IME installation at JCAHPC, was unseated by the monstrous storage system attached to the Summit system at Oak Ridge, which sustained an astounding 2 TiB/sec and 3 million opens/sec.  In fact, the previous #1 system dropped to #4, and each of the new top three systems was of a different architecture (Spectrum Scale at Oak Ridge, IME at KISTI, and Lustre at Cambridge).

      Perhaps the most interesting of these new submissions was the #3 system, the Data Accelerator at Cambridge, which is a home-grown whitebox system that was designed to be functionally equivalent to DataWarp’s scratch mode:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">Alasdair King presenting the Data Accelerator design at the IO-500 BOF</td></tr></tbody></table>

The hardware is just Dell boxes with six NVMe drives and one OPA NIC per socket; the magic is actually handled by a cleanroom reimplementation of the interface that Slurm uses to instantiate DataWarp partitions on Cray XC systems.  Rather than use a sophisticated orchestration system as DataWarp does, though, the Data Accelerator translates Slurm #DW pragmas into Ansible plays that spin up and tear down ephemeral Lustre file systems.
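For a sense of what this interface looks like from the user’s side, a DataWarp-style job script is just a batch script with an extra directive in its header.  A minimal sketch (the #DW syntax follows Cray’s DataWarp documentation; the node count, capacity, and application here are made up):

<pre>
#!/bin/bash
#SBATCH --nodes=4
#DW jobdw type=scratch access_mode=striped capacity=10TiB

# Slurm's burst buffer plugin parses the #DW directive and provisions the
# ephemeral file system before the first srun executes.  On the Cambridge
# system, that provisioning step is an Ansible play that stands up a
# short-lived Lustre file system rather than a DataWarp partition.
srun ./my_simulation --scratch-dir "$DW_JOB_STRIPED"
</pre>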

The fact that the #3 fastest storage system in the world is a whitebox NVMe system is really remarkable, and my hat is off to the team at Cambridge that did this work.  As all-flash parallel file systems move out of the realm of high-end boutique solutions and become affordably mainstream, relatively scrappy but innovative engineering efforts like the Cambridge system are surely going to cause a rapid proliferation of flash adoption in HPC centers.

      DDN also presented their software-defined IO-500 submission, this time run in Google Cloud and landing in the #8 position:

      <div class="separator" style="clear: both; text-align: center;"></div> +
Since DDN’s embedded SFA product line already runs virtual machines on their controller hardware, it doesn’t seem like a big stretch to run the same SFA VMs in the cloud.  While this sounds a little counter to DDN’s biggest differentiator of providing a fully integrated hardware platform, this idea of running SFA in Google Cloud arose from the growing need for parallel file systems in the cloud.  I can only assume that this need is being largely driven by AI workloads, which require a combination of high I/O bandwidth, high IOPS, and POSIX file interfaces.

      <h2 id="thursday">Thursday, November 15</h2><div>The conference was showing signs of winding down by Thursday, as many attendees brought their luggage with them to the convention center so they could head back home that night.  The expo floor also closes in the mid-afternoon on Thursday.</div>

<h3>Technical Program, Part 2 - Exhibitor Forum</h3>

My Thursday began at 10:30 AM with the HPC Storage and Memory Architectures session of the Exhibitor Forum.  Liran Zvibel, former CTO and now CEO of WekaIO, was the first presenter and gave a surprisingly technical description of the WekaIO Matrix parallel file system architecture:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">WekaIO’s Matrix file system architecture block diagram.  Surprising amount of detail can be cleaned by examining this carefully.</td></tr></tbody></table>
In terms of building a modern parallel file system from the ground up for all-flash, WekaIO checks off almost all of the right boxes.  It runs almost entirely in user space to keep latency down, runs in its own reserved pool of CPU cores on each client, and capitalizes on the approximate parity between NVMe latency and modern high-speed network latency.  They make use of a lot of the smart ideas implemented in the enterprise and hyperscale storage space too, and they are one of the few truly future-looking storage companies out there thinking about the new possibilities of the all-flash world while still courting the HPC market.

      There is a fair amount of magic involved that was not broken down in the talk, although I’ve found that the WekaIO folks are happy to explain some of the more complex details if asked specific questions about how their file system works.  I’m not sure what is and isn’t public though, so I’ll save an architectural deep-dive of their technology for a later date.

Andreas Schlapka of Micron Technology was the next speaker, and his talk was quite a bit more high-level.  Aside from the grand statements about how AI will transform technology, though, he did have a couple of nice slides that filled some knowledge gaps in my mind.  For example:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">Broad strokes highlighting the different computational (and architectural) demands of training and inference workloads</td></tr></tbody></table>
      Training is what the vast majority of casual AI+HPC pundits are really talking about when extolling the huge compute requirements of deep learning.  Part of that is because GPUs are almost the ideal hardware solution to tackle the mathematics of training (dense matrix-matrix multiplication) and post impressive numbers; the other part is that inference can’t happen without a well-trained model, and models are continually being refined and re-trained.  What I hadn’t fully appreciated is that inference is much more of an interesting computational problem in that it more closely resembles the non-uniform and latency-bound workloads of scientific computing.

This has interesting implications for memory technology; while HBM2 definitely delivers more bandwidth than DDR, it does this by increasing the channel width to 128 bits and hard-wiring 8 channels into each stack.  The extra bandwidth helps feed GPUs for training, but it’s not doing much for the inference side of AI which, presumably, will become a much more significant fraction of the cycles required overall.  In my mind, increasing the size of SRAM-based caches, scratchpads, and register files is the more obvious way to reduce latency for inference, but we haven’t really seen a lot of fundamentally new ideas on how to do that effectively yet.

      The speaker went on to show the following apples-to-apples system-level reference:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">System-level speeds and feeds of the memory products available now or in the near future as presented by Micron</td></tr></tbody></table>
It’s not terribly insightful, but it lets you back out the bus width of each memory technology (bus width = bandwidth ÷ data rate ÷ device count) and figure out where its bandwidth is coming from.  For example, an HBM2 device quoted at 256 GB/s (2,048 Gbit/s) with a 2.0 Gbit/s pin speed works out to a 1,024-bit-wide interface:
<ul><li>DDR4 and DDR5 use 64-bit channels and rely on increasing channel-level parallelism to improve bandwidth.  This is now putting them in a place where you wind up having to buy way more capacity than you may want just to get sufficient bandwidth.  This is analogous to where HDDs are in the HPC storage hierarchy today; it’s rapidly becoming uneconomical to rely on DDR for bandwidth.</li>
<li>GDDR uses narrower channels (32 bits) but more of them to get better bandwidth.  It also relies on phenomenally high data rates per pin; I don’t really understand how this is possible since it relies on inefficient single-ended signaling.</li>
<li>HBM uses both wide (128-bit) and plentiful channels to get its performance; the table is misleading in this regard since each “device” (HBM stack) contains eight channels.  This is fine for feeding highly parallel arithmetic units like vector ALUs, but it offers no benefit to latency-bound workloads that, for example, chase pointers to traverse a graph.  (It turns out HBM is just fine for pointer chasing; thanks to one of HPC’s memory-wizards-at-large for pointing this out to me!)</li></ul>

Micron also made the strange assertion that they are the only company that offers the entire range of memory products.  I guess since Samsung and SK Hynix both opted to skip SC, Micron can say whatever it likes; however, Samsung is currently the only company shipping commercial quantities of HBM, and Hynix’s HBM capability just came online.  As far as I know, Micron has never manufactured a stack of HBM since they spent years promoting the competing-but-now-defunct Hybrid Memory Cube technology.

      <h3 id="nsfbof">The NSF Future Directions BOF</h3>I opted to see what was new with National Science Foundation’s Office of Advanced Cyberinfrastructure (OAC) at their noon BOF.  Despite having left the NSF world when I left San Diego, I still care deeply about NSF computing because they pay for many of the most accessible HPC resources in the US.  I certainly got my start in HPC on the NSF’s dime at SDSC, and I got to see firsthand the huge breadth of impact that SDSC’s XSEDE resources had in enabling smaller research groups at smaller institutions to perform world-class research.  As such, it’s also no surprise that the NSF leads the pack in developing and deploying many of the peripheral technologies that can make HPC accessible such as federated identity, science gateways, and wide-area file systems.

      That all said, actually listening to the NSF HPC strategic vision makes me rather grumpy since the directions of such an important federal office sometimes appear so scattershot.  And judging by the audience questions at the end of the BOF, I am not the only one–Very Important People(tm) in two different national-level HPC consortia asked very pointed questions of Manish Parashar, the NSF OAC director, that highlighted the dichotomy between OAC’s strategic vision and where it was actually putting money.  I really believe in the critical importance of NSF investment in maintaining national cyberinfrastructure which is probably why I keep showing up to these BOFs and do my best to support my colleagues at SDSC and the other XSEDE SPs.

After sitting through this Future Directions BOF, I could write another updated rant about how I feel about the NSF’s direction in HPC and get myself in trouble.  Instead, I’ll share just a few slides I photographed from afar, along with some objective statements, and leave it at that.

      The future directions summary slide:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">NSF OAC’s future directions</td></tr></tbody></table><ul><li>Performance, capability computing, and global leadership are not mentioned in the above slides.  Terms like “agility, responsiveness, accessibility”) are often used to describe the cloud.</li><li>“reduce barriers to CI adoption” indicates that NSF wants to serve more users.  NSF is not increasing investment in capital acquisition (i.e., more or larger HPC systems beyond the status quo of technology refreshes).</li><li>“Prioritize investments to maximize impact” does not define what impacts are to be maximized.</li></ul>
      The Frontera slide:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">NSF’s next leadership-class HPC, Frontera, to be deployed by TACC</td></tr></tbody></table><ul><li>The award amount was $60M.  The previous Track-1 solicitation that funded Blue Waters was $200M.  Stampede was $30M, and Stampede 2 was another $30M.</li><li>“leadership-class … for all [science and engineering] applications” either suggests that all science and engineering applications are leadership-capable, or this leadership-class system is not primarily designed to support a leadership computing workload.</li><li>It is unclear what the significance of the “CPU” qualifier in “largest CPU system” is in the larger context of leadership computing.</li><li>There is mention of “leadership-class” computing.  There is no mention of exascale computing.  There is nothing that acknowledges leveraging the multi-billion-dollar investment the US has made into the Exascale Computing Project.  An audience member politely asked about this omission.</li></ul><div>
      The Midscale Research Infrastructure slide:</div>

[Slide: Upcoming solicitations for research cyberinfrastructure]

<ul><li>NSF OAC expects to issue one $6M-$20M solicitation and another $20M-$70M solicitation “soon” to fund HPC systems and the associated infrastructure.</li>
<li>$6M-$20M is on the same order of magnitude as the Track-2 solicitations that funded SDSC’s Gordon ($10M) and Comet ($12M).</li>
<li>$20M-$70M is on the same order of magnitude as the Track-2 solicitations that funded TACC’s Stampede 1 and 2 ($30M each).  NSF’s next leadership-class investment, Frontera, is $60M.</li></ul>


      <h3 id="mypaper">My SC Paper</h3>The next major item on my agenda was presenting my paper, A Year in the Life of a Parallel File System, as the final talk in the final session of the paper track.

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">My name in lights–or something like that.</td></tr></tbody></table>
I was admittedly bummed out when I found out that I was going to be the conference closer, since a significant number of SC attendees tend to fly out on Thursday night and, presumably, would not stick around for my presentation.  As a result, I didn’t take preparation as seriously in the weeks leading up to SC as I normally would have.  I knew the presentation was a 30-35 minute talk that had to fit into a 25-minute slot, but I figured I would work out how to manage that the night before the talk and mostly wing it.

      What I realized after arriving at SC was that a bunch of people–most of whom weren’t the expected audience of storage researchers–were looking forward to hearing the talk.  This left me scrambling to seriously step up the effort I was going to put into making sure the presentation was well composed despite needing to drop ten minutes of material and fit it into the 25 minutes I was given.  I documented my general approach to crafting presentations in my patented Glenn K. Lockwood Five Keys to Being a Successful Researcher (FKBSR) method, but I’ll mention some of my considerations for the benefit of anyone who is interested in how others approach public speaking.
<ol><li>I absolutely could not overshoot the timing because some attendees had to leave at 5 PM to catch 7 PM flights.  This meant that it would be better for me to undershoot the time and either draw out the conclusions and acknowledgments slides to finish on time, or finish early and leave extra time for questions.</li>
<li>The people I met at SC who indicated interest in my talk were storage systems people, not statisticians.  This meant I could probably tone down the statistical rigor in the presentation without offending people’s scientific sensibilities.</li>
<li>Similarly, because attendees were already familiar with typical HPC I/O systems and the relevant technologies, I could gloss over the experimental setup and description of the different compute and storage systems.</li>
<li>Given the above considerations, a reasonable approach would be to punt as many non-essential details into the Q&A after the talk and let people try to poke holes in my methods only if they really cared.</li></ol>

I also know two things about myself and the way I present:

<ol><li>I can present either at a casual pace, where I average ~70 seconds per slide, or in turbo mode, where I average ~50 seconds per slide.  Orating at turbo speed requires a lot more preparation because it requires speaking through slide transitions rather than pausing to reorient after each slide transition.</li>
<li>I get distracted easily, so I would rather have people begin to leave after my monologue ended and Q&A began than have the commotion of people getting up derail the tail end of my presentation.</li></ol>


As a result of all these factors, I opted to cut a lot of details to get the talk down to ~25-30 minutes when presented at a casual pace, and to prepare to present in turbo mode just in case the previous speakers went long (I was the last of three speakers), there were A/V issues (they were prolific at this SC, especially for Mac users), or there were any audience interruptions.

      I also opted to present from my iPad rather than a full laptop since it did a fine job earlier at both PDSW-DISCS and the IO-500/VI4IO BOF.  In sticking with this decision though, I learned two valuable things during the actual presentation:
<ol><li>The iOS “do not disturb” mode does not suppress Twitter notifications.  A couple of people were kind enough to tweet about my presentation as I was giving it, but this meant that my presenter view was blowing up with Twitter noise as I was trying to present!  Fortunately I only needed to look down at my iPad when transitioning between slides, so it didn’t derail me.</li>
<li>There’s no usefully sized timer or clock in PowerPoint for iOS’s presenter view, and as a result, I had no idea how I was doing on time as I entered the final third of my slides.  This became a distraction because I was fully expecting a five-minute warning from the session moderator at some point and got worried that I wasn’t going to get one.  As such, I didn’t want to slow down the tail of the presentation without knowing how close I was getting to the target.  It turned out that I didn’t get a five-minute warning because I was already concluding at that point.</li></ol>

Fortunately the audience was sufficiently engaged to pad out the Q&A period with many of the questions that would’ve been answered by the slides I had dropped.  Afterwards I got feedback that indicated the presentation was noticeably short to the audience (not great) but that the narrative remained understandable to most attendees throughout the entire presentation (good).

As far as the technical content of the presentation goes, I won't recap it here.  Until I write up the high-level presentation as another blog post, you may have to read the paper (or invite me to present it at your institution!).
<h3>SC Technical Program Reception</h3>

I've never attended the reception that wraps up the last full day of SC for a variety of reasons, and I was going to skip it again this year to fit some me-time into the otherwise frantic week.  However, the venue (the Perot Museum) and its close proximity to my hotel lured me out.

[Photo: The entryway to the Perot Museum]

I am not a "never eat alone" kind of person because I find that my ability to be at the top of my game diminishes without at least some intermittent time to sit back and digest.  As such, I approached the reception with very selfish intent: I wanted to see the museum, learn about something that had nothing to do with supercomputing, have a drink and a meal, and then go back to my hotel.  So I did just that.

The dinosaurs seemed like a major feature of the museum:

[Photo: Rapetosaurus skeleton on display at the Perot Museum]

The paleontological diversity of the dinosaur room reminded me of the dinosaur museum near my wife's hometown in the Canadian prairies, but the exhibit seemed to be largely reproduction fossils that blended science with entertainment.

More impressive to me was the extensive mineral collection:

[Photo: I'm a sucker for quartz; I did my PhD research on silicates.]

Not only were the minerals on display of remarkable quality, but many of them were found in Texas.  In fact, the museum overall had a remarkably Texas-focused set of exhibits, which really impressed me.  The exhibit that most caught my attention was a mini-documentary on the geologic history of Texas that explained how plate tectonics and hundreds of millions of years resulted in the world-famous oil and gas reserves throughout the state.

Having learned something and enjoyed some delightful food at the museum, I then called it quits and cashed out.


      <h2 id="friday">Friday, November 16</h2><div>The last day of SC is always a bit odd because the expo has already wrapped up, most of the vendors and casual attendees have gone home, and the conference is much more quiet and focused.  My day started with a surreal shuttle ride to the conference center in what appeared to be a 90’s-era party bus:</div>

[Photo: Conference shuttle, complete with taped-together audio system, faux leather sofa, and a door that had to be poked with a broom stick to open.]

Only six concurrent half-day workshops and a panel were on the agenda:

[Photo: The entire Friday agenda fit on a single screen]

I stuck my head into the P3HPC workshop's first panel discussion to catch the age-old but ever-lively argument over someone's proposed definition of performance portability and productivity being either too broad or too narrow.  I/O performance portability generally does not have a place in these sorts of conversations (which I don't fault; algorithmic complexity in I/O is usually hidden from user applications), so I attended only as an interested observer and wasn't as fastidious about taking notes as I was earlier in the week.

At 10:30 AM I headed over to the Convergence between HPC and Big Data: The Day After Tomorrow panel discussion, which had a star-studded speaker lineup.  NERSC's Katie Antypas gave a great overview of the NERSC-9/Perlmutter architecture, which fit the panel topic uncannily well since it is a system designed from the ground up to meet the needs of both traditional HPC and large-scale data analysis.

[Photo: The NERSC-9 project director describing how the Perlmutter system embodies the convergence of HPC and Big Data in front of a remarkably big crowd in the final session of SC.]

Unfortunately I had to duck out shortly after she spoke to get to my last meeting of the week with an old colleague for whom I always make time at SC.  Incidentally, some of the most valuable time you can spend at SC is talking to industry consultants.  Not unlike getting to know members of the trade press, good consultants have exposure to a tremendous breadth of problem and solution spaces.  They can give you all manner of interesting insights into different vendors, industry verticals, and market trends in an otherwise brief conversation.

After my final meeting was cut short by my colleague's need to run to the airport, I had a quick bite with another Friday holdout and then made my own way to the airport to catch up on a week's worth of e-mails.  The flight back to Oakland was one of the rare occasions where I was just too worn out to try to catch up on some delinquent report writing, so I just watched three hours of Dark Tourist on Netflix.

<h2>After the Conference</h2>

It was technically Saturday by the time I finally got home, but the family was happy to see me (and the swag I had in tow):

[Photo: George fully appreciating the giant pile of conference swag with which I came home]

This was definitely the busiest SC of my career, but in many ways it was also the most productive.  I owe sincere thanks to everyone in the HPC community who made it such a worthwhile conference to attend--vendors, presenters, old colleagues, and even the new colleagues who occasionally just wanted to introduce themselves and express that they enjoy reading the nonsense I post on Twitter.  I always leave SC more amazed and humbled by all the bright minds with whom I connect, and I hope that I am doing my part to pay that experience forward for others now and in the SC conferences to come.

This is a crosspost from Glenn K. Lockwood, Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

<h1>Are FPGAs the answer to HPC's woes?</h1>


<h2>Executive Summary</h2>


      Not yet.  I’ll demonstrate why no domain scientist would ever want to program in Verilog, then highlight a few promising directions of development that are addressing this fact.

      The usual disclaimer also applies: the opinions and conjectures expressed below are mine alone and not those of my employer.  Also I am not a computer scientist, so I probably don’t know what I’m talking about.  And even if it seems like I do, remember that I am a storage architect who is wholly unqualified to speak on applications and processor performance.

<h2>Premise</h2>

We’re now in an age where CPU cores aren’t getting any faster, and the difficulties of shrinking processes below 10 nm mean we can’t really pack any more CPU cores on a die.  Where’s performance going to come from if we ever want to get to exascale and beyond?

Some vendors are betting on larger and larger vectors: ARM (with its Scalable Vector Extensions) and NEC (with its Aurora coprocessors) are going down this path.  However, algorithms that aren’t predominantly dense linear algebra will need very efficient scatter and gather operations that can pack vector registers quickly enough to make doing a single vector operation worthwhile.  For example, gathering eight 64-bit values from different parts of memory to issue an eight-wide (512-bit) vector multiply requires pulling eight different cache lines--that’s moving 4096 bits of memory for what amounts to 512 bits of computation.  In order to continue scaling vectors out, CPUs will have to rethink how their vector units interact with memory.  This means either (a) getting a lot more memory bandwidth to support these low flops-per-byte ratios, or (b) packing vectors closer to the memory so that pre-packed vectors can be fetched through the existing memory channels.

Another option to consider is GPUs, which work around the vector-packing issue by implementing a massive number of registers and giant crossbars to plumb those bytes into arithmetic units.  Even then, though, relying on a crossbar to connect compute and data is difficult to continue scaling; the interconnect industry gave up on this long ago, which is why today’s clusters now connect hundreds or thousands of crossbars into larger fat trees, hypercubes, and dragonflies.  GPUs are still using larger and larger crossbars--NVIDIA’s V100 GPU is one of the physically largest single-die chips ever made--but there’s an economic limit to how large a die can be.

      This bleak outlook has begun to drive HPC designers towards thinking about smarter ways to use silicon.  Rather than build a general-purpose processor that can do all multiplication and addition operations at a constant rate, the notion is to bring hardware design closer to the algorithms being implemented.  This isn’t a new idea (for example, RIKEN’s MDGRAPE and DESRES’s Anton are famous examples of purpose-built chips for specific scientific application areas), but this approach historically has been very expensive relative to just using general-purpose processor parts.  Only now are we at a place where special-purpose hardware may be the only way to sustain HPC’s performance trajectory.

Given the diversity of applications that run on the modern supercomputer, though, expensive custom chips that only solve one problem aren’t very appetizing.  FPGAs are a close compromise, and there has been growing buzz surrounding the viability of relying on FPGAs in mainstream HPC workloads.

Many of us non-computer scientists in the HPC business have only a vague and qualitative notion of how FPGAs can realistically be used to carry out computations, though.  Since there is growing excitement around FPGAs for HPC as exascale approaches, I set out to get my hands dirty and figure out how they might fit in the larger HPC ecosystem.

<h2>Crash course in Verilog</h2>

Verilog can be very difficult to grasp for people who already know how to program in languages like C or Fortran (like me!).  On the one hand, it looks a bit like C in that it has variables to which values can be assigned, if/then/else controls, for loops, and so on.  However, these similarities are deceptive because Verilog does not execute like C; whereas a C program executes code line by line, one statement after the other, Verilog sort of executes all of the lines at the same time, all the time.

      A C program to turn an LED on and off repeatedly might look like:

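(A sketch; led_on() and led_off() stand in for whatever hypothetical helper functions actually poke the hardware.)

<pre>
/* led_on() and led_off() are hypothetical helpers that poke the LED hardware */
void led_on(void);
void led_off(void);

int main(void)
{
    while (1) {
        led_on();     /* turn the LED on */
        led_off();    /* turn the LED off */
    }
}
</pre>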
where the LED is turned on, then the LED is turned off, then we repeat.

      In Verilog, you really have to describe what components your program will have and how they are connected. In the most basic way, the code to blink an LED in Verilog would look more like

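(Again a sketch; the module and port names are illustrative.)

<pre>
module blink(
    input      clk,   // clock signal entering our circuit
    output reg led    // wire leading to the external LED
);

always @(posedge clk)
    led <= ~led;      // invert the LED's state on every rising clock edge

endmodule
</pre>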
      Whereas C is a procedural language in that you describe a procedure for solving a problem, Verilog is more like a declarative language in that you describe how widgets can be arranged to solve the problem.

      This can make tasks that are simple to accomplish in C comparatively awkward in Verilog. Take our LED blinker C code above as an example; if you want to slow down the blinking frequency, you can do something like

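(Assuming a POSIX sleep() supplies the delay:)

<pre>
#include <unistd.h>

/* the same hypothetical helpers as before */
void led_on(void);
void led_off(void);

int main(void)
{
    while (1) {
        led_on();
        sleep(1);     /* leave the LED on for one second */
        led_off();
        sleep(1);     /* leave the LED off for one second */
    }
}
</pre>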
      Because Verilog is not procedural, there is no simple way to say “wait a second after you turn on the LED before doing something else.” Instead, you have to rely on knowing how much time passes between consecutive clock signals (clk incrementing).

For example, the DE10-Nano has a 50 MHz clock generator, so its clock ticks once every 1/(50 MHz), or 20 nanoseconds, and everything time-based has to be derived from this fundamental clock.  The following Verilog statement:

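<pre>
always @(posedge clk)
    cnt <= cnt + 1;
</pre>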
      indicates that every 20 ns, increment the cnt register (variable) by one. To make the LED wait for one second after the LED is turned on, we need to figure out a way to do nothing for 50,000,000 clock cycles (1 second / 20 nanoseconds). The canonical way to do this is to
<ol><li>create a big register that can store a number up to 50 million</li>
<li>express that this register should be incremented by 1 on every clock cycle</li>
<li>create a logic block that turns on the LED when our register is larger than 50 million</li>
<li>rely on the register eventually overflowing to go back to zero</li></ol>

If we make cnt a 26-bit register, it can count up to 67,108,864 different numbers, and our Verilog can look something like

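(A sketch of just the counting logic; hooking up the LED comes next.)

<pre>
reg [25:0] cnt;             // 26 bits can represent 0 through 67,108,863

always @(posedge clk) begin
    cnt <= cnt + 1;         // wraps back to zero after 67,108,863
    if (cnt > 50000000) begin
        // turn on the LED... but we haven't told Verilog what "the LED" is yet
    end
end
</pre>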
      However, we are still left with two problems:
<ol><li>cnt will overflow back to zero once cnt surpasses 2<sup>26</sup> - 1</li>
<li>We don’t yet know how to express how the LED is connected to our FPGA and should be controlled by our circuit</li></ol>

Problem #1 (cnt overflowing) means that the LED will stay off for exactly 50,000,000 clock cycles (1 second) but stay on for only 2<sup>26</sup> - 1 - 50,000,000 cycles (17,108,863 cycles, or about 0.34 seconds).  Not exactly the one second on, one second off that our C code does.

      Problem #2 is solved by understanding the following:

<ul><li>our LED is external to the FPGA, so it will be at the end of an output wire</li>
<li>the other end of that output wire must be connected to something inside our circuit--a register, another wire, or something else</li></ul>
The conceptually simplest solution to this problem is to create another register (variable), this time only one bit wide, in which our LED state will be stored.  We can then change the state of this register in our if (cnt > 50000000) block and wire that register to our external LED:

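(A sketch of the relevant fragment:)

<pre>
reg [25:0] cnt;
reg        led_state;       // 1-bit register holding the LED's state

always @(posedge clk) begin
    cnt <= cnt + 1;
    if (cnt > 50000000)
        led_state <= 1;     // on after cnt passes 50 million...
    else
        led_state <= 0;     // ...and off again once cnt wraps around
end

assign led = led_state;     // persistently connect the register to the LED wire
</pre>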
      Note that our assign statement is outside of our always @(posedge clk) block because this assignment–connecting our led output wire to our led_state register–is a persistent declaration, not the assignment of a particular value. We are saying “whatever value is stored in led_state should always be carried to whatever is on the other end of the led wire.” Whenever led_state changes, led will simultaneously change as a result.

      With this knowledge, we can actually solve Problem #1 now by
<ol><li>only counting up to 50 million and not relying on overflow of cnt to turn the LED on or off, and</li>
<li>overflowing the 1-bit led_state register every 50 million clock cycles</li></ol>

Our Verilog module would look like

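(A complete sketch, assuming the DE10-Nano's 50 MHz clock:)

<pre>
module blink(
    input  clk,                  // 50 MHz clock
    output led                   // wire leading to the external LED
);

reg [25:0] cnt       = 0;
reg        led_state = 0;

always @(posedge clk) begin
    if (cnt < 50000000) begin
        cnt <= cnt + 1;          // count off one second's worth of cycles
    end
    else begin
        cnt <= 0;                // ...then start counting the next second
        led_state <= ~led_state; // and overflow the 1-bit register
    end
end

assign led = led_state;

endmodule
</pre>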
      and we accomplish the “hello world” of circuit design:

      <div class="separator" style="clear: both; text-align: center;"></div> +
      This Verilog is actually still missing a number of additional pieces and makes very inefficient use of the FPGA’s hardware resources. However, it shows how awkward it can be to express a simple, four-line procedural program using a hardware description language like Verilog.

<h2>So why bother with FPGAs at all?</h2>

It should be clear that solving a scientific problem using a procedural language like C is generally more straightforward than with a declarative language like Verilog. That ease of programming is made possible by a ton of hardware logic that isn’t always used, though.

      Consider our blinking LED example; because the C program is procedural, it takes one CPU thread to walk through the code in our program. Assuming we’re using a 64-core computer, that means we can only blink up to 64 LEDs at once. On the other hand, our Verilog module consumes a tiny number of the programmable logic blocks on an FPGA. When compiled for a $100 hobbyist-grade DE10-Nano FPGA system, it uses only 21 of 41,910 programmable blocks, meaning it can control almost 2,000 LEDs concurrently**. A high-end FPGA would easily support tens of thousands.

      <table cellpadding="0" cellspacing="0" class="tr-caption-container" style="display: block; float: right; margin-left: 1em; text-align: right;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">The CM2 illuminated an LED whenever an operation was in flight. Blinking the LED in Verilog is easy.  Reproducing the CM2 microarchitecture is a different story.  Image credit to Corestore.</td></tr></tbody></table>Of course, blinking LEDs haven’t been relevant to HPC since the days of Connection Machines, but if you were to replace LED-blinking logic with floating point arithmetic units, the same conclusions apply.  In principle, a single FPGA can process a huge number of FLOPS every cycle by giving up its ability to perform many of the tasks that a more general-purpose CPU would be able to do.  And because FPGAs are reprogrammable, they can be quickly configured to have an optimal mix of special-purpose parallel ALUs and general purpose capabilities to suit different application requirements.

      However, the fact that the fantastic potential of FPGAs hasn’t materialized into widespread adoption is a testament to how difficult it is to bridge the wide chasm between understanding how to solve a physics problem and understanding how to design a microarchitecture.

<h2>Where FPGAs fit in HPC today</h2>

To date, a few scientific domains have had success in using FPGAs.  For example,

<ul><li>Experimental instruments that generate data commonly deploy FPGAs close to their detectors to perform very repetitive, relatively simple data filtering or manipulation at extremely high rates.  For example, Illumina HiSeq DNA sequencers incorporate both Altera and Xilinx FPGAs to assist with high-throughput image processing, and high-energy physics experiments routinely use FPGAs for signal processing.</li>
<li>Closer to the HPC side, Convey implemented loadable FPGA blocks to perform many algorithms common to bioinformatics.  For example, they provided an FPGA-accelerated Smith-Waterman algorithm; this algorithm is used to align short DNA sequences along a reference genome and must be executed thousands of times per genome before actual genomic analysis can start.</li>
<li>More recently, Edico Genome has been very successful in implementing a wide range of common bioinformatics algorithms on FPGAs and providing end-to-end analysis pipelines that act as drop-in replacements for standard genomic analysis pipelines.</li></ul>

The success of these FPGA products is due in large part to the fact that the end-user scientists never have to directly interact with the FPGAs.  In the case of experimental detectors, the FPGAs are sufficiently close to the detector that the “raw” data delivered to the researcher has already been processed by them.  Convey and Edico incorporate their FPGAs into appliances, and offloading to the FPGA happens inside proprietary applications that, to the research scientist, look like any other command-line analysis program.

      With all this said, the fact remains that these use cases are all on the fringe of HPC.  They present a black-and-white decision to researchers; to benefit from FPGAs, scientists must completely buy into the applications, algorithms, and software stacks.  Seeing as how these FPGA HPC stacks are often closed-source and proprietary, the benefit of being able to see, modify, and innovate on open-source scientific code often outweighs the speedup benefits of the fast-but-rigid FPGA software ecosystem.

<h2>Where FPGAs will fit in HPC tomorrow</h2>

The way I see it, there are two things that must happen before FPGAs can become a viable general-purpose technology for accelerating HPC:

<ol><li>Users must be able to integrate FPGA acceleration into their existing applications rather than replace their applications wholesale with proprietary FPGA analogues.</li>
<li>It has to be as easy as f90 -fopenacc or nvcc to build an FPGA-accelerated application, and running the resulting accelerated binary has to be as easy as running an unaccelerated binary (see the sketch below).</li></ol>
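For reference, here is roughly the level of effort implied by that second point, sketched with OpenACC (the function and variable names are illustrative and have nothing to do with any particular FPGA toolchain):

<pre>
/* saxpy.c: build with something like "pgcc -acc saxpy.c" or "gcc -fopenacc saxpy.c" */
void saxpy(int n, float a, const float *x, float *y)
{
    /* a single directive asks the compiler to offload the loop */
    #pragma acc parallel loop copyin(x[0:n]) copy(y[0:n])
    for (int i = 0; i < n; i++)
        y[i] = a * x[i] + y[i];
}
</pre>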
      The first steps towards realizing this have already been made; both Xilinx and Intel/Altera now offer OpenCL runtime environments that allow scientific applications to offload computational kernels to the FPGA.  The Xilinx environment operates much like an OpenCL accelerator, where specific kernels are compiled for the FPGA and loaded as application-specific logic; the Altera environment installs a special OpenCL runtime environment on the FPGA.  However, there are a couple of challenges:
<ul><li>OpenCL tends to be very messy to code in compared to simpler APIs such as OpenACC, OpenMP, CUDA, or HIP.  As a result, not many HPC application developers are investing in OpenCL anymore.</li>
<li>Compiling an application for OpenCL on an FPGA still requires going through the entire Xilinx or Altera toolchain.  At present, this is not as simple as f90 -fopenacc or nvcc, and the process of compiling code that targets an FPGA can take orders of magnitude longer than it would for a CPU due to the NP-hard nature of placing and routing across all the programmable blocks.</li>
<li>The FPGA OpenCL stacks are not as polished and scientist-friendly right now; performance analysis and debugging generally still have to be done at the circuit level, which is untenable for domain scientists.</li></ul>
Fortunately, these issues are under very active development, and the story surrounding FPGAs for HPC applications improves month by month.  We're still years from FPGAs becoming a viable option for accelerating scientific applications in a general sense, but when that day comes, I predict that programming in Verilog for FPGAs will seem as exotic as programming in assembly does for CPUs.

      Rather, applications will likely rely on large collections of pre-compiled FPGA IP blocks (often called FPGA overlays) that map to common compute kernels.  It will then be the responsibility of compilers to identify places in the application source code where these logic blocks should be used to offload certain loops.  Since it's unlikely that a magic compiler will be able to identify these loops on their own, users will still have to rely on OpenMP, OpenACC, or some other API to provide hints at compile time.  Common high-level functions, such as those provided by LAPACK, will probably also be provided by FPGA vendors as pre-compiled overlays that are hand-tuned.


<h2>Concluding Thoughts</h2>

      We're still years away from FPGAs being a viable option for mainstream HPC, and as such, I don't anticipate them as being the key technology that will underpin the world's first exascale systems.  Until the FPGA software ecosystem and toolchain mature to a point where domain scientists never have to look at a line of Verilog, FPGAs will remain an accelerator technology at the fringes of HPC.

      However, there is definitely a path for FPGAs to become mainstream, and forward progress is being made.  Today's clunky OpenCL implementations are already being followed up by research into providing OpenMP-based FPGA acceleration, and proofs of concept demonstrating OpenACC-based FPGA acceleration have shown promising levels of performance portability.  On the hardware side, FPGAs are also approaching first-class citizenship with Intel planning to ship Xeons with integrated FPGAs in 2H2018 and OpenPOWER beginning to ship Xilinx FPGAs with OpenCAPI-based coherence links for POWER9.

The momentum is growing, and the growing urgency surrounding post-Moore computing technology is driving investments and demand from both public and private sectors.  FPGAs won't be the end-all solution that gets us to exascale, nor will they be the silver bullet that gets us beyond Moore's Law computing, but they will definitely play an increasingly important role in HPC over the next five to ten years.

      If you've gotten this far and are interested in more information, I strongly encourage you to check out FPGAs for Supercomputing: The Why and How, presented by Hal Finkel, Kazutomo Yoshii, and Franck Cappello at ASCAC.  It provides more insight into the application motifs that FPGAs can accelerate, and a deeper architectural treatment of FPGAs as understood by real computer scientists.


      ** This is not really true.  Such a design would be limited by the number of physical pins coming out of the FPGA; in reality, output pins would have to be multiplexed, and additional logic to drive this multiplexing would take up FPGA real estate.  But you get the point.

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

<h1>GPU usage information for jobs in IBM Spectrum LSF</h1>


In my last blog, we ran through an example showing how IBM Spectrum LSF automatically detects the presence of NVIDIA GPUs on hosts in the cluster and performs the necessary configuration of the scheduler.


In this blog, we take a closer look at the integration between Spectrum LSF and NVIDIA DCGM, which provides GPU usage information for jobs submitted to the system.


To enable the integration between Spectrum LSF and NVIDIA DCGM, we need to specify the LSF_DCGM_PORT=<port number> parameter in LSF_ENVDIR/lsf.conf:

<pre>
root@kilenc:/etc/profile.d# cd $LSF_ENVDIR
root@kilenc:/opt/ibm/lsfsuite/lsf/conf# cat lsf.conf |grep -i DCGM
LSF_DCGM_PORT=5555
</pre>

You can find more details about the variable LSF_DCGM_PORT and what it enables here.


Before continuing, please ensure that the DCGM daemon is up and running. Below, we start DCGM on the default port and run a query command to confirm that it’s up and running.

<pre>
root@kilenc:/opt/ibm/lsfsuite/lsf/conf# nv-hostengine
Started host engine version 1.4.6 using port number: 5555

root@kilenc:/opt/ibm/lsfsuite/lsf/conf# dcgmi discovery -l
1 GPU found.
+--------+-------------------------------------------------------------------+
| GPU ID | Device Information                                                |
+========+===================================================================+
| 0      |  Name: Tesla V100-PCIE-32GB                                       |
|        |  PCI Bus ID: 00000033:01:00.0                                     |
|        |  Device UUID: GPU-3622f703-248a-df97-297e-df1f4bcd325c            |
+--------+-------------------------------------------------------------------+
</pre>

Next, let’s submit a GPU job to IBM Spectrum LSF to demonstrate the collection of GPU accounting. Note that the GPU job must be submitted to Spectrum LSF with the exclusive mode specified in order for the resource usage to be collected. As was the case in my previous blog, we submit the gpu-burn test job (formally known as Multi-GPU CUDA stress test).

<pre>
test@kilenc:~/gpu-burn$ bsub -gpu "num=1:mode=exclusive_process" ./gpu_burn 120
Job <54086> is submitted to default queue <normal>
</pre>

Job 54086 runs to successful completion, and we use the Spectrum LSF bjobs command with the -gpu option to display the GPU usage information in the output below.

test@kilenc:~/gpu-burn$ bjobs -l -gpu 54086

Job <54086>, User <test>, Project <default>, Status <DONE>, Queue <normal>, Com
                     mand <./gpu_burn 120>, Share group charged </test>
Mon Oct  1 11:14:04: Submitted from host <kilenc>, CWD <$HOME/gpu-burn>, Reques
                     ted GPU <num=1:mode=exclusive_process>;
Mon Oct  1 11:14:05: Started 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot(s)
                      on Host(s) <kilenc>, Execution Home </home/test>, Executi
                     on CWD </home/test/gpu-burn>;
Mon Oct  1 11:16:08: Done successfully. The CPU time used is 153.0 seconds.
                     HOST: kilenc; CPU_TIME: 153 seconds
                        GPU ID: 0
                            Total Execution Time: 122 seconds
                            Energy Consumed: 25733 Joules
                            SM Utilization (%): Avg 99, Max 100, Min 64
                            Memory Utilization (%): Avg 28, Max 39, Min 9
                            Max GPU Memory Used: 30714888192 bytes


GPU Energy Consumed: 25733.000000 Joules


 MEMORY USAGE:
 MAX MEM: 219 Mbytes;  AVG MEM: 208 Mbytes

 SCHEDULING PARAMETERS:
           r15s   r1m  r15m   ut      pg    io   ls    it    tmp    swp    mem
 loadSched   -     -     -     -       -     -    -     -     -      -      -
 loadStop    -     -     -     -       -     -    -     -     -      -      -

 EXTERNAL MESSAGES:
 MSG_ID FROM       POST_TIME      MESSAGE                             ATTACHMENT
 0      test       Oct  1 11:14   kilenc:gpus=0;                          N

 RESOURCE REQUIREMENT DETAILS:
 Combined: select[(ngpus>0) && (type == local)] order[gpu_maxfactor] rusage[ngp
                     us_physical=1.00]
 Effective: select[((ngpus>0)) && (type == local)] order[gpu_maxfactor] rusage[
                     ngpus_physical=1.00]

 GPU REQUIREMENT DETAILS:
 Combined: num=1:mode=exclusive_process:mps=no:j_exclusive=yes
 Effective: num=1:mode=exclusive_process:mps=no:j_exclusive=yes

 GPU_ALLOCATION:
 HOST             TASK ID  MODEL        MTOTAL  FACTOR MRSV    SOCKET NVLINK
 kilenc           0    0   TeslaV100_PC 31.7G   7.0    0M      8      -
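If you want these numbers programmatically (for reporting or chargeback, say), one quick approach is to scrape them out of the bjobs output. A minimal sketch in Python, assuming the field labels appear exactly as in the output above:

import re
import subprocess

# Grab the detailed, GPU-aware job record from LSF
out = subprocess.check_output(["bjobs", "-l", "-gpu", "54086"], text=True)

# Pull out the per-GPU metrics shown in the sample output
for field in ("Energy Consumed", "SM Utilization (%)",
              "Memory Utilization (%)", "Max GPU Memory Used"):
    m = re.search(re.escape(field) + r":\s*(.+)", out)
    if m:
        print(field, "->", m.group(1).strip())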

And to close, yours truly spoke at the HPC User Forum in April 2018 (Tucson, AZ), giving a short update in the vendor panel about Spectrum LSF, focusing on GPU support.

This is a crosspost from Derek’s Web, Thoughts from Derek. See the original post here.

      HTCondor Pull Mode


For a recent project to utilize HPC clusters for HTC workflows, I had to add the ability to transfer the input and output sandboxes to and from HTCondor. HTCondor already has the ability to spool input files to a SchedD and pull the output sandbox; these functions are intended to stage jobs to an HTCondor pool. But HTCondor did not have the ability to pull jobs from an HTCondor pool.


      The anticipated steps for a job pulled from an HTCondor pool:

1. Download the input sandbox
2. Submit the job to the local scheduler
3. Watch the status of the job
4. Once completed, transfer the output sandbox to the origin SchedD

      The sandboxes are:

• Input:
  • Input files
  • Executable
  • Credentials
• Output:
  • Stdout / Stderr from job
  • Output files or any files that may have changed while the job ran

      API Additions


In order to transfer the input and output sandboxes, two new commands were added to the SchedD, as well as a new client function and python bindings to use them.


      The function for transferring input files is:

transferInputSandbox(constraint, destination)

constraint is an HTCondor constraint expression selecting the jobs whose input files should be transferred. destination is a directory in which to put the sandboxes. The sandboxes will be placed in directories named destination/&lt;ClusterId&gt;/&lt;ProcId&gt;/.


      For transferring output files, the function is:

transferOutputSandbox(jobs)

Here, jobs is a list of tuples of the form (classad, sandboxdir). classad is the full classad of the original job, and sandboxdir is the location of the output sandbox to send.
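To make the flow concrete, here is a hypothetical usage sketch. These calls exist only in my patched HTCondor build, and I am assuming the python bindings expose them as methods on the Schedd object; the signatures follow the descriptions above.

import htcondor

# In practice this would point at the remote origin SchedD
schedd = htcondor.Schedd()

# Pull the input sandboxes for cluster 1234; files land in
# ./sandboxes/<ClusterId>/<ProcId>/
schedd.transferInputSandbox("ClusterId == 1234", "./sandboxes")

# ... submit to the local scheduler, watch the job, wait for completion ...

# Push the output sandbox back: one (classad, sandboxdir) tuple per job
job_ad = schedd.query("ClusterId == 1234")[0]
schedd.transferOutputSandbox([(job_ad, "./sandboxes/1234/0")])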


      Current Status


      I have created a repo for an example that uses these functions in order to pull a job from a remote SchedD.


Also, my changes to HTCondor are in my repo, and I have begun the discussion about merging them in.

This is a crosspost from Derek’s Web, Thoughts from Derek. See the original post here.

      StashCache By The Numbers


The StashCache federation comprises three components: Origins, Caches, and Clients. There are additional components that increase the usability of StashCache, which I will also mention in this post.

[Figure: Diagram of the StashCache federation]

[Figure: Cumulative usage of StashCache over the last 90 days]

      Origins


      A StashCache Origin is the authoritative source of data. The origin receives data location requests from the central redirectors. These requests take the form of “Do you have the file X”, to which the origin will respond “Yes” or “No”. The redirector then returns a list of origins that claim to have the requested file to the client.


      An Origin is a simple XRootD server, exporting a directory or set of directories for access.

Origin           Base Directory   Data Read
LIGO Open Data   /gwdata          926 TB
OSG Connect      /user            246 TB
FNAL             /pnfs            166 TB
OSG Connect      /project         63 TB

      A list of Origins and their base directories.


      Clients


      The clients interact with the StashCache federation on the user’s behalf. They are responsible for choosing the “best” cache. The available clients are CVMFS and StashCP.

[Figure: StashCache client usage by tool, and StashCP usage]

In the charts above, you can see that most users of StashCache use CVMFS to access the federation. All clients use GeoIP to determine the “best” cache; GeoIP location services are provided by the CVMFS infrastructure in the U.S., and the geographically nearest cache is used.


      The GeoIP service runs on multiple CVMFS Stratum 1s and other servers. The request to the GeoIP service includes all of the cache hostnames. The GeoIP service takes the requesting IP address and attempts to locate the requester. After determining the location of all of the caches, the service returns an ordered list of nearest caches.
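As a toy illustration of that ordering step (this is not the real GeoIP service, which resolves locations from the MaxMind database; the hostnames and coordinates below are made up), here is a sketch that sorts caches nearest-first by great-circle distance:

from math import asin, cos, radians, sin, sqrt

def haversine_km(a, b):
    # Great-circle distance between two (lat, lon) points, in km
    lat1, lon1, lat2, lon2 = map(radians, (*a, *b))
    h = (sin((lat2 - lat1) / 2) ** 2
         + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2)
    return 2 * 6371 * asin(sqrt(h))

def order_caches(client_loc, caches):
    # Return cache hostnames sorted nearest-first
    return sorted(caches, key=lambda c: haversine_km(client_loc, caches[c]))

caches = {"cache-nyc.example.org": (40.7, -74.0),
          "cache-chi.example.org": (41.9, -87.6)}
print(order_caches((38.9, -77.0), caches))  # a client near Washington, DC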


      The GeoIP service uses the MaxMind database to determine locations by IP address.


      CVMFS


Most (if not all) origins are indexed in an *.osgstorage.org repo. For example, the OSG Connect origin is indexed in the stash.osgstorage.org repo. It uses a special feature of CVMFS where the namespace and data are separated. The file metadata, such as file permissions, directory structure, and checksums, are stored within CVMFS; the file contents are not.


      When accessing a file, CVMFS will use the directory structure to form an HTTP request to an external data server. CVMFS uses GeoIP to determine the nearest cache.


The indexer may also configure a repo to be “authenticated”. A whitelist of certificate DNs is stored within the repo metadata and distributed to each client. The CVMFS client will pull the certificate from the user’s environment. If the certificate DN matches a DN in the whitelist, it uses the certificate to authenticate with an authenticated cache.


      StashCP


StashCP works in the following order (a sketch in code follows the list):

1. Check if the requested file is available from CVMFS. If it is, copy the file from CVMFS.
2. Determine the nearest cache by sending the cache hostnames to the GeoIP service.
3. After determining the nearest cache, run the xrdcp command to copy the data from that cache.
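A rough Python sketch of that three-step order (illustrative only, not the real stashcp source; nearest_cache() stands in for the GeoIP lookup, and the hostnames are hypothetical):

import os
import shutil
import subprocess

CVMFS_ROOT = "/cvmfs/stash.osgstorage.org"

def nearest_cache(caches):
    # Placeholder for the GeoIP service call described earlier
    return caches[0]

def stash_copy(path, dest, caches):
    cvmfs_path = os.path.join(CVMFS_ROOT, path.lstrip("/"))
    if os.path.exists(cvmfs_path):       # 1. prefer CVMFS if the file is there
        shutil.copy(cvmfs_path, dest)
        return
    cache = nearest_cache(caches)        # 2. pick the GeoIP-nearest cache
    url = "root://%s:1094//%s" % (cache, path.lstrip("/"))
    subprocess.check_call(["xrdcp", url, dest])   # 3. copy with xrdcp

stash_copy("/user/alice/data.bin", "./data.bin",
           ["cache1.example.org", "cache2.example.org"])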

      Caches

[Figure: Cache locations in the U.S.]

The cache is half XRootD cache and half XRootD client. When a cache receives a data request from a client, it searches its own cache directory for the file. If the file is not in the cache, it uses the built-in client to retrieve the file from one of the origins. The cache requests the data location from the central redirector, which in turn asks the origins for the file location.


The cache listens on port 1094 for the regular XRootD protocol, and on port 8000 for HTTP.


      Authenticated Caches


Authenticated caches use GSI certificates to authenticate access to files within the cache. The client will authenticate with the cache using the client’s certificate. If the file is not in the cache, the cache will use its own certificate to authenticate with the origin to download the file.


      Authenticated caches use port 8443 for HTTPS.

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      The Taming of the GPU


The media has been alight with articles regarding the groundbreaking Summit supercomputer recently unveiled at Oak Ridge National Laboratory. It sports a mind-boggling 9,216 IBM POWER9 CPUs and 27,648 NVIDIA Tesla GPUs, underpinned by 250 petabytes of storage. This muscle will be put to good use running traditional HPC as well as AI workloads across a broad range of sciences.


Looking at the landscape of systems being built for HPC and now AI, there is one commonality – many are hybrid CPU-GPU systems. Whether we’re considering systems at the pinnacle of computing such as Summit, or commercial HPC and AI systems, GPUs have become a de facto method for accelerating code and providing copious amounts of floating-point performance.


      The early days of clustered computing saw the advent of workload and resource managers which were a means of taming environments by orchestrating access to, and bringing computing resources to bear, in a predictable manner – aligned with the needs of scientists and businesses alike. As environments have grown in scale to meet the growing thirst for HPC, GPUs and accelerated computing have stepped out on stage to take a bow.


Software developers have ported, and continue to port and optimize, applications to benefit from the capabilities provided by GPUs. According to a recent report from November 2017, a high percentage of HPC applications now offer GPU support.


      “According to the latest HPC User Site Census data and additional research, of the 50 most popular application packages mentioned by HPC users, 34 offer GPU support (including two under current development), including 9 of the top 10.”


Indeed, the recent Top500 list (November 2017) includes no fewer than 87 hybrid CPU-GPU systems (and more counting other types of accelerators).


      So how do GPU-heavy systems impact the task of the workload and resource managers? Fundamentally, as GPUs are resources, workload schedulers have had to adapt too.


      A wild west land grab


It’s not just large-scale supercomputers that face the challenges of compute supply versus user demand. Commercial HPC environments are also now increasingly hybrid CPU-GPU based, with potentially hundreds of users and millions of jobs per day in high-throughput computing use cases. These are complex environments and large investments, requiring workload management software with sophisticated capabilities to rein in all the resources – so that users end up with GPU workloads running on the right servers.


      Computing environments today can have some servers with GPUs, some without, varied GPU configurations including models and memory, and a different number of GPUs per node. Adding to this complexity, in a typical data center, servers can come and go so the configuration is not always static.


      In general, workload schedulers require the administrator to specify in the configuration whether a given server is equipped with GPUs, often requiring additional information such as the GPU model, etc. Without this crucial information, the workload scheduler cannot effectively route jobs to nodes – potentially leading to a Wild West grab for resources.


      Call in the Cavalry


IBM Spectrum LSF has been continuously innovating to address the needs of increasingly complex, large-scale HPC environments since 1992. Support for NVIDIA GPUs was first introduced in IBM Spectrum LSF in 2007. Continuing this long tradition of enhancements to NVIDIA GPU support, IBM Spectrum LSF now includes a new capability designed to dramatically simplify the administration of GPU servers and enable users to be productive faster. With “zero config” for NVIDIA GPUs, IBM Spectrum LSF detects the presence of GPUs and automatically performs the necessary scheduler configuration – without any interaction from the administrator. IBM Spectrum LSF will help tame the GPU environment for you, allowing users with GPU-ready codes to be productive from the moment the environment is set up.

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      Beyond Simulation – Harnessing AI for Next-Generation HPC at ISC


Computer simulation has become a staple technique in many disciplines – so much so that it is often described as the “third pillar” of the scientific method. Alongside theory and experimentation, simulation is used in everything from automotive design to computational chemistry to forecasting weather and market movements.


      Simulation helps us solve problems that are too difficult, time-consuming, or expensive to solve empirically – for example, what is the optimal design and material for an impeller in a centrifugal pump? Or what failure states might exist in a semiconductor design from which a device can’t recover?


By devising accurate mathematical models and approximating those numerically in software, we can predict the behavior of real-world systems based on various parameters and a set of initial conditions. The better the model, the better the quality of the input data, and the more computing power that can be brought to bear, the better the prediction.


      Simulation vs. analytics


      High-performance data analytics (HPDA) and computer simulation are increasingly joined at the hip. Analytic techniques are sometimes used to improve simulation – providing better quality datasets to feed a simulation model, for example. Other times, simulation helps improve analytics – back-testing the performance of a financial or weather model over past data, for example, to gain confidence in a model’s predictive quality.


While simulation has served us well, it has limits. The quality of a predictive model is only as good as our ability to identify features useful in making accurate predictions. For some problems, such as structural mechanics, the features required to build a predictive model are relatively well known. For other problems, such as financial markets or weather models, the number of potential parameters is vast, and their effects are sometimes poorly understood, significantly affecting the quality of the result.


      A fourth pillar in the scientific method


      AI is rapidly emerging as a “fourth pillar” in the scientific method complementing theory, experimentation, and simulation techniques. Inference allows computers to make educated guesses about future results without the need to go through a full-blown simulation.


      In fact, the AI development process can be modeled as automation of the scientific method where the steps are:

1. Observe
2. Hypothesize
3. Test Hypothesis
4. (return to #1)

      The power of “better guesses”


      Humans often infer things based on prior knowledge intuitively. For example, back to our impeller design, if a centrifugal pump needs to handle a viscous or corrosive liquid, the human engineer might know intuitively that a strong, non-reactive material like stainless steel is a good choice. By making educated guesses on materials and other parameters, the problem-space to be simulated is reduced dramatically.


When dealing with complex problems, however, our human ability to make such inferences breaks down. Even for subject matter experts, problems like modeling chemical reactions or predicting how a semiconductor will behave are beyond our experience. The systems we need to model are too complex and involve too many parameters.


      Intelligent Simulation


      Fortunately, computers are very good at sifting through vast amounts of data and detecting patterns not obvious to humans. The best way to boost simulation performance is often to avoid simulations that will be irrelevant and not useful. By applying machine learning and other AI techniques to make informed guesses about what parameters and simulations will be most useful in solving a problem we can:

• Reduce the number of simulations required
• Provide higher-resolution simulations and more trustworthy models
• Reduce costs and cycle times wherever computer simulation is used

      Intelligent simulation helps us more effectively explore a problem space by predicting what regions, data, and exploratory techniques are most likely to be useful and omitting the rest.


      Bayesian Optimization


      In probability theory, Bayes’ theorem describes the probability of an event, based on prior knowledge of conditions that might be related to the event. It turns out that Bayesian analysis is a particularly effective way to capture common sense information from data, to help make better predictions, thus reducing the amount of computer simulation required. IBM has developed a Bayesian optimization accelerator that can function as an HPC advisory engine.


      Powered by Bayesian optimization libraries, the system helps scientists exploit these state-of-the-art techniques to computer simulation in multiple industries without the need for deep AI expertise. Bayesian optimization has demonstrated that it can reduce simulation requirements by half with no disruption to the existing HPC infrastructure, dramatically improving HPC productivity.
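As a toy illustration of the approach (using the open-source scikit-optimize library rather than IBM’s IBO), the sketch below lets a Gaussian-process model decide which design points are worth simulating instead of sweeping the whole parameter space:

from skopt import gp_minimize

def simulate(params):
    # Stand-in for an expensive simulation; returns a cost to minimize
    x, y = params
    return (x - 0.3) ** 2 + (y + 0.1) ** 2

result = gp_minimize(
    simulate,                               # expensive black-box objective
    dimensions=[(-1.0, 1.0), (-1.0, 1.0)],  # bounds for the two parameters
    n_calls=30,                             # 30 runs instead of a dense sweep
    random_state=0,
)
print(result.x, result.fun)                 # best parameters, best cost seen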


      Harnessing AI for Next-Generation HPC @ ISC 2019


      At this year’s ISC conference in Frankfurt, Germany, you can learn more about IBM solutions for AI and HPC –

• Learn how accelerating simulations with Bayesian optimization has the potential to help you perform simulations in half the time
• Learn how IBM Q researchers are putting machine learning on the path to quantum advantage
• Try out IBM RXN for Chemistry and learn how AI techniques are helping automate discovery for organic chemistry by predicting chemical reactions
• Finally, learn how a CPPM PCIe40 data acquisition adapter in an IBM POWER9 based system can help advance state-of-the-art research in high-energy physics and other applications

Stop by the IBM booth (D-1140 in the exhibit hall) to see demos of everything from Power Systems and Spectrum Storage to Spectrum Computing and Watson Machine Learning Accelerator.

This is a crosspost from Jonathan Dursi’s blog, R&D computing at scale. See the original post here.

      Computational Science Collaborations Train Great Managers - But Trainees Might Need Help To Become Good Managers First


What I write below likely applies to fields of theoretical and observational science that involve collaborations, too. I think the experiences that trainees in laboratory science are likely […] open disdain for anything that smacks of management, marketing, or other suspiciously real-world activities).


      But computational-science academic environments are pretty particular places, with different approaches to working in a team than, say, in much of business.


First, academic work is largely performed by trainees like students, who have a very different relationship to their academic supervisor than an employee does to their manager. At its best, when an academic lab […] responsibilities, and looking for opportunities for them to apply those emerging skills to new problems.


Second, since much of the work is on open research problems, it’s very difficult to judge how long something “should” take, so deadlines in assigning tasks are relatively uncommon; updates tend to sound like “here’s what I managed to get done this week,” and it is what it is.


Third, due to the open-endedness, the trainee/mentor relationship, and modelling the extreme independence of senior academics, there is a norm of collegiality. Directing someone’s work […] ruthlessly assessed, but the path to get there, the work process, is somewhat off-limits.


Fourth, it’s common - maybe even the norm - for projects to be tackled with others not only outside the team, but in different institutions entirely.


      Finally, the independence of researchers, the dynamic nature of research, and the fact that so many coworkers are elsewhere mean many working relationships are comparatively short-lived.


So imagine that you are a postdoc - the most senior trainee - in a computational lab, routinely working in multi-institutional collaborations, and this is where you’re developing your people and […] conference that a competitor’s lab is looking at some of the same questions.


But your “project team” are peers or even academics more senior than you, and many are outside your institution entirely; getting them to do anything is a matter of persuasion. Your local, […] skills, and you don’t have time to direct people in how to do their work even if you were so inclined. And the clock is ticking.


      What kind of skills are you developing as you’re thrust into this situation?


Much of the computing and technical community is teaching itself […] teams, and 3 pitfalls that managers in less successful teams fell into.


      Those characteristics of good managers, in decreasing order of importance:

1. They’re good coaches.
2. They empower their team and don’t micro-manage.
[…]

        How will our postdoc rate against those criteria? Well:

1. They are going to be very concerned with skills development in their direct reports, encouraging them on to bigger and better things — so the postdoc learns to be a good coach;
2. They certainly won’t micromanage — they’ll let team members decide how to approach their work;
[…]

          That’s just about a clean sweep!


          So I claim that the sort of training that I’ve seen people get on projects in computational (or observational or theoretical) science collaborations equips people with the advanced skills to become great managers.


But there’s a downside. The very hands-off approach to management (indeed, the refusal to countenance that “management” is even an appropriate thing for scientists to stoop to) means that some of […] work at Google pointed out key shortcomings of their less successful managers:

1. Have trouble making a transition to the team.
2. Lack a consistent approach to performance management.
[…]

[…] management shouldn’t be done; “we’re not that sort of people”. So:

1. Making the transition to being the manager of the team is going to be doubly difficult for our postdoc — both in internalizing their role as a manager, and in putting the time in to develop really solid working relationships with the team members.
2. Performance communications - giving people feedback (positive and negative) on their work often and regularly, rather than waiting weeks or months for some big sub-project to be done and then assessing the finished project — is going to be completely foreign, if not anathema, to them.
[…]

[…] academy, the basic skills — or even models of what the basic skills would look like — are often going to be lacking.


              But those basic skills are the easiest to address! Anyone can learn them, and someone who’s spent a good chunk of their career in the sciences certainly can.


              So many computational scientists do end up becoming good — and so quickly become great — managers successfully on their own, but it can take a lot of trial and error, and be stressful for all involved. (My own transition towards becoming good has been…. uneven.)


I don’t think that transition has to be so difficult; today there are some fantastic resources out there to help. And maybe it’s where I’ve been looking or which […] rework.withgoogle.com are well worth reading.


Scientists learn a lot of transferrable skills in their training, and the world needs more of their input in teams and projects across all sectors. There’s a stereotype about scientists being […] help them succeed. If there are some that have especially helped you, please do share them with me and I’ll list them here.

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      Intelligent HPC - Keeping Hard Work at Bay(es)


Since the dawn of time, humans have looked for ways to make their lives easier. Over the centuries, human ingenuity has given us inventions such as the wheel and simple machines – which help greatly with tasks that would otherwise be extremely laborious. Over time, we’ve learned there are often alternatives to brute-force ways of doing things. It’s this human reasoning that has driven the advancement we find in our world today.


Fast forward to this century, where computer-driven simulation has developed into the third branch of the scientific method, supplementing theory and experimentation. For decades, simulation and modelling have delivered unprecedented capabilities to drive innovation for the betterment of the world. The need to run more simulations faster has spurred the development of ever faster processors, networking, and storage. The approach to speeding up simulations has been one of brute force: faster computing to deliver faster results. But the insatiable desire to perform simulations faster has very real implications in today’s world – such as managing the power requirements of future supercomputers. It’s time for high performance computing to revisit brute-force approaches to achieve the next level of performance.


      Lessons from the past


We sometimes forget that it’s important to look at lessons from the past in order to create a better future. HPC simulations today are computationally intensive – and as the fidelity of models increases, so does the number of calculations and the time to solution. Rethinking this laborious method for simulations, are there ways that we can cut down on the number of calculations performed? A calculation avoided is time saved. Our lesson takes us back to 1763, when Thomas Bayes authored “An Essay towards solving a Problem in the Doctrine of Chances”, from which Bayes’ Theorem was developed.


In simple terms, Bayes’ theorem can be used to predict the probability of an outcome based upon prior knowledge or information. What if Bayes’ theorem could be applied to computational simulations to determine the likelihood that a given iteration of a simulation will provide a useful outcome, and to discard those iterations that likely won’t? A calculation avoided is time saved. As it turns out, applying Bayesian methods to HPC design can dramatically reduce the time to an optimal product specification.
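In symbols, Bayes’ theorem says that the probability of outcome A, given observed evidence B, is

P(A|B) = P(B|A) × P(A) / P(B)

where P(A) captures the prior knowledge and P(B|A) measures how well A explains the evidence.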


      Bayesian optimization at work


To put Bayesian methods to the test, the engineers of the IBM Systems High Speed Bus Signal Integrity (HSB-SI) team used software based upon the principles of Bayesian statistics called IBM Bayesian Optimization (IBO), developed by IBM Research. IBO was designed to accelerate computational workflows through the application of sophisticated algorithms. The HSB-SI team’s challenge is to minimize the time needed for design-validation simulation analysis of high-speed interfaces for the purpose of choosing an optimal configuration point, while maintaining or increasing the fidelity of the solution. In testing IBO, they wanted to reduce the number of simulations needed to reach the optimal configuration point for chip-to-chip communication.


“Our team is taking advantage of state-of-the-art machine learning to design computer systems of the future.”
– Dale Becker, Ph.D., Chief Engineer, Electrical Packaging Integration, IBM


The results were dramatic. They achieved a 140x faster time to solution with higher accuracy than their legacy method. Using IBO, they used 99% fewer cores to arrive at a higher-confidence solution with less than a 1% error rate.


      With time to solution being a critical element of competitive advantage, the adoption of sophisticated statistical methods and machine learning to accelerate simulation workflows is destined to grow quickly. In our next article about innovations in HPC we will highlight multiple use cases where Bayesian optimized workflows are transforming HPC simulation-driven innovation.


      Originally published on HPCwire IBM Solution Channel on December 18, 2019

This is a crosspost from Glenn K. Lockwood, Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

      ISC'19 Recap


      I was fortunate enough to attend the ISC HPC conference this year, and it was a delightful experience from which I learned quite a lot.  For the benefit of anyone interested in what they have missed, I took the opportunity on the eleven-hour flight from Frankfurt to compile my notes and thoughts over the week.

      I spent most of my time in and around the sessions, BOFs, and expo focusing on topics related to I/O and storage architecture, so that comprises the bulk of what I’ll talk about below.  Rather than detail the conference chronologically as I did for SC’18 though, I’ll only mention a few cross-cutting observations and trends here.

      I’ll also not detail the magnificent HPC I/O in the Data Center workshop here, but anyone reading this who cares about storage or I/O should definitely flip through the slides on the HPC-IODC workshop website!  This year HPC-IODC and WOPSSS merged their programs, resulting in a healthy mix of papers (in both CS research and applied research), expert talks, and fruitful discussion.

High-level observations

As is often the case for ISC, there were a few big unveilings early in the week.  Perhaps the largest was the disclosure of several key architectural details surrounding the Aurora exascale system to be deployed at Argonne in 2021.  TACC’s Frontera system, a gigantic Dell cluster stuffed with Intel Cascade Lake Xeons, made its debut on the Top500 list as well.  In this sense, Intel was in good form this year.  And Intel has to be, since only one of the handful of publicly disclosed pre-exascale (Perlmutter and Fugaku) and exascale systems (Frontier) will be using Intel parts.

The conference also had an anticipatory undertone as these pre-exascale and exascale systems begin coming into focus.  The promise of ARM as a viable HPC processor technology is becoming increasingly credible as Sandia’s Astra machine, an all-ARM cluster integrated by HPE, appeared throughout the ISC program.  These results are paving the way for Fugaku (the “post-K” machine), which will prove ARM and its SVE instruction set at extreme scale.

      Also contributing to the anticipatory undertone was a lot of whispering that occurred outside of the formal program.  The recently announced acquisition of Cray by HPE was the subject of a lot of discussion and conjecture, but it was clear that the dust was far from settled and nobody purported to have a clear understanding of how this would change the HPC market.  There was also some whispering about a new monster Chinese system that was on the cusp of making this year’s ISC Top500.  Curiously, the Wuxi supercomputer center (where Tianhe-2 is housed) had a booth on the show floor, but it was completely vacant.

      Also noticeably absent from the show floor was NVIDIA, although they certainly sent engineers to participate in the program.  By comparison, AMD was definitely present, although they were largely promoting the impending launch of Rome rather than their GPU lineup.  A number of HPC solutions providers were excited about Rome because of both high customer demand and promising early performance results, and there wasn’t a single storage integrator with whom I spoke that wasn’t interested in what doors will open with an x86 processor and a PCIe Gen4 host interface.

Intel disclosures about Aurora 2021

Perhaps the biggest news of the week was a “special event” presentation given by Intel’s Rajeeb Hazra which disclosed a number of significant architectural details around the Aurora exascale system being deployed at Argonne National Laboratory in 2021.

Nodes will be comprised of Intel Xeon CPUs and multiple Intel GPUs

Intel has confirmed that Aurora will be built on Intel-designed general-purpose GPUs based on the “Xe” architecture with multiple GPUs per node.  With this disclosure and the knowledge that nodes will be connected with Cray’s Slingshot interconnect, it is now possible to envision what a node might look like.  Furthermore, combining the disclosure of a high GPU:CPU ratio, the Aurora power budget, and some vague guessing at the throughput of a 2021 GPU narrows down the number of nodes that we may expect to see in Aurora.

      Although no specific features of the Intel GPUs were disclosed, Intel was also promoting their new AVX512-VNNI instructions to position their latest top-bin Xeon cores as the best option for inference workloads.  Coupled with what we can assume will be highly capable GPUs for training acceleration, Intel is building a compelling story around their end-to-end AI portfolio.  Interestingly, news that NVIDIA is partnering with ARM dropped this past week, but NVIDIA’s noted absence from ISC prevented a comparable ARM-NVIDIA AI solution from shining through.

System will have over 10 PB of system memory

Aurora will have a significant amount of memory, presumably comprised of a combination of HBM, DDR, and/or Optane persistent memory.  The memory capacity is markedly higher than that of the AMD-based Frontier system, suggesting that Intel may be leveraging Optane persistent memory (which has a lower cost per bit than DDR) to supplement the HBM that is required to feed such a GPU-heavy architecture.

The storage subsystem will deliver over 230 PB of capacity at over 25 TB/sec

Perhaps the most interesting part of Aurora is its I/O subsystem, which will use an object store and an all-solid-state storage architecture instead of the traditional parallel file system.  This will amount to 230 PB of usable flash capacity that can operate in excess of 25 TB/sec.  Although I’ll describe this storage architecture in more depth below, combining the performance point of 25 TB/sec with the aforementioned high GPU:CPU ratio suggests that each compute node will be able to inject a considerable amount of I/O traffic into the fabric.  This points to very capable Xeon cores and very capable NICs.

The programming model for the system will utilize SYCL

Intel has announced that its “One API” relies on the Khronos Group’s SYCL standard for heterogeneous programming in C++ rather than the incumbent choices of OpenMP, OpenACC, or OpenCL.  This does not mean that OpenMP, OpenACC, and/or OpenCL won’t be supported, but it does reveal where Intel intends to put all of its efforts in enabling its own GPUs and FPGAs for HPC.  They further emphasized their desire to keep these efforts open, standards-based, and portable, undoubtedly demonstrating stark contrast with the incumbent GPU vendors.  This is an interesting long-term differentiator, but time will tell whether SYCL is able to succeed where OpenCL has failed and gain a foothold in the HPC ecosystem.

DAOS will be HPC’s gateway drug to object stores

DAOS (the “Distributed Asynchronous Object Store,” pronounced like it’s spelled) is an object store that Intel has been developing for the better part of a decade in collaboration with the US Department of Energy.  The DAOS name has become overloaded in recent years as a result of it changing scope, focus, and chief architects, and the current version is quite different from the original DAOS that was prototyped as a part of the DOE Fast Forward program (e.g., only one of three original DAOS components, DAOS-M, survives).  A few key features remain the same, though:

• It remains an object store at its core, but various middleware layers will be provided to expose alternate access APIs and semantics
• It is specifically designed to leverage Intel Optane persistent memory and NAND-based flash to deliver extremely high IOPS in addition to high streaming bandwidth
• It relies on user-space I/O via Mercury and SPDK to enable its extreme I/O rates
• Its storage architecture is still based on a hierarchy of servers, pools, containers, and objects

Object stores have historically not found success in HPC due to HPC apps’ general dependence on POSIX-based file access for I/O, but the Aurora DAOS architecture cleverly bridges this gap.  I was lucky enough to run into Johann Lombardi, the DAOS chief architect, at the Intel booth, and he was kind enough to walk me through a lot of the details.

      DAOS will provide seamless integration with a POSIX namespace by using Lustre’s new foreign layout feature which allows an entity in the Lustre namespace to be backed by something that is not managed by Lustre.  In practice, a user will be able to navigate a traditional file namespace that looks like any old Lustre file system using the same old ls and cd commands.  However, some of the files or directories in that namespace may be special DAOS objects, and navigating into a DAOS-based object transparently switches the data path from one that uses the traditional Lustre client stack to one that uses the DAOS client stack.  In particular,
• Navigating into a directory that is backed by a DAOS container will cause the local DAOS agent to mount that DAOS container as a POSIX namespace using FUSE and junction it into the Lustre namespace.  Files and subdirectories contained therein will behave as regular POSIX files and subdirectories for the most part, but they will only honor a subset of the POSIX consistency semantics.
• Accessing a file that is backed by a DAOS container (such as an HDF5 file) will cause the client to access the contents of that object through whatever API and semantics the DAOS adapter for that container format provides.

DAOS also includes a preloadable library which allows performance-sensitive applications to bypass the FUSE client entirely and map POSIX API calls to DAOS native API calls.  For applications that use middleware such as HDF5 or MPI-IO, I/O will be able to entirely bypass the POSIX emulation layer and get the highest performance through DAOS-optimized backends.  In the most extreme cases, applications can also write directly against the DAOS native object API to control I/O with the finest granularity, or use one of DAOS’s addon APIs that encapsulate other non-file access methods such as key-value or array operations.

      A significant amount of this functionality is already implemented, and Intel was showing DAOS performance demos at its booth that used both IOR (using the DAOS-native backend) and Apache Spark:


      The test hardware was a single DAOS server with Intel Optane DIMMs and two Intel QLC NAND SSDs and demonstrated over 3 GB/sec on writes and over a million read IOPS on tiny (256-byte) transfers.  Johann indicated that their testbed hardware is being scaled up dramatically to match their extremely aggressive development schedule, and I fully expect to see performance scaling results at SC this November.

      This is all a far cry from the original Fast Forward DAOS, and this demo and discussion on the show floor was the first time I felt confident that DAOS was not only a good idea, but it was a solution that can realistically move HPC beyond the parallel file system.  Its POSIX compatibility features and Lustre namespace integration provide enough familiarity and interoperability to make it something usable for the advanced HPC users who will be using the first exascale machines.

      At the same time, it applies a number of new technologies in satisfying ways (Mercury for user-space network transport, GIGA+ for subtree sharding, Optane to coalesce tiny I/Os, …) that, in most ways, puts it at technological parity with other high-performance all-flash parallel storage systems like WekaIO and VAST.  It is also resourced at similar levels, with DOE and Intel investing money and people in DAOS at levels comparable to the venture capital that has funded the aforementioned competitors.  Unlike its competitors though, it is completely open-source and relies on standard interfaces into hardware (libfabric, SPDK) which gives it significant flexibility in deployment.

      As with everything exascale, only time will tell how DAOS works in practice.  There are plenty of considerations peripheral to performance (data management policies, system administration, and the like) that will also factor into the overall viability of DAOS as a production, high-performance storage system.  But so far DAOS seems to have made incredible progress in the last few years, and it is positioned to shake up the HPC I/O discussion come 2021.

The Cloud is coming for us

This ISC also marked the first time where I felt that the major cloud providers were converging on a complete HPC solution that could begin eroding campus-level and mid-range HPC.  Although application performance in the cloud has historically been the focus of most HPC-vs-cloud debate, compute performance is largely a solved problem in the general sense.  Rather, data—its accessibility, performance, and manageability—has been the single largest barrier between most mid-range HPC users and the cloud.  The convenience of a high-capacity and persistent shared namespace is a requirement in all HPC environments, but there have historically been no painless ways to produce this environment in the cloud.

      AWS was the first to the table with a solution in Amazon FSx, which is a managed Lustre-as-a-service that makes it much easier to orchestrate an HPC workflow that relies on a high-performance, high-capacity, shared file system.  This has prompted the other two cloud vendors to come up with competing solutions:  Microsoft Azure’s partnership with Cray is resulting in a ClusterStor Lustre appliance in the cloud, and Google Cloud will be offering DDN’s EXAScaler Lustre appliances as a service.  And Whamcloud, the company behind Lustre, offers its own Lustre Cloud Edition on all three major cloud platforms.

      In addition to the big three finally closing this gap, a startup called Kmesh burst on to the I/O scene at ISC this year and is offering a cloud-agnostic solution to providing higher-touch parallel file system integration and management in the cloud for HPC.  Vinay Gaonkar, VP of Products at Kmesh, gave insightful presentations at several big I/O events during the week that spoke to the unique challenges of designing Lustre file systems in a cloud ecosystem.  While architects of on-prem storage for HPC are used to optimizing for price-performance on the basis of purchasing assets, optimizing price-performance from ephemeral instance types often defies conventional wisdom; he showed that instance types that may be considered slow on a computational basis may deliver peak I/O performance at a lower cost than the beefiest instance available:

      Vinay’s slides are available online and offer a great set of performance data for high-performance storage in the public clouds.

      The fact that there is now sufficient market opportunity to drive these issues to the forefront of I/O discussion at ISC is an indicator that the cloud is becoming increasingly attractive to users who need more than simple high-throughput computing resources.

      Even with these sorts of parallel file systems-as-a-service offerings though, there are still non-trivial data management challenges when moving on-premise HPC workloads into the cloud that result from the impedance mismatch between scientific workflows and the ephemeral workloads for which cloud infrastructure is generally designed.  At present, the cost of keeping active datasets on a persistent parallel file system in the cloud is prohibitive, so data must continually be staged between an ephemeral file-based working space and long-term object storage.  This is approximately analogous to moving datasets to tape after each step of a workflow, which is unduly burdensome to the majority of mid-scale HPC users.

      However, such staging and data management issues are no longer unique to the cloud; as I will discuss in the next section, executing workflows across multiple storage tiers is no longer a problem unique to the biggest HPC centers.  The solutions that address the burdens of data orchestration for on-premise HPC are likely to also ease the burden of moving modest-scale HPC workflows entirely into the cloud.

Tiering is no longer only a problem of the rich and famous

Intel started shipping Optane persistent memory DIMMs earlier this year, and the rubber is now hitting the road as far as figuring out what I/O problems it can solve at the extreme cutting edge of HPC.  At the other end of the spectrum, flash prices have now reached a point where meat-and-potatoes HPC can afford to buy it in quantities that can be aggregated into a useful tier.  These two factors resulted in a number of practical discussions about how tiering can be delivered to the masses in a way that balances performance with practicality.

      The SAGE2 project featured prominently at the high-end of this discussion.  Sai Narasimhamurthy from Seagate presented the Mero software stack, which is the Seagate object store that is being developed to leverage persistent memory along with other storage media.  At a distance, its goals are similar to those of the original DAOS in that it provides an integrated system that manages data down to a disk tier.  Unlike the DAOS of today though, it takes on the much more ambitious goal of providing a PGAS-style memory access model into persistent storage.

      On the other end of the spectrum, a number of new Lustre features are rapidly coalescing into the foundation for a capable, tiered storage system.  At the Lustre/EOFS BOF, erasure coded files were shown on the roadmap for the Lustre 2.14 release in 2Q2020.  While the performance of erasure coding probably makes it prohibitive as the default option for new files on a Lustre file system, erasure coding in conjunction with Lustre’s file-level replication will allow a Lustre file system to store, for example, hot data in an all-flash pool that uses striped mirrors to enable high IOPS and then tier down cooler data to a more cost-effective disk-based pool of erasure-coded files.

      In a similar vein, Andreas Dilger also discussed future prospects for Lustre at the HPC I/O in the Data Center workshop and showed a long-term vision for Lustre that is able to interact with both tiers within a data center and tiers across data centers:

      <div class="separator" style="clear: both; text-align: center;"></div> +

      Many of these features already exist and serve as robust building blocks from which a powerful tiering engine could be crafted.

      Finally, tiering took center stage at the Virtual Institute for I/O and IO-500 BOF at ISC with the Data Accelerator at Cambridge beating out OLCF Summit as the new #1 system.  A key aspect of Data Accelerator’s top score arose from the fact that it is an ephemeral burst buffer system; like Cray DataWarp, it dynamically provisions parallel file systems for short-term use.  As a result of this ephemeral nature, it could be provisioned with no parity protection and deliver a staggering amount of IOPS.

      <h2>Impressions of the industry</h2>As I’ve described before, I often learn the most by speaking one-on-one with engineers on the expo floor.  I had a few substantive discussions and caught on to a few interesting trends.

<h3>No winners in EDSFF vs. NF.1</h3>It’s been over a year since Samsung’s NF.1 (formerly M.3 and NGSFF) and Intel’s EDSFF (ruler) SSD form factors debuted, and most integrators and third-party SSD manufacturers remain completely uncommitted to building hardware around one or the other.  Both form factors have their pros and cons, but the stalemate persists by all accounts so far.  Whatever happens to break this tie, it is unlikely that it will involve the HPC market, and it seems like U.2 and M.2 remain the safest bets for the future.

      <h3>Memory Landscape and Competition</h3>The HBM standard has put HMC (hybrid memory cube) in the ground, and I learned that Micron is committed to manufacturing HBM starting at the 2e generation.  Given that SK Hynix is also now manufacturing HBM, Samsung may start to face competition in the HBM market as production ramps up.  Ideally this brings down the cost of HBM components in the coming years, but the ramp seems to be slow, and Samsung continues to dominate the market.

Perhaps more interestingly, 3DXPoint may be diversifying soon.  Although the split between Intel and Micron has been well publicized, I failed to realize that Intel will also have to start manufacturing 3DXPoint in its own fabs rather than the shared facility in Utah.  Micron has also announced its commitment to the NVDIMM-P standard, which could feasibly blow the doors open on persistent memory by allowing non-Intel processor vendors to support it.  However, Micron has not committed to an explicit combination of 3DXPoint and NVDIMM-P.

      Realistically, the proliferation of persistent memory based on 3DXPoint may be very slow.  I hadn’t realized it, but not all Cascade Lake Xeons can even support Optane DIMMs; there are separate SKUs with the requisite memory controller, suggesting that persistent memory won’t be ubiquitous, even across the Intel portfolio, until the next generation of Xeon at minimum.  Relatedly, none of the other promising persistent memory technology companies (Crossbar, Everspin, Nantero) had a presence at ISC.

<h3>China</h3>The US tariffs on Chinese goods are on a lot of manufacturers’ minds.  Multiple vendors remarked that they are:

<ul><li>thinking about moving more manufacturing from China into Taiwan or North America,</li><li>already migrating manufacturing out of China into Taiwan or North America, or</li><li>under pressure to make shorter-term changes to their supply chains (such as stockpiling in the US) in anticipation of deteriorating conditions.</li></ul>
      I was not expecting to have this conversation with as many big companies as I did, but it was hard to avoid.

      Beyond worrying about the country of origin for their components, though, none of the vendors with whom I spoke were very concerned about competition from the burgeoning Chinese HPC industry.  Several commented that even though some of the major Chinese integrators have very solid packaging, they are not well positioned as solutions providers.  At the same time, customers are now requiring longer presales engagements due to the wide variety of new technologies on the market.  As a result, North American companies playing in the HPC vertical are finding themselves transitioning into higher-touch sales, complex custom engineering, and long-term customer partnerships.

<h2>Concluding thoughts</h2><div>This year’s ISC was largely one of anticipation of things to come rather than demonstrations that the future has arrived.  Exascale (and the pre-exascale road leading to it) dominated most of the discussion during the week.  Much of the biggest hype surrounding exascale has settled down, and gone are the days of pundits claiming that the sky will fall when exascale arrives due to constant failures, impossible programming models, and impossible technologies.  Instead, exascale is beginning to look very achievable and not unduly burdensome: we know how to program GPUs and manycore CPUs already, and POSIX file-based access will remain available for everyone.  The challenges are what they’ve always been: continuing to push the limits of scalability in every part of the HPC stack.</div>

I owe my sincerest thanks to the organizers of ISC, its sessions, and the HPC-IODC workshop for putting together the programs that spurred all of the interesting discourse over the week.  I also appreciate the technical staff at many of the vendor booths with whom I spoke.  I didn't name every person from whom I drew insights on the expo floor, but if you recognize a comment that you made to me in this post and want credit, please do let me know--I'd be more than happy to give it.  I also apologize to all the people with whom I spoke and sessions I attended but did not include here; not everything I learned last week fit into this post.
diff --git a/2019/letsencrypt-for-multiple-hosts/index.html b/2019/letsencrypt-for-multiple-hosts/index.html
This is a crosspost from Dereks Web: Thoughts from Derek. See the original post here.

      LetsEncrypt for Multiple Hosts


Using LetsEncrypt for certificate creation and management has made secure communications much easier. Instead of contacting the IT department of your university to request a certificate, you can skip the middleman and generate your own certificate which is trusted around the world.


A common use case of certificates is to secure data transfers. Data transfers that use the GridFTP, XRootD, or HTTPS transfer protocols can load balance between multiple servers to increase throughput. In this setup, keepalived is used to load balance between multiple transfer servers. The certificate provided to the clients needs to include the virtual host address of the load balancer, as well as the hostname of each of the worker nodes. At a high level, the steps are:

1. Create a shared directory between the data transfer nodes
2. Install httpd on each of the data transfer nodes
3. Configure httpd to use the shared directory as the “webroot”
4. Configure keepalived to virtualize port 80 to at least one of your data transfer nodes
5. Run certbot with the webroot option, as well as the multiple hostnames of the data transfer nodes

Create an NFS share that each of the data transfer nodes can read. The steps for creating an NFS shared directory are outside the scope of this guide; here, the shared directory will be referred to as /mnt/nfsshare. Next, install httpd on each of the data transfer nodes:

root@host $ yum install httpd

      Create a webroot directory within the shared directory on one of the nodes:

root@host $ mkdir /mnt/nfsshare/webroot

      Configure httpd to export the same webroot on each of the data transfer nodes:

<VirtualHost *:80>
    DocumentRoot "/mnt/nfsshare/webroot"
    <Directory "/mnt/nfsshare/webroot">
        Require all granted
    </Directory>
</VirtualHost>

Configure keepalived to virtualize port 80 to at least one of your data transfer nodes. Add the following to your configuration:

virtual_server <VIRTUAL-IP-ADDRESS> 80 {
    delay_loop 10        # seconds between health checks
    lb_algo wlc          # weighted least-connections scheduling
    lb_kind DR           # direct routing; return traffic bypasses the balancer
    protocol tcp

    # Repeat one real_server block per data transfer node
    real_server <GRIDFTP-SERVER-#1-IP ADDRESS> 80 {
        TCP_CHECK {
            connect_timeout 3
            connect_port 80
        }
    }
}

Run certbot with the webroot option on only one of the data nodes. The first domain in the command line should be the virtual hostname:

root@host $ certbot certonly -w /mnt/nfsshare/webroot -d <VIRTUAL_HOSTNAME> -d <DATANODE_1> -d <DATANODE_N>...
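Once issued, certbot stores the result under /etc/letsencrypt on the node where it ran, and renewals can reuse the same shared webroot. As a sketch, you could verify that all of the hostnames made it into the certificate and then keep it current like this (the live path follows certbot's default layout):

root@host $ openssl x509 -in /etc/letsencrypt/live/<VIRTUAL_HOSTNAME>/cert.pem -noout -text | grep -A1 'Subject Alternative Name'
root@host $ certbot renew --webroot -w /mnt/nfsshare/webroot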
diff --git a/_posts/gaborsamu/2019-5-30-poweringhpc.md b/2019/powering-the-future-of-hpc-ai-with-openpower/index.html


This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      Powering the Future of HPC & AI with OpenPOWER


It is coming up on one year since the Summit supercomputer based on IBM POWER9 at Oak Ridge National Lab claimed the number one spot on the Top500 ranking. This system represents the culmination of a significant collaboration between OpenPOWER foundation members IBM, Nvidia, Mellanox and Red Hat with the goal of producing a well-balanced computing platform for not only traditional HPC workloads such as modelling and simulation, but also AI workloads. With this milestone approaching, we took the opportunity to catch up with Hugh Blemings, Executive Director at the OpenPOWER Foundation to chat about the foundation, and what lies ahead.

Q: Our readership may not have heard of the OpenPOWER Foundation; what’s your 30-second summary?


We’re seeing more adoption of low-cost OpenPOWER hardware by individual developers, researchers and security-conscious end users, which broadens the ecosystem and overall user base.



      There is at least one other announcement in the works that I think will truly be industry changing, but we’re a couple of months out from being able to discuss more widely. I’d perhaps simply recommend our OpenPOWER North American Summit in San Diego to your readership – it’s connected with the Linux Foundation Open Source Summit and will be the place to be in August.

diff --git a/2019/sc-19-recap/index.html b/2019/sc-19-recap/index.html
This is a crosspost from Glenn K. Lockwood: Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

      SC'19 Recap


      Last week was the annual Supercomputing conference, held this year in Denver, and it was its usual whirlwind of big product announcements, research presentations, vendor meetings, and catching up with old colleagues.  As is the case every year, SC was both too short and too long; there is a long list of colleagues and vendors with whom I did not get a chance to meet, yet at the same time I left Denver on Friday feeling like I had been put through a meat grinder.

      All in all it was a great conference, but it felt like it had the same anticipatory undertone I felt at ISC 2019.  There were no major changes to the Top 500 list (strangely, that mysterious 300+ PF Sugon machine that was supposed to debut at ISC did not make an appearance in Denver).  AMD Rome and memory-channel Optane are beginning to ship, but it seems like everyone’s got their nose to the grindstone in pursuit of achieving capable exascale by 2021.

      As with every major HPC conference, I approached SC this year with the following broad objectives:
      <ol><li>Sharing knowledge and ideas by contributing to the technical program and its workshops, tutorials, and BOFs with the goal of getting more momentum behind good ideas and steering research and roadmaps in a direction best aligned with where I think the HPC industry needs to go</li><li>Gathering intelligence across different technologies and market verticals to stay ahead of where technology and the community may be driving as a result of other parallel industries</li><li>Contributing to community development amongst storage and I/O researchers and practitioners with the goal of broadening the community and bringing more people and ideas to the table</li><li>Building and maintaining relationships with individual vendor representatives and peers so that I know to whom I can turn when new opportunities or challenges come up</li></ol>The things I took away from the conference are colored by these goals and the fact that I mostly work in high-performance storage systems design.  If I missed any major themes or topics in this recap post, it was likely a reflection of the above goals and perspective.

      <h2 id="before">Before the conference</h2>SC’19 started back in the early spring for me since I served on the technical papers committee and co-chaired the Parallel Data Systems Workshop this year.  That all amounted to a predictable amount of work throughout the year, but there were two surprises that came up in October with respect to SC that are worth mentioning before we dive into the technical contents of the conference.

      <h3>The “I am HPC Guru” campaign</h3>Jim Cownie had the brilliant idea in early October to launch a covert campaign to create “I am HPC Guru” pins for SC, and he enlisted a group of willing members of the HPC Twitter community to pitch in.  I was fortunate enough to be invited to participate in the fun, and judging by the reach of the #IAmHPCGuru tag on Twitter during the conference, it was a wild success.

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td></td></tr><tr><td class="tr-caption" style="font-size: 12.800000190734863px;">An allotment of “I am HPC Guru” pins.  People who pitched in also got a commemorative larger-sized pin (shown outside the bag above) which was a calling card for members of the secret society.</td></tr></tbody></table>
      Hats off to Jim for conceiving this great idea, seeing through the design and shipment of the pins, and being so inclusive with the whole idea.  There are now hundreds of HPC_Guru pins all over the world thanks to Jim’s efforts (and a couple dozen still with me here in California…), and I think it was a really positive way to build the Twitter-HPC community.

      <h3>The new job</h3>Life also threw me a bit of a curve ball in late October when I took on a new set of responsibilities at NERSC and changed from contributing to an R&D group to leading an operational storage team.  This meant that, in addition to all the pre-conference commitments I had made with an eye towards longer-term storage technology strategy, I suddenly had to contextualize my goals with respect to a completely new role in tactical planning and deployment.

Whereas I’ve historically written off sales-oriented meetings at SC, having good relationships with vendor sales teams in addition to their engineers and product managers is now an essential component of my new position.  As a result of wearing these two hats instead of one, the number of hard commitments I had over the course of the conference about doubled over what it usually had been.  About half of these meetings were private (and not things about which I could write), and they also reduced the time I could’ve otherwise spent getting into the weeds about upcoming technologies.

Because the conference was so broken up into private and public meetings for me this year, a chronological recounting of the conference (as I did for my SC’18 recap) would be full of odd gaps and not make a whole lot of sense.  Instead, I will focus on a few of the juiciest topics I took away from the conference:
<ol><li>High-level trends that seemed to pop up repeatedly over the week</li><li>Intel’s disclosures around the Aurora/A21 system</li><li>Outcomes from the 2019 Parallel Data Systems Workshop (PDSW 2019)</li><li>The Perlmutter all-NVMe storage node architecture</li><li>DAOS and the 2019 DAOS User Group meeting</li><li>Everything else</li></ol>


      It’s difficult to group together all of the disparate things I heard and learned over the week into crisp bundles that I would consider emerging trends, but there were a few broad topics that kept popping up that suggested the following:

#1 - Memory-channel 3D XPoint is now out in the wild at sufficient scale that a picture is beginning to form around where it fits in the I/O stack.  The NEXTGenIO project and Intel DAOS both demonstrated the performance achievable when 3D XPoint is integrated into larger systems this year, and the acceleration it offers can be staggering when a sensible software framework is built around persistent memory to bridge it with other media (like flash) and higher-level functionality (like parallel storage).  Michèle Weiland and Adrian Jackson presented their successes with the NEXTGenIO project throughout the week, most notably in the technical papers track (see “An early evaluation of Intel’s Optane DC persistent memory module and its impact on high-performance scientific applications”) and across several smaller events (e.g., Adrian presented performance results, detailed in his EPCC blog post, at the Multi-Level Memory BOF).  DAOS also made a splash on IO-500; more on this below.

      #2 - The I/O ecosystem developed in preparation for the manycore era is making the transition from pure research to practical engineering effort.  As the first generation of 7nm CPUs hit the market with KNL-like core counts and massive scale-up GPU node architectures are being announced by every major HPC silicon provider, latency-hiding techniques for I/O are becoming a hot topic.  Asynchronous I/O—that is, techniques that allow an application to continue computing while a write I/O operation is still happening—came up a few times, and this technique is also moving up in the software stack from system software (such as DAOS, WekaIO, and VAST) into middleware (MPI-IO and HDF5).  I touch on this in the PDSW section below.

      #3 - Innovation in HPC storage is moving away from the data plane and towards full data life cycle.  Whereas focus in HPC I/O has traditionally revolved around making I/O systems as fast as possible, research and product announcements this year seemed to gravitate towards data management—that is, how to manage the placement of data before, during, and after I/O.  Proprietary frameworks for data migration, policy management, tiering, and system-level analytics and intelligence (backed by serious vendor investment; see Cray ClusterStor Data Services and DDN STRATAGEM) are popping up across the storage appliance market as a differentiator atop open-source software like Lustre, and research around applying AI to optimize data placement is maturing from novel research into product engineering.

      #4 - Scientific workflows—and the parallels they have with enterprise and hyperscale markets—are starting to be taken seriously by technology providers.  Vendors have begun to take ownership of the data movement challenges that exist between bursts of compute-intensive jobs. Advances aimed at edge computing are becoming surprisingly relevant to HPC since decentralized data that is far away from compute is, in a sense, how HPC has done storage for decades.  Whether they be sensors distributed across billions of cell phones, thousands of non-volatile storage media distributed across an exascale computing system, or detectors deployed at giant telescopes relying on a supercomputer for image processing, there are a common set of data management, movement, and remote processing challenges whose solutions can be applied across the board.

      <h2 id="splash">Intel’s big splash</h2>Following on their big system-level disclosures at ISC’19, Intel’s disclosure of the ALCF exascale system node architecture and the unveiling of their software strategy seemed to be the biggest splash of SC’19.  I was not actually at the Intel DevCon keynote where Raja Koduri made the announcements, but his slides on Xe and oneAPI are available online.

      The node architecture is, at a glance, very similar to the Summit node architecture today:
      <blockquote class="twitter-tweet"><div dir="ltr" lang="en">Aurora #supercomputer @argonne will have nodes with 2 Sapphire Rapids CPUs and 6 Ponte Vecchio GPUs with unified memory architecture#SC19 #HPC #AI #Exascale #GPU pic.twitter.com/HTGMnYh7AY</div> +— HPC Guru (@HPC_Guru) November 18, 2019</blockquote>From the slide and accompanying discussion on Twitter, there was quite a lot unveiled about the node architecture.  Each node will have:
      <ul><li>Two Sapphire Rapids Xeons (which appear to have 8 channels of DDR in the aforementioned slide) and six Ponte Vecchio Intel GPUs</li><li>A CXL-based “Xe Link” router provides all-to-all connectivity between the GPUs, presumably comparable to (but more standards-based than) NVLink/NVSwitch, for a unified memory space</li><li>Eight Slingshot NIC ports per node, which is 1.6 Tbit/sec of injection bandwidth</li><li>A “Rambo Cache” that sits between HBM, GPU, and CPU that presumably reduces NUMA effects for hot data that is being touched by many computing elements</li><li>A “matrix engine” (which sounds an awful lot like NVIDIA’s tensor cores) in each GPU</li></ul><div>This was an extremely daring release of information, as Intel has now publicly committed to a 7nm GPU part (comparable to TSMC’s 5nm process), along with a high-yield EMIB process (their chiplet interconnect for HBM integration) and Foveros (their 3D die stacking for Rambo integration), in 2021.</div>


      Intel also released the beta version of their Intel oneAPI which appears to be a mixture of re-branded Intel developer products (Fortran and C++ compilers, TBB, MKL, DAL, MPI, VTune, etc) with their new SYCL-based Data Parallel C++ compiler.  The novelty here is that Intel is committing to supporting this entire stack for CPUs, GPUs, FPGAs, and matrix accelerators so that, for example, you could feasibly write a single application with a single set of tools that runs across all accelerator types.

      There was a lot of interest in SYCL at the Performance Portability and Productivity workshop, P3HPC, on Friday.  There were two talks of particular interest in the parts I attended; the first, presented by Balint Joo of Jefferson Lab, presented the performance of a quantum chromodynamics kernel when implemented using Kokkos, accelerator-specific libraries, and SYCL:

      SYCL vs. Kokkos vs. native on NVIDIA and Intel architectures

These early results are promising, and with the exception of KNL, the SYCL ecosystem is already proving viable as a performance-portable framework.  The same is generally true for more complex computational kernels as well, as presented by Istvan Reguly from Pázmány Péter Catholic University:
      Performance portability figure of merit for a complex kernel using different performance-portable parallel runtimes.

      Intel's choice to back an open standard rather than develop its own proprietary APIs for each accelerator type was a very smart decision, as it looks like they are already making up lost ground against NVIDIA in building a robust software ecosystem around their accelerator technologies.  The fact that these presentations were given by application scientists, not Intel engineers, really underscores this.

Strangely, AMD kept a low profile at SC by comparison despite the fact that Rome is beginning to enter the market and, by all accounts I heard on the show floor, selling like gangbusters.  One major procurement I heard about switched from an Intel CPU-based plan of record to an AMD processor as a result of a schedule slip by Intel; this wound up resulting in the system obtaining 50% more cores at the same cost (plus the added benefit of PCIe Gen4), which is a testament to the advantage that AMD currently has in the near term.

By comparison, very few large HPC centers seem to be biting on Intel's Cascade Lake-AP despite Intel's very aggressive marketing against Rome.  Combined with the above observation that the Aurora architecture's Sapphire Rapids processors will only have eight memory channels per socket, this suggests that Cascade Lake-AP's 12-channel socket was likely released as a stopgap to have an answer to Rome while 10nm Xeon part production is scaling up.


      <h2 id="pdsw">PDSW 2019</h2>This year I had the great honor of co-chairing the Parallel Data Systems Workshop, the premiere data and storage workshop at SC, along with the esteemed Phil Carns (creator of Darshan and PVFS2/OrangeFS, among other things).  We tried to broaden the scope of the workshop to be more inclusive of “cloudy” storage and data topics, and we also explicitly tried to build the program to include discussion about data management that ran tangential to traditional HPC-focused storage and I/O.

      The proceedings are already online in an interim location hosted by ACM, and the full proceedings will be published by IEEE TCHPC.  Slides are available on the PDSW website, and I tried to tag my realtime thoughts using #pdsw19 on Twitter.

      <h3>Alluxio Keynote</h3>Our keynote speaker was Haoyuan Li, founder of Alluxio, who gave a brilliant talk about the data orchestration framework he developed at AMPLab and went on to commercialize.  It is an abstraction that stitches together different storage resources (file systems, object stores, etc) into a single namespace that applications can use to read and write data in a way that hides the complexity of tiered storage.  It was designed towards the beginning of the “Big Data revolution” with a specific eye towards providing a common interface for data accessibility; by writing an application against the Alluxio API, it would be made future-proof if the HDFS or S3 APIs fizzled since Alluxio normalizes the specific API and semantics of a native storage interface from user applications.

      Had something like this existed in the early days of HPC, there’s a good chance that we would not be stuck using POSIX I/O as the least common denominator for data access.  That said, Alluxio does solve a slightly easier problem in that it targets analytics workloads that are read-intensive—for example, it does not provide a means for applications to do random writes, and so it provides only a subset of the full semantics that some more general-purpose I/O interfaces (such as file access) may provide.  In making this trade-off though, it is able to aggressively cache data from any storage backend in a distributed memory space, and Alluxio has a configurable cache eviction policy for predictable workflows.

      In describing the motivation for the Alluxio design, Haoyuan had some interesting insights.  In particular, he pointed out that there is a growing movement away from the hyperconverged hardware architecture that motivated Hadoop and HDFS:

      <div class="separator" style="clear: both; text-align: center;"></div> +
      The whole “move compute to where the data is!” model for Hadoop has always struck me as rather fanciful in practice; it only works in single-tenant environments where there’s no chance of someone else’s compute already existing where your data is, and it imposes a strict coupling between how you scale data and analytics.  As it turns out, the data analytics industry is also waking up to that, and as Haoyuan’s slide above shows, separating storage from compute gives much more flexibility in how you scale compute with respect to data, but at the cost of increased complexity in data management.  The whole point of Alluxio is to minimize that cost of complexity by making data look and feel local by (1) providing a single namespace and API, and (2) using distributed memory caching to make data access perform as well as if compute and memory were colocated.

This is a bit ironic since HPC has been disaggregating storage from compute for decades; HPC systems have tended to scale compute capability far faster than storage.  However, the HPC community has yet to address the added complexity of doing this, and we are still struggling to simplify storage tiering for our users.  This is only getting worse as some centers slide back into hyperconverged node designs by incorporating SSDs into each compute node.  This causes different tiers to spread data across multiple namespaces and also further complicates data access since the semantics across those namespaces differ.  For example, it’s not sufficient to know that
      <ul><li>/local is the fastest tier</li><li>/scratch is less fast</li><li>/home is slow</li></ul>since
      <ul><li>/local is only coherent with other processes sharing the same physical compute node</li><li>/scratch is globally coherent</li><li>/home is globally coherent</li></ul>Alluxio is not the solution to this problem at present because it is optimized for write-once, read-many workloads whereas HPC does have to support random writes.  That said, HPC storage systems that incorporate the same design goals as Alluxio (connecting many types of storage under a single namespace, providing a restricted set of semantics, and applying aggressive caching to deliver local-like performance) hold a lot of promise.  Perhaps it’s no surprise that every serious parallel file system on the market is beginning to implement features like this—think Lustre File-Level Redundancy (FLR) and Persistent Client Caching (LPCC), Spectrum Scale AFM, and the core two-tier design of WekaIO.

      Haoyuan also presented a few case studies that showcased the ability of Alluxio to ease the transition from on-premise infrastructure (like Hadoop with HDFS) to hybrid cloud (e.g., run Presto across datasets both in older on-prem HDFS and newer S3 buckets).  It seems to be very fashionable to run analytics directly against data in object stores in industry, and Alluxio essentially gives such data more dynamism by being the place where active data can be staged for processing on demand.  Because it is a stateless orchestration layer rather than a storage system itself, Alluxio also seems nicely compatible with dynamic provisioning of compute resources.  In this sense, it may be an interesting internship project to see if Alluxio could be deployed on an HPC system to bridge a large data analytics job with an off-system object store.  Get in touch with me if you know a student who may want to try this!
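For reference, attaching an external object store to the Alluxio namespace is a one-line operation in its shell.  A minimal sketch, with hypothetical paths and bucket names (exact options may vary by release):

# Mount an off-system S3 bucket into the Alluxio namespace (bucket and path hypothetical)
alluxio fs mount /external s3://my-bucket/experiment-data
# Analytics jobs then address it like any other Alluxio path, with hot data cached in memory
alluxio fs ls /external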

      <h3>Asynchronous I/O</h3>Middleware for asynchronous I/O came up in two different papers this year.  The first, “Enabling Transparent Asynchronous I/O using Background Threads” by Tang et al., described a new pluggable runtime for HDF5 that processes standard HDF5 I/O requests asynchronously.  It does this by copying I/O requests and their metadata into a special buffer, putting those requests on a queue that is managed by the asynchronous runtime, building a directed graph of all requests’ dependencies, and dispatching I/Os alongside regular application execution using a lightweight (Argobots-based) asynchronous worker pool.

      What this amounts to is that a standard HDF5 write call wouldn’t block until the I/O has been committed to disk somewhere; instead, it returns immediately after the async runtime makes a copy of the data to be written into its own private memory buffer.  The application is then free to continue computing, while an Argobots thread begins buffering and dispatching outstanding asynchronous I/O calls.  The performance that results from being able to overlap I/O with computation is remarkable:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">I/O speedup at scale as a result of the asynchronous runtime backend for HDF5 presented by Tang et al.</td></tr></tbody></table>
      What’s more impressive, though, is that this backend is almost entirely transparent to the user application; in its simplest form, it can be enabled by setting a single environment variable.
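For a flavor of what “a single environment variable” means in practice, enabling a VOL connector generally looks like the sketch below; the plugin path and connector string here are my assumptions based on HDF5’s VOL plugin mechanism, not details from the talk:

# Point HDF5 at the async VOL connector (path and connector string assumed)
export HDF5_PLUGIN_PATH=/path/to/vol-async/lib
export HDF5_VOL_CONNECTOR="async under_vol=0;under_info={}"
# The application's H5Dwrite() calls now return as soon as the runtime has buffered the data
srun ./my_unmodified_hdf5_app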

      Later in the day, Lucho Ionkov presented a much more novel (research-y?) asynchronous I/O runtime in his paper, “A Foundation for Automated Placement of Data” which glued together DRepl (an abstraction layer between scientific applications and storage architectures, vaguely similar to what Alluxio aims to do), TCASM (a Linux kernel modification that allows processes to share memory), and Hop (an expressive key-value store with tunable performance/resilience requirements).  The resulting runtime provides a high-level interface for applications to express I/O and data placement as a series of attach, publish, and re-attach operations to logical regions of memory.  The runtime then manages the actual data movement (whether it be between nodes or to persistent storage) asynchronously.

      Again, the net result in speedup as the problem size scales up is impressive:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">I/O speedup at scale using the asynchronous I/O runtime presented by Iokov in Otstott et al.</td></tr></tbody></table>As with the asynchronous HDF5 paper, performance gets better with scale as the increasing costs of doing I/O at scale are amortized by overlapping it with computation.  In contrast to HDF5 though, this runtime comes with a completely new application API, so one would need to convert an application’s critical I/O routines to use this framework instead of POSIX I/O.  The runtime is also pretty heavyweight in that it requires a separate global data placement “nameserver,” a custom Linux kernel, and buy-in to the new memory model.  In that sense, this is a much more research-oriented framework, but the ideas it validates may someday appear in the design of a fully framework that incorporates both an application runtime and a storage system.

Why is this important?  These asynchronous I/O runtimes are making a lot more sense in the era of heterogeneous computing where accelerators (think GPUs) really aren’t good at driving a full kernel-based I/O pipeline.  Instead of running a full I/O stack and enforcing strict consistency (i.e., serializing I/O) on a lightweight accelerator core, having an asynchronous runtime running on a fat core that simply copies an I/O buffer from accelerator memory to slower memory before releasing program control back to the accelerator allows the accelerator to spend less time doing what it’s terrible at doing (ordering I/O operations) and more time computing.  At the same time, the fat core that is running the asynchronous I/O runtime can then operate on that copied I/O buffer on its own time, reorder and serialize operations to ensure consistency, and jump into and out of the kernel to enforce file permissions without interrupting the accelerator:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">Sketch of how an asynchronous I/O runtime might map to a heterogeneous node architecture</td></tr></tbody></table>
      Ron Oldfield did raise a really great consideration during PDSW about this though: at the end of the day, the asynchronous I/O runtime still has to share network resources with the application’s message passing runtime (e.g., MPI).  He alluded to work done a decade ago that found that asynchronous I/O was often stomping on MPI traffic since both MPI and I/O could happen at the same time.  Without some kind of awareness or coordination between the asynchronous I/O runtime and the application communication runtime, this sort of scheme is prone to self-interference when running a real application.

      Given this, the right place to integrate an asynchronous I/O runtime might be inside the message passing runtime itself (e.g., MPI-IO).  This way the asynchronous I/O scheduler could consider outstanding asynchronous messages it must pass as well and be smart about dispatching too many competing network transfers at the same time.  Unfortunately this then places a complex burden of serialization and synchronization on the runtime, and this starts to look a lot like just throwing messages at the NIC and letting it figure out the correct ordering.  The principal advantage here would be that the runtime has a lot more visibility into user intent (and may have more spare processing capacity if most of the application time is spent on an accelerator), so it could afford to be smarter about how it builds its dependency graph.

      <h3>Analytics for Runtime and Operations</h3>No computing-related workshop would be complete without a smattering of artificial intelligence and machine learning, and PDSW was no different this year.  Two papers were presented that attempted to use machine learning to predict parallel I/O performance in slightly different ways.

Suren Byna presented “Active Learning-based Automatic Tuning and Prediction of Parallel I/O Performance” where the authors developed an approach for autotuning parallel I/O (specifically using MPI-IO hints and Lustre striping parameters) using active learning to predict the optimal values for their tuning parameters.  They used two different approaches, and the faster one uses predicted performance to infer optimal tuning values.  Given how many factors actually come to play in parallel I/O performance on production systems, their model was able to predict I/O performance quite well under a range of I/O patterns.

      <div class="separator" style="clear: both; text-align: center;"></div> +
      Bing Xie et al presented “Applying Machine Learning to Understand Write Performance of Large-scale Parallel Filesystems” which pursued a similar line of work—using machine learning to predict I/O performance—but with a slightly different goal.  Xie’s goal was to identify the factors which most strongly affect predicted I/O performance, and she found that write performance was most adversely affected by metadata load and load imbalance on Blue Gene/Q and GPFS, whereas Cray XK7 and Lustre were more affected by aggregate file system load and load imbalance.  This system-centric work laid out a more sophisticated blueprint for identifying causal relationships between poor I/O performance and system-level health events, and I think applying these approaches to the dataset I published last year with my Year in the Life of a Parallel File System paper might identify some interesting emergent relationships between bad performance and the subtle factors to which they can be attributed.

Why is this important?  Industry is beginning to take notice that it is no longer sufficient to just report the here-and-now of how parallel file systems are behaving, and more sophisticated analytics engines are being co-deployed with very large systems.  For example, the Summit system at Oak Ridge made a splash in October by announcing the real-time analytics engine that was implemented on top of it, and Cray View is a similar analytics-capable engine built atop Lustre that Cray offers as a part of its ClusterStor lineup.  I’m not sure if DDN has something comparable, but their recent purchase of Tintri and its robust, enterprise-focused analytics engine means that they hold IP that can undoubtedly be applied to its HPC-focused storage product portfolio.

      Being able to predict performance (and the conditions that cause it to degrade!) is the holy grail of parallel I/O systems management, and it’s a sure bet that all the HPC storage vendors are watching research in this area very closely to see what ideas they can pluck from the community to add value to their proprietary analytics engines.  The fact that AI is being applied to production system data and yielding useful and actionable outcomes gives legs to this general idea of AI for self-driving systems.  The talks at PDSW this year were only demonstrations, not hardened products, but these ad-hoc or small-scale demonstrations are moving us in the right direction.

<h3>My Talk on Data Motion</h3>I also coauthored and presented a paper at PDSW this year that was an exploratory study of how we can understand data movement throughout an entire data center.  The goal of the entire paper, “Understanding Data Motion in the Modern HPC Data Center,” was to generate a single diagram that shows how much data flows between the different systems at NERSC.

      <div class="separator" style="clear: both; text-align: center;"></div>

      +
      +
      +


      <div class="separator" style="clear: both; text-align: center;"></div>

      +
      +


      I won’t recount the technical content of the talk here, but the paper is open access for those interested.  The essence of the study is that we showed that it is possible to examine data motion beyond the context of individual jobs and begin tying together entire workflows, but there’s a lot of supporting work required to shore up the tools and telemetry from which this analysis draws.  The paper was very much a long-form work in progress, and I’d be interested in hearing from anyone who is interested in pursuing this work further.

      <h2 id="e1kf">Scale-up highly available NVMe hardware</h2>Although it didn’t make a many headlines (as storage rarely does), Cray announced its new ClusterStor E1000 platform shortly before SC and had some of their E1000-F all NVMe enclosures on display at a few booths.  I normally don’t care too much about storage enclosures (it’s all just sheet metal, right?), but this announcement was special to me because it is the hardware platform that is going into NERSC’s Perlmutter system in 2020, and I’ve been involved with the different iterations of this hardware design for over a year now.

      It’s very gratifying to see something start out as a CAD drawing and a block diagram and grow up into actual hardware:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">The E1000-F all-NVMe enclosure</td></tr></tbody></table>
      Torben Kling Petersen gave a talk at the Exhibitor Forum disclosing the details of the hardware design on behalf of Cray, and it looks like they’ve made just about everything surrounding the E1000 public:

      <div class="separator" style="clear: both; text-align: center;"></div> +
      The foundation for this platform is the E1000-F high-availability enclosure as shown in the above slide.  It has two separate Rome-based servers (“controllers”) and 24 U.2 NVMe slots capable of PCIe Gen4.  Each Rome controller has slots for up to three 200 Gbit NICs; doing the math, this gives a very nicely balanced design that is implemented entirely without PCIe switches:
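Assuming each drive delivers the ~6.5 GB/s read rate projected for PM1733-class drives (see the caption below), each controller’s network and storage sides line up almost perfectly:

3 NICs × 200 Gbit/s ≈ 75 GB/s of network bandwidth per controller
12 drives × ~6.5 GB/s ≈ 78 GB/s of read bandwidth per controller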

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">Cartoon block diagram for one half of the E1000-F chassis.  Note that the NVMe read rates (violet text) are assumed based on Samsung PM1733 specs and performance projections that Petersen presented.  Also note that each NVMe drive is 2x2 PCIe Gen4 with multipath to the other Rome controller (not shown).</td></tr></tbody></table>I visited the booth of the ODM with whom Cray worked to develop this node design and was fortunate enough to meet the node architects from both sides who gave me a really helpful breakdown of the design.  Physically, the 2U chassis is laid out something like this:

      <div class="separator" style="clear: both; text-align: center;"></div> +
      Just about everything is both hot-swappable and fully redundant.  The entire system can be powered and cooled off of a single 1.2 kW(?) power supply, and all the fans are hot-swappable and configured in a 5+1:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">Fans are all individually replaceable and configured in 5+1.  You can also see the NVMe backplanes, attached to an active midplane (not shown), through the open fan slot.</td></tr></tbody></table>
      All the fans are on the same pulse-width modulator (PWM), so they all operate at the same speed and provide even airflow as long as they are properly powered.  My recollection from what the architect told me is that the PWM signal is provided by an FPGA on the midplane which also handles drive power-up.  Because there is only a single midplane and this power/cooling controller lives on it, this power/cooling FPGA is also configured redundantly as 1+1.  Thus, while the midplane itself is not redundant or field-replaceable, the active components on it are, and it would take physical damage (e.g., someone punching a hole through it and breaking the PCB traces) to knock the whole chassis offline.

      Each chassis has two independent node boards that are hot-pluggable and self-contained:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">One of the E1000-F node sleds with its cover popped off at the Cray booth</td></tr></tbody></table>Each node board is wrapped in a sheet metal sled and has a screwed-on lid.  The whole node sled was designed by the ODM to be a field-replaceable unit (FRU), so doing something like a DIMM swap does require a screwdriver to remove the top cover.  However it’s ultimately up to OEMs to decide how to break down FRUs.

      The ODM had a bare controller board at its booth which looks like this:

      <div class="separator" style="clear: both; text-align: center;"></div>

      +
      E1000-F bare controller board
      +

      There are two M.2 PCIe Gen4 slots for mirrored boot drives and a pair of big hot-plug block connectors in the front of the board for redundant power and 48 lanes of PCIe Gen4 for the 24x U.2 drives hanging off the midplane.  There’s a single riser slot for two standard HHHL PCIe add-in cards where two NICs plug in, and a third OCP-form factor slot where the third NIC can slot in.  The rear of the controller sled shows this arrangement:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">Rear view of a single Rome controller</td></tr></tbody></table>It looks like there’s a single RJ45 port (for LOM?), a power and reset button, a single USB-3, and a mini DisplayPort for crash carting.

      When Cray announced the E1000-F, HPCwire ran a block diagram of the complete chassis design that suggested that heartbeating would be done through a non-transparent bridge (NTB) implemented on the AMD Rome host interface.  This was a little worrisome since AMD has yet to release the proper drivers to enable this NTB for Linux in a functional way; this simple fact is leading other ODMs towards a more conservative node design where a third-party nonblocking PCIe switch is added simply to provide a functioning NTB.  When I asked the architect about this, though, he revealed that the E1000-F also has an internal gigabit Ethernet loop between both controllers for heartbeating which completely obviates the need to rely on any NTB for failover.

      Another interesting thing I learned while talking to the E1000-F designers is that the power supply configuration gives a lot of runway for the overall system design:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">One of the two power supply sleds for the E1000-F chassis.  Lots of free real estate remains and is currently occupied by bus bars.</td></tr></tbody></table>The current power supply is (I believe) ~1200 W, and the carrier sled on which it is mounted is mostly empty space taken up by two fat bus bars that reach all the way to the front of it.  In leaving all of this space in the sled, it will be fully possible to build a physically compatible PSU sled that delivers significantly more power to the U.2 NVMe drives and host controllers if the power consumption of the controllers or the NVMe drives increases in the future.  The ODM confirmed that the cooling fans have similar headroom and should allow the whole enclosure to support a higher power and thermal load by just upgrading the power and controller FRUs.

This point is important because the performance of PCIe Gen4 SSDs is actually capped by their power consumption—if you look at product sheets for ruler SSDs (M.2, NF1, and E1.S), you will find that their performance is universally lower than that of their U.2 and HHHL variants due to the fact that the ruler standards limit power to 8-12W compared to U.2/HHHL’s ~25W.  This E1000-F chassis is designed as-is for 25W U.2 drives, but there are already proposals to push individual SSD power up to 40W and beyond.  Given this trend and the high bandwidth available over a PCIe Gen4 x4 connector, it’s entirely possible that there will be a demand for higher-power NVMe enclosures as Gen4 matures and people want to drive Gen4 NVMe at line rate.

      <h2 id="daos">DAOS User Group</h2>The 2019 DAOS User Group was held on Wednesday in a hotel adjacent to the main convention center. Contrary to previous years in which I attended, this meeting felt like a real user group; there were presenters from several different organizations, none of whom directly contribute to or are contractual customers of DAOS.  There were also real performance data which largely centered around the insanely high IO-500 benchmark score that DAOS posted earlier in the week:

      <table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"></td></tr><tr><td class="tr-caption" style="text-align: center;">Bandwidth spread on the IO-500’s IOR test suite</td></tr></tbody></table>These numbers are using a pretty modest server environment and client count (24 DAOS servers, 26 client nodes, 28 ranks per client, dual-rail OPA100) and use the native DAOS API.  What I didn’t snap a photo of are the crazy metadata rates which posted a geometric mean of 4.7 million IOPS; by comparison, the 250 PB Alpine file system attached to the Summit supercomputer at Oak Ridge posted 1.2 million IOPS using more than 500 clients.  To the extent that it was meant to address the IOPS limitations intrinsic to traditional parallel file systems, the DAOS design is looking like a resounding success.

      According to the speaker, the metadata performance of this IO-500 run was not limited by any server-side resources, so adding more clients (like WekaIO’s top-scoring run with 345 clients) could have pushed this number higher.  It was also stated that the staggering IOR read performance was limited by the aggregate Optane DIMM bandwidth which is a testament to how highly optimized the data path is.

      <h3>Actually using DAOS</h3>This is all using the DAOS native API though, and unless you intend to rewrite all your open()s and write()s as daos_pool_connect() + daos_cont_open() + daos_array_open()s and daos_array_write()s, it’s hard to tell what this really means in terms of real-world performance.  Fortunately there was a great set of talks about the DAOS POSIX compatibility layer and related middleware.  I described the POSIX middleware a little in my recap of ISC’19, but it’s much clearer now exactly how a POSIX application may be adapted to use DAOS.  Ultimately, there are three options that DAOS provides natively:

<ul><li>libdfs, which is a DAOS library that provides a POSIX-like (but not POSIX-compatible) API into DAOS.  You still have to connect to a pool and open a container, but instead of reading and writing to arrays, you read and write arbitrary buffers to byte offsets within file-like objects.  These objects exist in a hierarchical namespace, and there are functions provided by libdfs that map directly to POSIX operations like mkdir, rmdir, statfs, etc.  Using libdfs, you would still have to rewrite your POSIX I/O calls, but there would be a much smaller semantic gap since POSIX files and directories resemble the files and directories provided by libdfs.  A great example of what libdfs looks like can be found in the IOR DFS backend code.</li><li>dfuse, which is a FUSE client written on top of libdfs.  With this, you literally get a file system mount point which POSIX applications can interact with natively.  Because this uses FUSE though, such accesses still generate system calls and memory copies, which come with steep latency penalties.</li><li>libioil, which is a POSIX interception library.  This is what you'd LD_PRELOAD in front of a standard application, and it does the remapping of genuine POSIX API calls into libdfs-native calls without ever going through the kernel (see the sketch below).</li></ul>
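For what it's worth, using libioil amounts to nothing more than injecting the library at launch time; here's a minimal sketch, in which the library path, binary name, and output path are all hypothetical:

```python
import os
import subprocess

# Hypothetical paths throughout; the point is only that libioil is injected
# via LD_PRELOAD, so the unmodified binary's POSIX calls get remapped onto
# libdfs without a kernel round trip.
env = dict(os.environ, LD_PRELOAD="/usr/lib64/libioil.so")
subprocess.run(["./my_posix_app", "--output", "/mnt/dfs/results.dat"], env=env, check=True)
```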
      <div>Cedric Milesi from HPE presented benchmark slides that showed that using the DFS (file-based) API over the native (array-based) API has no effect on performance:</div>

[Chart: Performance scaling of the native DAOS API (which encodes array objects) versus the DAOS DFS API (which encodes file and directory objects).  No discernible performance difference.]


<div>Thus, there is no performance difference whether you treat DAOS like an array store (its original design) or a file/directory store (through the libdfs API), as far as bandwidth is concerned.  This is excellent news: even though libdfs isn't a drop-in replacement for POSIX I/O, it implements the POSIX data model (data is stored as streams of bytes), which is a more comfortable look and feel for a storage system than storing typed arrays.  And since libioil is a shim atop libdfs, the above performance data suggests that POSIX applications won't pay significant bandwidth overheads by preloading the POSIX intercept library to get DAOS compatibility out of the box.</div>

What's less clear is what the metadata overheads of libdfs are.  Because the whole metadata model of DFS (files and directories) is very different from native DAOS (arrays), it's impossible to do a head-to-head comparison of metadata performance.  That said, DFS metadata is only a subset of the full POSIX metadata, so it should be faster even on identical hardware.  For example, DAOS only enforces permissions when opening a container, so I would not expect DFS to have any notion of file-level or directory-level ownership or permissions bits.  As such, DFS would not incur the cost of doing an expensive recursive permission check on dfs_open(), and the open rate should be much higher than something that adheres to POSIX.
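To make that intuition concrete, here's a toy cost model of my own (emphatically not DAOS code): a POSIX-style open must permission-check every directory in the path, while a DFS-style open pays a single container-level check up front:

```python
# Toy illustration only, not DAOS code: open() cost when permissions are
# checked per path component (POSIX) vs. once per container (DFS-style).
def posix_open_checks(path: str) -> int:
    # POSIX semantics: walk and permission-check every component of the path
    return len(path.strip("/").split("/"))

def dfs_open_checks(path: str) -> int:
    # DFS-style semantics: access was already verified when the container
    # was opened, so resolving the object needs no recursive walk
    return 1

p = "/pool/cont/project/run42/output/data.h5"
print(posix_open_checks(p), "checks vs.", dfs_open_checks(p))
```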

Kevin Harms from ALCF also presented a really enlightening slide containing very early performance tests from their internal DAOS testbed using dfuse and libioil:

[Slide: early dfuse and libioil performance numbers from the ALCF DAOS testbed.]

This slide is a treasure trove of interesting information:
      1. It implicitly confirms that the verbs provider for libfabric not only works, but works well.  Recall that the Intel testbed from which IO-500 was run used Intel OmniPath 100, whereas the Argonne testbed uses a competitor's fabric, InfiniBand.
2. Single-stream performance of DAOS using the dfuse interface is 450 MB/sec, which isn't terrible.  For comparison, single-stream performance of Lustre on Cray Aries + FDR InfiniBand is about the same.
3. Using the libioil POSIX interface dramatically increases the single-stream performance, which shines a light on how costly using the Linux VFS kernel interface (with FUSE on top) really is.  Not using FUSE, avoiding an expensive context switch into kernel mode, and avoiding a memcpy from a user buffer into a kernel buffer gives a 3x performance boost.

Again, in the sense that DAOS was meant to address the performance impacts of using a kernel-based storage system for I/O, it looks like DAOS is meeting expectations.

Finally, Mohamad Chaarawi also spent some time talking about the Lustre/DAOS integration, which uses DAOS dfuse to stitch together a Lustre namespace with DAOS DFS namespaces.  I mentioned this in my ISC recap, but there's now a pretty detailed slide about how this will look in practice:

[Slide: how DAOS DFS namespaces will be grafted into a Lustre namespace.]

This Lustre integration won't be quite as rosy as I described earlier, since DFS namespaces don't seamlessly merge into the Lustre namespace.  Instead, it looks like DFS namespaces will be mounted in a separate directory hierarchy governed by their pool UUID ("PUUID" in the above slide) and container UUID ("CUUID"), and the Lustre namespace will contain symlinks to the DFS mounts.  What exactly creates and destroys these symlinks is unclear; in July it had sounded like Lustre foreign layouts would dynamically stitch DAOS objects into Lustre using the Lustre control plane, but now it sounds like DAOS will behave more like autofs on top of Lustre.

<h3>The burgeoning DAOS community</h3>

Although the progress and increasing tangibility of DAOS is impressive, I was most struck by the diversity of stakeholders represented at the DAOS User Group meeting.  In particular, the participation of HPE (the non-Cray part, no less!) and Lenovo was a surprise to me, since neither has an immediate interest in the Argonne exascale system that has been the biggest driver for DAOS development.  Lenovo in particular made the bold statement that it wants to sell a DAOS appliance in 4Q2020/1Q2021 called the "DSS-D Integrated Solution with DAOS."

Oddly enough, the Cray part of HPE was not obviously present at the DAOS User Group despite its involvement in Argonne's Aurora system and activity on the DAOS mailing lists.  This may just be a reflection of Cray's historic reluctance to send engineering staff to SC, but its absence was quite notable in contrast to Lenovo's head-first dive into announcing a DAOS appliance.  There were also no loud voices supporting all of the work that DAOS has put into integrating with Apache Spark, nor were there any vocal supporters of Intel's newly stated ambition to create a native SEG-Y interface (a format used by oil and gas) for DAOS.

<h2>Everything else</h2>

There were some interesting tidbits I picked up at SC this year that don't fit neatly anywhere else in this post but are worth writing down.

<h3>Technical tidbits - the Cray Shasta cabinet</h3>

Much like the Cray E1000-F storage enclosure, I have watched the Cray Shasta cabinet design evolve from a set of CAD diagrams into a living, breathing behemoth of sheet metal and coolant tubing.  SC'19 was the debut of a finished Cray Shasta compute cabinet, and it's a sight to behold:

[Photo: The front end of the new Cray Shasta compute cabinet.]

These new cabinets are all direct liquid cooled, and the water tubing to each blade from the center manifold is all done up in the above photo.  Compute blades slot in vertically, and each cabinet has French doors that open in opposite directions.  The back end is a little less neat at a glance:

[Photo: The back end of the new Cray Shasta compute cabinet.]

As with the front end, it opens up with French doors, and interestingly, the rear doors look identical to the front doors.  Although I didn't ask explicitly, my guess is that both the front and rear of the cabinets could feature giant cabinet graphics if so desired.

The rear cabling is almost all copper 200 Gb/s:

[Photo: Cray Slingshot switch blade and Cray chassis management module.]

And, in a departure from the XC and XT/XE lines, all of this copper cabling uses standard QSFP-DD connectors to carry 2x200 Gb.  In the above photo, you can see a genuine Cray Slingshot switch blade slotted in horizontally (cf. the vertically slotted compute blades) and the water coupling for the liquid-cooled switch blade and management module.  There are no fancy coolant waterfalls with Shasta, but that's probably not a bad thing.  As I've heard it told, the Cray-2 waterfall was a case of making lemonade from lemons; apparently Fluorinert reacts corrosively with curved plastic surfaces.

<h3>Less-technical tidbits</h3>

SC isn't purely about the technology, and truth be told, the personalities and community are the principal reason I attend every year.  It follows that a number of personal highlights for me weren't directly related to HPC at all but were nevertheless very valuable bits of information that I took away from Denver.

For example, I met two of the big marketing minds behind a major HPC company who really floored me by attributing value to my support of the HPC industry and community through social media.  Social media is really how I got my start in this industry (I started as a hobbyist), so it's gratifying to hear that I might be contributing in a way that is meaningful to kindred spirits who also got into the HPC field along unconventional paths.  It was also a reminder that there are always real people behind every corporate Twitter account, and you very well may meet them at a conference like SC.  When that happens, it can be a really positive experience ("Great to meet the person behind the handle!") or an embarrassing one ("I really did say that three years ago, didn't I?").  This year was the first time it became clear that, by trying to avoid the latter case as a matter of course, the former becomes more prevalent without a whole lot of added effort.

I also met what may have been the world's slickest corporate sales team, whose brilliantly staged choreography of chance encounters over drinks only became apparent to me as I was walking back to my hotel.  I know that plenty of people dislike interacting with sales, but being a great salesperson is really a craft in and of itself, and I respect people who are masters of their trade regardless of what it is.  And now if I ever find myself in a situation where I need to win someone over cold, I know from whom I can draw inspiration to unleash my inner "customer success manager."  It's a careful balance of drawing out concerns, driving open-ended complaints towards something actionable, and knowing where to cut through red tape and just get the right people talking.

Another non-technical area in which I was looking for information this year was management philosophy.  I've had the pleasure of working with and for some very talented managers who recognize management as a distinct vocation in and of itself, and I made it a point to get time with a few such people who've consistently built me up over the years.  One of the more pithy philosophies I took away from one colleague is that there are times when neither "asking for permission" nor "asking for forgiveness" is the right approach—rather, sometimes you have to "radiate intent."  I'd never heard this before, but it makes sense: it gives others the opportunity to say "no" and take explicit ownership of inaction, but it doesn't require anyone to say "yes" and take responsibility for the outcomes.

<h3>Staying organized</h3>

Finally, I am always trying to figure out the optimal "workflow" for keeping organized at SC, and this year was no different.  A few years ago I fully committed to not bringing my laptop to the conference venue every day, in favor of a much lighter and more versatile iPad Pro, and this worked fine with two exceptions:

• For the Parallel I/O in Practice tutorial I co-presented, I brought my laptop so that all four presenters could project from it, and I used my iPad for keeping realtime notes.
• For PDSW, I brought my laptop just in case, knowing that I would be in the same room all day.  I wound up presenting from it simply because it provided a better viewing angle from the podium; the room arrangements in Denver were such that it was impossible for a speaker at the podium to see the slides being projected, so he or she would have to rely on the device driving the projector to tell what was actually on screen.

I did have to use the laptop at the hotel on Saturday night to make some final modifications to my PDSW talk (there are a few obscure features in PowerPoint that simply aren't exposed in the iOS version), but the rest of the conference (including a couple of BOF talks) was iPad-only.
For notetaking, I started storing all of my notes in Agenda and, where appropriate, used Agenda's feature to create a single note for each calendar entry corresponding to a formal meeting.  For unstructured conversations on the expo floor or between sessions, I kept one catch-all note per day in which I typed everything I could remember as soon as the conversation ended.  For example, the conversation I had with the designers of the E1000-F enclosure was saved as a combination of obscure details written down as soon as I left the booth and photos I snapped during the conversation.

In places where typing on an iPad was not possible (e.g., in most technical sessions, where there were no tables), I used Nebo and an Apple Pencil to take handwritten notes.  As it turns out, handwriting on an iPad balanced on your knee is far more productive than either trying to type text letter-by-letter into the on-screen iPad keyboard or awkwardly balancing the folded-out iPad Pro keyboard on a lap or bag.  Nebo is really good at converting handwriting into ASCII, and that ASCII copies easily into an Agenda note.

This workflow supplanted last year's approach, which relied exclusively on Notability and handwritten notes with OCR.  In meetings where a table was available (i.e., vendor briefings), being able to type rather than handwrite was far more effective at capturing every nuance of the spoken word.  I rarely ever get a copy of the slides shown at SC briefings, and quickly capturing exact hardware specs or release dates as someone is trying to gloss over an unflattering detail is really not possible when writing everything by hand.

For tracking action items, I've started using Things 3, which is admittedly crazy expensive but is really good at capturing to-do items in under five seconds so that they can be more formally sorted, assigned a start/complete date, and so on at the end of the day or after the conference.

This all mostly worked, but I did run into a major issue with Agenda where all my ad-hoc notes vanished when I got home from Denver and my home computer decided to sync.  The good news is that Agenda uses internal versioning so the notes' contents weren't truly lost, and their support team was extremely responsive in both recovering my lost notes and releasing a fix within a week.  Not a great first experience with the app, but I'm not sure that'll stop me from using it.

<h2>Concluding thoughts</h2>

As always seems to be the case, the week of SC was over before I knew it.  There's a lot I know I didn't get to see in terms of colleagues, exhibitors, and technical program sessions.  Of everything I did get to see, there's plenty that I wasn't sure I'd be allowed to write up.  So if you happened to get this far and are wondering why I didn't write about the most interesting thing that you got out of the conference this year, odds are that I didn't see it, or if I did, I wasn't sure I was allowed to write about it.  And if I did write about you and you won't get in trouble for being attributed by name, please let me know and I'd be happy to update this post to give you credit.

Denver was the city of the first SC I ever attended, so I was glad to be back.  I was also happy to get to see snow at least once this year:

[Photo]

and the convention center did an excellent job of providing space, AV support, catering, and gigantic coffee urns:

[Photo]

I got less sleep on average this year than at any SC prior (around six hours a night), and yet I feel like I accomplished less of what was on my list than ever before.  I suppose that's just a sign that the conference (or perhaps my ambition!) continues to grow, and I should expect SC'20 to be even bigger, better, and more exhausting.
diff --git a/_posts/dursi/2019-11-6-purpose-of-research-computinghtml.md b/2019/the-purpose-of-research-computing-is-the-research-not-the-computing/index.html

This is a crosspost from Jonathan Dursi's blog, R&D computing at scale. See the original post here.

      The Purpose of Research Computing is the Research, not the Computing


      Absolutely everyone in research computing will agree that supporting research is their centre’s highest goal. And they’re not lying, but at many centres I’ve visited, they aren’t really correct, either.

The day-to-day work in such a centre, naturally enough, is all about technical operations - keeping the computers running, updating software, making sure /scratch has enough space free, answering […] are meant to support - the internal, technical activities become the purpose of the centre.

Pretty quickly, you end up with centres that are ranking their performance quarter to quarter with cluster utilization numbers, or having all researcher interactions occurring via "tickets" and […] call from the boss at these centres. Ticket closure rates going down 5% though… maybe you're getting a call.

Organizations that care about their clients make their offerings very clear.

It doesn't take very long to spot centres like this, even from the outside. On their websites, most prominently of all, are the statistics that their biggest cluster premiered at position X on […] luck - they're just directed to a "contact us" email address (which, of course, feeds into a ticket tracker).

(Have you ever visited a restaurant webpage and needed like 4 or 5 clicks to get to the menu and their hours? If the restaurant took the menu off the website entirely and you instead had to file a […] researchers. Organizations that care about their customers make their offerings very clear.)

The thing is, using metrics like utilization, tickets, storage and the like to measure how much research support is happening is madness, and we all know it's madness. You can goose utilization […] dicing what could be a long, productive discussion with a researcher into a series of shorter "tickets".

It's madness because neither utilization, nor ticket closure rates, nor storage use, nor even training course enrolment are valuable to research in and of themselves. They are inputs to the process […] of responsibility, and a squandering of scarce research-support resources.

And it's worse than that, of course. Even a focus on inputs, if it was being honest, would focus on all the inputs, and certainly the most valuable and hardest-to-replace inputs - the technical […] fraction of the expertise of the support staff is being used? What is the meaningful contribution rate?

Inputs produce outputs, which produce outcomes, which produce impact. The inputs are not what you should measure.

The reason that those staff input metrics aren't being measured and others are is simple, and clarifying. The hardware inputs aren't being used as metrics due to a (false) belief that they are meaningful […] to gather. And they're comfortable to use because they don't really require centre managers to make any hard choices.

Focussing on the inputs instead of the outputs - or even better, outcomes - isn't only a research computing thing, of course. It's an absolutely classic mistake in a lot of sectors; a google search […] inputs returns 139 million results.

There are two prototypical reasons why it happens. If I were feeling in a twitter-ranty mood again, I might be tempted to draw the analogy to the first case - lack of competition, due to private- or […] a focus on client outcomes are doing so by constantly expending almost heroic levels of unseen effort inside the organization.

But I don't actually think that's what is driving some research computing centres' inputs focus when it comes to operations and technical decision making. I think it comes almost from the other direction, […] concerned with their clients, who focus first on a very basic need and then don't know how to generalize beyond that as they grow.

Imagine a small nonprofit, passionately committed to helping people, that gets its start meeting a very basic need - let's say they're providing before-school breakfasts to children in or near poverty. […] wider range of breakfasts to be inclusive of students with particular dietary needs. They are super committed to their clients.

But as that nonprofit starts expanding, it becomes clear their client base needs a wider range of services. It starts partnering with food banks, to help fight student hunger at home; its staff […] of slowing the growth of the breakfast program next year, is that the right thing to do, or not? How would they know?

This is a terrifying transition for a nonprofit to go through. Before, it knew exactly what it was doing, and had very clear metrics for success. In this intermediate stage, it probably has some […] intermediate state until they are overtaken by events or other organizations.

At most research computing centres, I think the story is more like that of the nonprofit. Except, let's be honest: while providing breakfasts is inherently meaningful and has very few organizations willing to do it, providing cycles and storage isn't, and has many alternate providers.

But going beyond meeting the basic needs of providing research computing cycles and storage, which was a much greater need in the 90s than it is today, is genuinely hard. It's very labour intensive - […] to unfamiliar qualitative evaluations and doing the hard work of trying to measure research outcomes.

But there's a relatively straightforward approach to get there starting from where you are. It takes some work, but just going through the process is clarifying.

1. What do you do now? You know, broadly, what services you offer to researchers; you've just never had to make it explicit. Start to put […] needs are the same! But those are questions that team leaders need to be wrestling with.

The alternative, just running a set of computers for the same friendly user group of people year after year, isn't research support; it's a hobby.

diff --git a/_posts/glennklockwood/2019-2-27-tagbloggercom1999blog-4307061427721284246post-7916337747246028185.md b/2019/vast-data-s-storage-system-architecture/index.html
This is a crosspost from Glenn K. Lockwood's blog, Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.

      VAST Data's storage system architecture


      VAST Data, Inc, an interesting new storage company, unveiled their new all-flash storage system today amidst a good amount of hype and fanfare.  There’s no shortage of marketing material and trade press coverage out there about their company and the juiciest features of their storage architecture, so to catch up on what all the talk has been about, I recommend taking a look at
      <div><ul><li>The VAST “Universal Storage” datasheet</li><li>The Next Platform’s article, “VAST Data Clustered Flash Storage Bans The Disk From The Datacenter”</li><li>Chris Mellor’s piece, “VAST Data: The first thing we do, let’s kill all the hard drives”</li></ul></div>

      The reviews so far are quite sensational in the literal sense since VAST is one of very few storage systems being brought to market that have been designed from top to bottom to use modern storage technologies (containers, NVMe over Fabrics, and byte-addressable non-volatile memory) and tackle the harder challenge of file-based (not block-based) access.

      In the interests of grounding the hype in reality, I thought I would share various notes I've jotted down based on my understanding of the VAST architecture.  That said, I have to make a few disclaimers up front:
[…]

System Composition

Because I/O servers are stateless and operate in […] This said, VAST has tested their architecture at impressively large scale and has an aggressive scale-out validation strategy.

Shared-everything consistency

Mounting every block device on every server may also sound like anathema to anyone familiar with block-based SANs, and generally speaking, it is.  NVMeoF (and every other block-level protocol) does not really have locking, so if a single device is mounted by two servers, it is up to those servers to communicate with each other to ensure they aren't attempting to modify the same blocks at the same time.  Typical shared-block configurations manage this by simply assigning exclusive ownership of each drive to a single server and relying on heartbeating or quorum (e.g., in HA enclosures or GPFS) to decide when to change a drive's owner.  StorNext (formerly CVFS) allows all clients to access all devices, but it uses a central metadata server to manage locks.

      VAST can avoid a lot of these problems by simply not caching any I/Os on the I/O servers and instead passing NFS requests through as NVMeoF requests.  This is not unlike how parallel file systems like PVFS (now OrangeFS) avoided the lock contention problem; not using caches dramatically reduces the window of time during which two conflicting I/Os can collide.  VAST also claws back some of the latency penalties of doing this sort of direct I/O by issuing all writes to nonvolatile memory instead of flash; this will be discussed later.


      It is not clear to me what happens to locks in the event that an I/O server fails while it has outstanding I/Os.  Since I/O servers do not talk to each other, there is no means by which they can revoke locks or probe each other for timeouts.  Similarly, JBOFs are dumb, so they cannot expire locks.

The VAST write path

I think the most meaningful way to demonstrate how VAST employs parity and compression while maintaining low latency is to walk through each step of the write path and show what happens between the time an application issues a write(2) call and the time that write call returns.

First, an application on a compute node issues a write(2) call on an open file that happens to reside on an NFS mount that points to a VAST server.  That write flows through the standard Linux NFS client stack and eventually results in an NFS RPC being sent over the wire to a VAST server.  Because VAST clients use the standard Linux NFS client, there are a few standard limitations.  For example,
      1. There is no parallel I/O from the client.  A single client cannot explicitly issue writes to multiple I/O servers.  Instead, some sort of load balancing technique must be inserted between the client and servers.
2. VAST violates POSIX because it only ensures NFS close-to-open consistency.  If two compute nodes try to modify the same 4 KiB range of the same file at the same time, the result will be corrupt data.  VAST's server-side locking cannot prevent this because it happens at the client side.  The best way around this is to force all I/O destined to a VAST file system to use direct I/O (e.g., open with O_DIRECT; a sketch follows below).
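As a concrete illustration of that workaround, here's a minimal sketch of an O_DIRECT write from Python on Linux; the mount path is hypothetical, and the anonymous mmap exists only because O_DIRECT requires page-aligned buffers and block-multiple transfer sizes:

```python
import mmap
import os

# An anonymous mmap gives a page-aligned 4 KiB buffer, which O_DIRECT requires.
buf = mmap.mmap(-1, 4096)
buf.write(b"A" * 4096)

# Hypothetical path on a VAST-backed NFS mount.
fd = os.open("/mnt/vast/shared.dat", os.O_WRONLY | os.O_CREAT | os.O_DIRECT, 0o644)
try:
    os.pwrite(fd, buf, 0)   # bypasses the client page cache entirely
finally:
    os.close(fd)
```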
Pictorially, it might look something like this:

[…]

Performance Expectations

This likely has a profound effect on the w[…]
      • Both 3D XPoint SSDs and NAND SSDs have higher read bandwidth than write bandwidth as a result of the power consumption associated with writes.  This will further increase the 3:1 read:write performance penalty.
      • VAST always writes to 3D XPoint but may often read from NAND.  This closes the gap in theory, since 3D XPoint is significantly faster at both reads and writes than NAND is at reads in most cases.  However the current 3D XPoint products on the market are PCIe-attached and limited to PCIe Gen3 speeds, so there is not a significant bandwidth advantage to 3D XPoint writes vs. NAND reads.
      It is also important to point out that VAST has yet to publicly disclose any performance numbers.  However, using replication to protect writes is perhaps the only viable strategy to deliver extremely high IOPS without sacrificing data protection.  WekaIO, which also aims to deliver extremely high IOPS, showed a similar 3:1 read:write performance skew in their IO-500 submission in November.  While WekaIO uses a very different approach to achieving low latency at scale, their benchmark numbers indicate that scalable file systems that optimize for IOPS are likely to sacrifice write throughput to achieve this.  VAST's architecture and choice to replicate writes is in line with this expectation, but until VAST publishes performance numbers, this is purely speculative.  I would like to be proven wrong.

      Other Bells and Whistles

      The notes presented above are only a small part of the full VAST architecture, and since I am no expert on VAST, I'm sure there's even more that I don't realize I don't know or fully understand.  That said, I'll highlight a few examples of which I am tenuously aware:

      Because every I/O server sees every NVMe device, it can perform global compression.  Typical compression algorithms are designed only to compress adjacent data within a fixed block size, which means similar but physically disparate blocks cannot be reduced.  VAST tracks a similarity value for extents in its internal metadata and will group these similar extents before compressing them.  I envision this to work something like a Burrows-Wheeler transformation (it is definitely not one though) and conceptually combines the best features of compression and deduplication.  I have to assume this compression happens somewhere in the write path (perhaps as stripes are written to NAND), but I don't understand this in any detail.
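As a toy demonstration of why grouping similar extents helps (this uses generic zlib and is emphatically not VAST's algorithm), compare compressing eight similar-but-scattered 4 KiB extents one at a time against compressing them as a single group:

```python
import random
import zlib

# Toy demo, not VAST's algorithm: near-duplicate extents compress far better
# when grouped, because per-extent compression cannot see across extents.
random.seed(0)
base = bytes(random.randrange(256) for _ in range(4096))

def similar_extent(src: bytes) -> bytes:
    """Copy an extent and mutate a few dozen bytes."""
    b = bytearray(src)
    for _ in range(64):
        b[random.randrange(len(b))] = random.randrange(256)
    return bytes(b)

extents = [similar_extent(base) for _ in range(8)]

per_extent = sum(len(zlib.compress(e)) for e in extents)
grouped = len(zlib.compress(b"".join(extents)))
print(f"compressed one extent at a time: {per_extent} bytes")
print(f"grouped, then compressed:        {grouped} bytes")
```

Each extent alone is essentially incompressible, but once the similar extents sit next to each other, the compressor can encode most of each one as back-references to the first.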

      The exact compression algorithm is one of VAST's own design, and it is not block-based as a result of VAST not having a fixed block size.  This means that decompression is also quite different from block-based compression; according to VAST, their algorithm can decompress only a local subset of data such that reads do not require similar global decompression.  The net result is that read performance of compressed data is not significantly compromised.  VAST has a very compelling example where they compressed data that was already compressed and saw a significant additional capacity savings as a result of the global nature of their algorithm.  While I normally discount claims of high compression ratios since they never hold up for scientific data, the conceptual underpinnings of VAST's approach to compression sounds promising.

      VAST is also very closely tied to byte-addressable nonvolatile storage from top to bottom, and much of this is a result of their B-tree-based file system metadata structure.  They refer to their underlying storage substrate as an "element store" (which I imagine to be similar to a key-value store), and it sounds like it is designed to store a substantial amount of metadata per file.  In addition to standard POSIX metadata and the pointers to data extents on various NVMe devices, VAST also stores user metadata (in support of their S3 interface) and internal metadata (such as heuristics about file volatility, versioning for continuous snapshots, etc).  This element store API is not exposed to customers, but it sounds like it is sufficiently extensible to support a variety of other access APIs beyond POSIX and S3.

      Take-away Messages

      VAST is an interesting new all-flash storage system that resulted from taking a green-field approach to storage architecture.  It uses a number of new technologies (storage-class memory/3D XPoint, NAND, NVMe over fabrics) in intellectually satisfying ways, and builds on them using a host of byte-granular algorithms.  It looks like it is optimized for both cost (in its intelligent optimization of flash endurance) and latency (landing I/Os on 3D XPoint and using triplication) which have been traditionally difficult to optimize together.

      Its design does rely on an extremely robust backend RDMA fabric, and the way in which every I/O server must mount every storage device sounds like a path to scalability problems--both in terms of software support in the Linux NVMeoF stack and fundamental sensitivities to topology inherent in large, high-diameter RDMA fabrics.  The global all-to-all communication patterns and choice to triplicate writes make the back-end network critically important to the overall performance of this architecture.

      That said, the all-to-all ("shared everything") design of VAST brings a few distinct advantages as well.  As the system is scaled to include more JBOFs, the global compression scales as well and can recover an increasing amount of capacity.  Similarly, data durability increases as stripes can be made wider and be placed across different failure domains.  In this sense, the efficiency of the system increases as it gets larger due to the global awareness of data.  VAST's choice to make the I/O servers stateless and independent also adds the benefit of being able to scale the front-end capability of the system independently of the back-end capacity.  Provided the practical and performance challenges of scaling out described in the previous paragraph do not manifest in reality, this bigger-is-better design is an interesting contrast to the mass storage systems of today which, at best, do not degrade as they scale out.  Unfortunately, VAST has not disclosed any performance or scaling numbers, so the proof will be in the pudding.

      However, VAST has hinted that the costs are "one fifth to one eighth" of enterprise flash today; by their own estimates of today's cost of enterprise flash, this translates to a cost of between $0.075 and $0.12 per gigabyte of flash when deployed in a VAST system.  This remains 3x-5x more expensive than spinning disk today, but the cost of flash is dropping far faster than the cost of hard drives, so the near-term future may truly make VAST cost-comparable to disk.  As flash prices continue to plummet though, the VAST cost advantage may become less dramatic over datacenter flash, but their performance architecture will remain compelling when compared to a traditional disk-oriented networked file system.
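Working backwards from those numbers (assuming a round $0.60/GB for enterprise flash, which appears to be the estimate in use):

```python
# Sanity-checking the quoted $0.075-$0.12/GB range, assuming ~$0.60/GB
# enterprise flash as the baseline.
enterprise_flash_per_gb = 0.60
for fraction in (8, 5):
    print(f"1/{fraction} of $0.60/GB = ${enterprise_flash_per_gb / fraction:.3f}/GB")
```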

      As alluded above, VAST is not the first company to develop a file-based storage system designed specifically for flash, and they share many similar architectural design patterns with their competition.  This is creating gravity around a few key concepts:
      • Both flash and RDMA fabrics handle kilobyte-sized transfers with grace, so the days of requiring megabyte-sized I/Os to achieve high bandwidth are nearing an end.
      • The desire to deliver high IOPS makes replication an essential part of the data path which will skew I/O bandwidth towards reads.  This maps well for read-intensive workloads such as those generated by AI, but this does not bode as well for write-intensive workloads of traditional modeling and simulation.
      • Reserving CPU resources exclusively for driving I/O is emerging as a requirement to get low-latency and predictable I/O performance with kilobyte-sized transfers.  Although not discussed above, VAST uses containerized I/O servers to isolate performance-critical logic from other noise on the physical host.  This pattern maps well to the notion that in exascale, there will be an abundance of computing power relative to the memory bandwidth required to feed computations.
• File-based I/O is not entirely at odds with very low-latency access; rather, file-based access is simply one of many interfaces exposed atop a more flexible key-value type of data structure.  As such, as new I/O interfaces emerge to serve the needs of extremely latency-sensitive workloads, these flexible new all-flash storage systems can simply expose their underlying performance through other non-POSIX APIs.
      Finally, if you've gotten this far, it is important to underscore that I am in no way speaking authoritatively about anything above.  If you are really interested in VAST or related technologies, don't take it from me; talk to the people and companies developing them directly.
diff --git a/_posts/dursi/2019-1-19-what-are-national-research-computing-platforms-for-nowhtml.md b/2019/what-should-a-national-research-computing-platform-be/index.html

This is a crosspost from Jonathan Dursi's blog, R&D computing at scale. See the original post here.

      What Should a National Research Computing Platform Be?


      What is a National Research Computing Platform For in 2019?

Computers are everywhere now, but computing is still hard. Canada […] however, and the kind of services that researchers most need, has changed radically over the past decades.

The history of providing computers for research

In the 1990s and 2000s, the overwhelming need was simply access to […] vast majority of such support is offered through Compute Canada.

As we enter 2019, this landscape looks quite different than it did in the 90s. Computing resources adequate for research are thick on the ground. Indeed, as the range of problems researchers tackle […] compute- and data-powered research require nothing more than a powerful desktop.

And for larger needs, the unavoidable logic of economies of scale for computers and storage has now entered the marketplace. A competitive range of commercial vendors provide access to […] of data with the strictest regulatory on-premises requirements — but they represent a minority of computational science needs.

The need for higher-level support

We advance research more powerfully by providing clarity than clusters.

[…]

The good news is that the Compute Canada federation has a network of roughly 200 computational experts, many at the Ph.D. level, available to directly enable science projects. The bad news is that […]

      What should today’s R&D computing support focus on?

With academic institutions now being just one player amongst […] emphasizing low cost, 'no frills' access to compute and storage, competing on price.

Either of these approaches represents a scandalous squandering of opportunity, wasting invaluable and nearly irreplaceable expertise and experience in applying computational techniques to open research […] providers focus on the lower-level procurement and operating of most computing and storage hardware.

Skills beat hardware

The goal of a research computing support platform is to enable research, and to help develop the next generation of research talent. Knowledge transfer and skills development are by far the most […] real needs in Canada's R&D ecosystem, and simply because no one else can do it at scale.

First, deep training in research methods pays long-lasting dividends. Even in rapidly changing fields like data and computational science, skills and experience don't depreciate the […] really go; and fluency in the previous generation of methods makes learning – or even creating – those newer methods easier.

And it's actually even better than that, because not only do the skills that come from that research experience and training remain useful in their field for long periods of time, they transfer […] in the research computing centres themselves and in the community as a whole.

Finally, there just aren't other options for providing high-level data and computational science collaboration and training to Canada's scholars and researchers consistently and across disciplines. We in the […] are very much not commodity skills, and cannot be purchased or rented from somewhere else.

The cloud premium is a price worth paying

The benefits of further efforts in skills development and training are fairly clear, and this alone would justify redirecting some effort from hardware to research services, and using commercial cloud providers to fill the gap. But having substantial commercial cloud resources available for researchers is worthwhile on its own merits.

Firstly, cloud provides more flexibility for rapidly changing research. The resource mix can be much broader and change much more rapidly than traditional procurement cycles would allow; what's more, those […] will generally be significantly better than what can be provided in house.

Secondly, trainees and staff benefit from gaining extremely relevant commercial cloud expertise. This goes back to skills development a bit, but in this case it's the system tools – the experience […] and will be attractive skills to have in whatever career they move on to.

Finally, commercial engagement can proceed much more smoothly, and be more attractive from the point of view of the commercial partner, when the collaboration happens in the commercial cloud. The success […] and are likely more comfortable with such offerings than using academic systems.

How to proceed

Making significant changes to priorities and indeed how we […] how to get there from here, but there are some basic approaches and guidelines that can help.

No need to do it all at once

This is a change that can and should be made incrementally. A […]

      Summary

The goal of a research computing support platform - any research support resource, really - is to enable research, and to help develop the next generation of research talent. With that primary mission […] science experts on collaboration and skills development rather than operating commodity hardware could not be clearer:
      • Collaboration across disciplines - domain science and computational/data expertise - enables better Canadian research;
      • Computational and data skills maintain their value, while hardware rapidly depreciates; and
• […]

[…] have someone else run much of that hardware. But even those costs have upsides:
        • Cloud provides more flexibility for rapidly changing research; capability mixes and system configurations can be changed much faster than hardware procurement cycles;
        • Commercial cloud infrastructure provides much better uptime and currency for researchers;
• […]

The prospect of moving to such a different service model may seem daunting, but it needn't be:
          • Move one step at a time, with a new, small, “national site” being a collection of cloud resources;
          • Not all hardware can be outsourced; make what you do retain an ownership stake in count by having it be best-in-class, enable experimentation and development of new approaches, or otherwise having owning it rather than renting it directly advance the mission;
• […]

[…] computing world of today is not that of the 1990s, and how we support computational research should take advantage of that.


            Images courtesy of shutterstock and pixabay, used under license

diff --git a/2020/bootup-fun-dual-socket-power9/index.html b/2020/bootup-fun-dual-socket-power9/index.html
new file mode 100644
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

Bootup fun - dual-socket POWER9

Well, today it's going to be a short one. For those of you out there who are like me and enjoy watching systems boot, I've recorded this brief (~3 minutes) bootup sequence of a dual-socket POWER9 based system. This was done through the CLI-based OpenBMC console (obmc-console-client), and we see the system progressing through the bootup sequence to a running instance of CentOS 7.

[Video: dual-socket POWER9 boot sequence via the OpenBMC console.]

And for something a bit more esoteric, here's another bootup video recorded a number of years back. This time it's a MIPS-based IBM Workpad z50 booting NetBSD. Definitely not a room heater, but probably the best keyboard I've used on a small form factor laptop - ironically, the form factor is referred to as "hpc", which in this case stands for "handheld pc".

[Video: IBM Workpad z50 booting NetBSD.]
diff --git a/_posts/dursi/2020-11-22-cpus-getting-weirderhtml.md b/2020/buckle-up-cpus-are-going-to-get-weirder/index.html

This is a crosspost from Jonathan Dursi's blog, R&D computing at scale. See the original post here.

      Buckle up, CPUs are going to get weirder


      The M1 is a good test run, let’s get ready

(Note: This post is adapted from last week's issue 51 of the research computing teams newsletter)

The big news of the past month has been Apple's new M1 CPU. The M1's specs are in and of themselves kind of interesting, but more […] a future of more diverse computing architectures that we'd do well to get ready for.

Large-scale research computing systems have all been about "co-design" for ages, but the truth is that in the mainstream, big-picture CPU design choices have been pretty fixed, with most of co-design […] Apple hardware in the future, M1's choices and their consequences are interesting.

M1 makes two substantially different trade-offs. The first is having DRAM on socket. This sacrifices extensibility — you can't just add memory — for significantly better memory performance and […] between chips takes a surprising amount of energy, and doing it fast takes a lot of power! The results are striking:

LINPACK - solving a set of linear equations - is a pretty flawed benchmark, but it's widely understood. The performance numbers here are pretty healthy for a chip with four big cores, but the […] But they are absurdly high for something more general-purpose like a CPU.

Having unified on-socket memory between CPU and integrated GPU also makes possible some great Tensorflow performance, […] and does weirdly well at running PostgreSQL.

The second tradeoff has some more immediate effects for research computing teams. Apple, as is its wont, didn’t worry too much about backwards-looking compatibility, happily sacrificing that for future capabilities; most existing software runs acceptably under emulation, and ARM-native builds of common tools arrived quickly. (Though that wasn’t magic either; the ecosystem had spent years slowly getting ready for adoption by the mainstream.)

“Freaking out” wouldn’t be too strong a way to describe the reaction in some corners; one user claimed that GATK would “never work” on the new machines. Given that plenty of earlier platforms took months to get good tooling for, the depth of concern seemed a bit overwrought.

This isn’t to dismiss the amount of work that’s needed to get software stacks working on new systems. Between other ARM systems and M1, a lot of research software teams are going to have to put in real porting effort. Some of the issues are subtle: R, for example, relies on a particular NaN-payload behaviour (it encodes NA as a specific NaN) that M1 does not honour, so natively compiled R needs extra checks on M1.
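
To make the NaN subtlety concrete, here’s a toy illustration (mine, not R’s actual code) of building a quiet NaN carrying R’s NA payload of 1954 and checking whether arithmetic preserves it:

import struct

def nan_with_payload(payload):
    # Quiet NaN: exponent all ones, quiet bit set, payload in the low mantissa bits.
    bits = 0x7FF8000000000000 | payload
    return struct.unpack("<d", struct.pack("<Q", bits))[0]

def payload_of(x):
    bits = struct.unpack("<Q", struct.pack("<d", x))[0]
    return bits & 0x0007FFFFFFFFFFFF

na = nan_with_payload(1954)   # R encodes NA as a NaN with payload 1954
print(payload_of(na))         # 1954
print(payload_of(na + 1.0))   # 1954 on typical x86_64; not guaranteed on every CPU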

It’s also not to dismiss the complexity that people designing and running computing systems will have to face. Fifteen years ago, the constraints on a big computing system made things pretty clear — you bought the fastest general-purpose CPUs you could afford. A growing menu of architectures means systems will increasingly be designed to favour specific workloads. And that necessarily means disfavouring others, which centres have been loath to do.


      So the point here isn’t M1. Is M1 a good choice for your research computing support needs? Almost certainly not if you run on clusters. And if you’re good with your laptop or desktop, well, then lots of processors will work well enough.


      But even so, a lot of software is going to now have to support these new chips. And this is just the start of “weird” CPUs coming for research computing.

CPUs will keep coming that make radically different tradeoffs than the choices that seemed obvious before. That’s going to make things harder for research software and research computing systems for a while. If you or your team haven’t already started testing on multiple architectures, and using build and deployment workflows and processes that can handle supporting multiple architectures, now is a good time to start.
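
As a toy example of what architecture-aware tooling can look like (the BLAS mapping below is purely illustrative, my assumption rather than any project’s actual policy), stop assuming every target is x86_64:

import platform

# Illustrative mapping from machine architecture to a preferred BLAS build.
PREFERRED_BLAS = {
    "x86_64": "mkl",
    "amd64": "mkl",
    "arm64": "accelerate",   # Apple silicon
    "aarch64": "openblas",   # other ARM servers
}

machine = platform.machine().lower()
blas = PREFERRED_BLAS.get(machine, "reference")
print(f"building against {blas} BLAS for {machine}")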

But the new architectures, wider range of capabilities, and different tradeoff frontiers are also going to expand the realm of what’s possible for research computing. And isn’t that why we got into this field?

diff --git a/2020/cobol-imperial-college-bursty-maintenance-and-sustained-scientific-software/index.html b/2020/cobol-imperial-college-bursty-maintenance-and-sustained-scientific-software/index.html
new file mode 100644
This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

      COBOL, Imperial College, Bursty Maintenance, and Sustained Scientific Software


We’ve all read about the huge rise in unemployment claims causing unprecedented loads on US state software systems, with the situation so dire that the governor of New Jersey put out an urgent call for COBOL programmers. It’s worth looking at this from the point of view of research software, where we need software to be sustainable and reproducible for long periods of time.

The systems that suddenly need COBOL developers have often been chugging away with maintenance and tweaks for 40–50 years. This is an almost unfathomable success in the world of software. So the current issue clearly isn’t with the quality of the software itself per se.

Is COBOL being “obsolete” the problem? I mean, look at that record of success again. COBOL is a proven, perfectly serviceable, domain-specific language for these sorts of batch tasks. There are ways to connect to tools and services written in other languages, so it can coexist with other systems. The lack of (say) a vibrant and rapidly-evolving ecosystem of third-party packages isn’t necessarily a bad thing here. (How innovative and cutting-edge do you want the system that sends out your pension cheques to be, exactly, when the time comes? Do you really want someone to accidentally leftpad your bank account?)

Yes, people coming in to maintain the software for the first time will have to familiarize themselves with a new, old, language. But people in research or open-source software learn an unfamiliar language to contribute to a code base every day. Even if they knew the language, they would still have to learn the codebase itself, the idioms, and the problem domain. All of those things can be quickly learned by new developers if there is documentation and tests, and especially if there are people who have recently been maintaining the code base to help. And that’s the issue here.

These COBOL systems weren’t poorly designed, or obsolete, or a bad match to their requirements. Easily handling 100x the previously expected maximum rate of applications isn’t a feature, it’s a symptom of giddy overengineering. The requirements just changed suddenly. And when that happened, the people, procedures, and resources weren’t in place to do the necessary maintenance.

There is no such thing as infrastructure which does not require maintenance, and the need for that maintenance is often quite bursty. This is just as true in research software as it is in governmental systems. Research software which goes into production needs to be written in a maintainable fashion, but that’s not enough. There has to be funding support to keep in place the people, procedures, and resources necessary to maintain that software, likely in bursts. And those resources have to remain in place between bursts.

The bursty nature of necessary maintenance has also come up in research software, in the saga of the Imperial College epidemic modelling software. When COVID-19 arrived, this tool suddenly moved from a mildly interesting research code to a key input into UK domestic policy. Transparency and flexibility leapt from being nice-to-haves to key requirements, and the people, procedures, documentation, tests, and resources weren’t in place to add them.

The importance and urgency of epidemic modelling meant that expertise and resources from many places were made available to extend and eventually rewrite the code. But this isn’t a sustainable model for research computing software, any more than it is for unemployment application processing systems.

We still genuinely don’t know how to reliably provide maintenance, bursty or otherwise, for software, shared databases, or systems in our research communities. Our funding models are all built around supporting experiments, observations, or theoretical works — short-term projects which start, proceed, result in publications and other research outputs, and are then done. Mechanisms for ongoing support of evolving research inputs aren’t even a work in progress — they’re absent.

If work on experimental methods develops new kinds of equipment or reagents which are useful to other researchers, then a vendor starts manufacturing and selling those items to researchers, with money that comes out of their grants — and that’s the sustainability model. We don’t have that for ongoing efforts in software, databases, or even reliably for hardware shared at a larger scale than a single organization yet.

For software undergoing active development, there are at least plausible approaches proposed. Some of them look, reasonably enough, like the research equipment model above. Add a modest amount of money to grants, earmarked for distribution to software, databases, or systems that the research group relies on. Maybe that would work! But it would almost certainly preferentially fund projects that are being actively worked on, taking feature requests and bug reports for software or new submissions for databases.

For mature, quiescent resources that “just work” and so fade into the background, the tools that don’t need development until they suddenly do, we need other solutions. Likely we need centres of expertise in research computing, populated by professionals as advocated by RSE societies around the world, with named maintainers even for research tools actively used but not actively developed.

People — maintainers, with the tools to do their job — are what drive software sustainability, not language choices or technologies. As a research community we need to find and retain funding to retain, develop, and empower those people to do their work. Otherwise we’re going to waste time and effort urgently re-learning and re-creating tools when individually unforeseeable but collectively predictable bursts in maintenance are needed.

diff --git a/2020/exascale-s-long-shadow-and-the-hpc-being-left-behind/index.html b/2020/exascale-s-long-shadow-and-the-hpc-being-left-behind/index.html
new file mode 100644
This is a crosspost from Glenn K. Lockwood (personal thoughts and opinions of a supercomputing enthusiast). See the original post here.

      Exascale's long shadow and the HPC being left behind


The delivery of Japan’s all-CPU Fugaku machine and the disclosure of the UK’s all-CPU ARCHER 2 system - both solidly “pre-exascale” machines with pre-exascale budgets - are opening old wounds around the merits of deploying all-CPU systems in the context of leadership HPC.  Whether a supercomputer can truly be “leadership” if it is addressing the needs of today using power-inefficient, low-throughput technologies (rather than the needs of tomorrow, optimized for efficiency) is a very fair question to ask, and Filippo took this head-on:

“Unfortunately take codes from Tier-2 with GPU to Tier-1 without GPU is a huge step backward. These calls are holding back the true potential of #GPU computing in accelerating scientific discovery! https://t.co/qVVEWFDXt1” — Filippo Spiga (@filippospiga) May 20, 2020

      Of course, the real answer depends on your definition of “leadership HPC.”  Does a supercomputer qualify as “leadership” by definition if its budget is leadership-level?  Or does it need to enable science at a scale that was previously unavailable?  And does that science necessarily have to require dense floating point operations, as the Gordon Bell Prize has historically incentivized?  Does simulation size even have anything to do with the actual impact of the scientific output?

      While I do genuinely believe that the global exascale effort has brought nearly immeasurable good to the HPC industry, it’s now casting a very stark shadow that brings contrast to the growing divide between energy-efficient, accelerated computing (and the science that can make use of it) and all the applications and science domains that do not neatly map to dense linear algebra.  This growing divide causes me to lose sleep at night because it’s splitting the industry into two parts with unequal share of capital.  The future is not bright for infrastructure for long-tail HPC funded by the public, especially since the cloud is aggressively eating up this market.

      Because this causes a lot of personal anxiety about the future of the industry in which I am employed, I submitted the following whitepaper in response to an NSCI RFI issued in 2019 titled “Request for Information on Update to Strategic Computing Objectives.”  To be clear, I wrote this entirely on my personal time and without the permission or knowledge of anyone who pays me–to that extent, I did not write this as a GPU- or DOE-apologist company man, and I did not use this as a springboard to advance my own research agenda as often happens with these things.  I just care about my own future and am continually trying to figure out how much runway I’ve got.

      The TL;DR is that I am very supportive of efforts such as Fugaku and Crossroads (contrary to accusations otherwise), which are looking to do the hard thing and advance the state of the art in HPC technology without leaving wide swaths of traditional HPC users and science domains behind. Whether or not efforts like Fugaku or Crossroads are enough to keep the non-Exascale HPC industry afloat remains unclear.  For what it’s worth, I never heard of any follow-up to my response to this RFI and expect it fell on deaf ears.

Response to “Request for Information on Update to Strategic Computing Objectives”

G. K. Lockwood
August 17, 2019

Preface

This document was written as a direct response to the Request for Information on Update to Strategic Computing Objectives (Document Number 2019-12866) published on June 18, 2019.  All views expressed within are the personal opinion of its author and do not represent the views or opinions of any individuals or organizations with whom the author may or may not be associated in any professional or personal capacities.  This document was authored without the support, knowledge, or input of any such individuals or organizations, and any similarity between the opinions expressed here and any other individuals or organizations is purely coincidental.

Question 1. What are emerging and future scientific and technical challenges and opportunities that are central to ensuring American leadership in Strategic Computing (SC), and what are effective mechanisms for addressing these challenges?
      While the NSCI Strategic Plan identified four overarching principles which are undeniably required to maintain continued American leadership, its five strategic objectives are, in many ways, mutually incompatible with each other.

      In the three years following the initial NSCI plan towards delivering capable exascale, the outcomes of the Aurora and CORAL-2 procurements within DOE have made undeniably clear that the definition of “capable exascale” necessarily requires the use of GPU technologies.  Because GPUs are, in many ways, accelerators specifically suited for scientific problems that can be reduced to dense linear algebra, this has effectively signaled that scientific challenges which are not reducible to dense linear algebra (and therefore incompatible with GPU technologies) are, by definition, no longer of strategic significance.

      By bifurcating science domains based on whether they are or are not compatible with GPU-based acceleration, we are now at a crossroads where entire classes of domain science research that have historically run at-scale on CPU-based leadership computing systems will be left behind.  To be clear, this is not simply a matter of engineering—many important classes of scientific challenges are fundamentally incompatible with the GPU accelerator model of computation, and no amount of code modernization will change this fact.  Yet these same science domains, which rely on complex multiphysics applications that are core to strategic areas such as stockpile stewardship and climate science, are of undeniably critical importance to both national security and society at large.

      Thus, there is now a clear and growing gap between NSCI’s ambition to deliver capable exascale and the larger mission to maintain leadership in entirety of truly strategically important computing in the nation.  There are technical challenges intrinsic in this growing gap which include pursuing research in hardware and software technologies that approach strategic computing more holistically rather than exclusively from a FLOPS perspective.  The community has long acknowledged that the scope of HPC has surpassed simply performing floating point operations, and the definition of capability computing now includes enabling science that, for example, may require tremendous data analysis capabilities (e.g., moving, transforming, and traversing massive data sets) but have relatively low floating point requirements.  The DOE Crossroads procurement and the Japanese leadership program and its Fugaku system embody this more balanced approach, and there is little doubt that both Crossroads and Fugaku will demonstrate a number of world’s-firsts and, by definition, demonstrate leadership in strategic computing without making all of the sacrifices required to meet today’s definition of capable exascale.

      Both Crossroads and Fugaku have required significant R&D investment to enable these dimensions of capability, and the NSCI would do well to explicitly call out the need for continued investment in such directions that are orthogonal to exaflop-level capability.

Question 2. What are appropriate models for partnerships between government, academia and industry in SC, and how can these partnerships be effectively leveraged to advance the objectives of SC?
      The most impactful models for industry-government partnership in HPC have come in the form of close collaboration between the HPC facilities that deploy extreme-scale systems and the technology providers in industry that create and support the required hardware and software solutions.  Strategy necessarily involves taking input from user requirements, workload characterization, and technology trends to inform future directions, and HPC facilities are uniquely qualified to speak to both user requirements (by virtue of the fact that they directly interact with users in support of HPC systems) and workload characterization (by virtue of the fact that they manage HPC systems).  Complementarily, industry technology providers (vendors) are uniquely qualified to speak to technology directions, marketability, and sustainability in the larger technology market.

      This effective collaboration can take the form of non-recurring engineering such as those contracts associated with large system procurements (often to address more tactical challenges towards strategic computing) or standalone programs such as DOE PathForward (which addresses longer-term technology development towards strategic computing).  In both cases though, industry (not HPC facilities or academic researchers) propose the initial scope of work based on their own understanding of both (1) HPC-specific requirements and (2) larger market and profit prospects.  This latter point is critical because the HPC market alone is simply not large enough to sustain purpose-built technologies, and sustaining new technologies and their peripheral enabling ecosystems requires buy-in from multiple markets.

      The role of academia in research is more complex, as academic research in HPC can be either basic or applied in nature.  Basic research (such as in applied mathematics and algorithm development) has stood on its own historically since such work results in a larger base of knowledge from which specific technology solutions (whether developed by industry or HPC facilities) can be composed both today and in the future.  The federal agencies participating in NSCI can claim credit for funding the basic research outcomes that have been incorporated into innumerable software and hardware technologies in use today.

On the other hand, applied research (such as developing new software systems that may implement the outcomes of basic research) has had very mixed outcomes.  It is often the case that applied researchers who have no direct relationship with either HPC facilities or technology providers formulate research projects based on second-hand HPC requirements and technology trends.  It follows that their interpretation of such requirements is incomplete, and their research outcomes are misaligned with the actual needs of HPC facilities and industry.  Barring cases where academic applied research outcomes are so valuable that they stand on their own (of which there are many examples, including OpenMPI and Tau), applied research in the absence of such a sustainability path results in a tremendous amount of software that has virtually no long-term (i.e., strategic) value to SC.

This speaks to a gap between applied research in academia and those who apply research in practice that must be closed.  This gap has been perpetuated by a lack of HPC practitioners (domain scientists and applied researchers directly attached to HPC facilities or technology providers) on the committees that evaluate the merit of research.  Thus, a more effective engagement model would involve coupling the academic research pipeline to HPC facilities and industry more closely.  This may range from something as informal as increasing the diversity of review panels and program committees to include representatives from facilities and industry, to a formal requirement that successful research proposals have a clearly defined connection to a specific industry or facility partner.  Regardless of the solution though, funding applied research that will be “thrown over the wall” to HPC facilities and vendors without their input is not compatible with SC.

Question 3. How do we develop and nurture the capable workforce with the necessary skill and competencies to ensure American leadership in SC? What are effective nontraditional approaches to lowering the barriers to knowledge transfer?
      Although virtually every report discussing strategic directions and future requirements of HPC call for knowledge transfer and building a larger workforce through training and outreach (e.g., see the complete set of DOE Exascale Requirements Reviews), such reports generally neglect two critical realities of employing and retaining a talented workforce at production HPC facilities and in industry.

      The first reality is that the problems intrinsic to modern HPC (solving problems at extreme scales) are no longer exclusive to HPC.  The ubiquity of technology in modern life now means that the entire technology industry must deal with problems at scale as a matter of course.  As such, the HPC community is now competing with well-capitalized commercial entities that have increased the absolute value of a skilled engineer to levels that the scientific research community simply cannot afford.

Thus, the perceived lack of skilled workforce in HPC is not a failing of the workforce development strategy in place; in fact, it may be a great indicator of its success, as it has created a workforce whose skills have a value that far outstrips the investment put into workforce development.  However, this also means that the talented individuals who eschew the higher pay and amenities of working in the larger technology industry do so for non-monetary reasons (work-life balance, attraction to the science mission, geographic locality).  It is therefore critically important that strategic computing identify these motivators and build upon them to the greatest possible degree to maintain an edge in an extremely competitive hiring landscape.

      The second reality is that the key to an exceptional workforce is not simply a matter of technical knowledge.  There is no shortage of individuals who understand parallel programming in the world, and it is of little strategic value to pursue workforce development strategies that prioritize knowledge transfer as the principal outcome.  Rather, strategic computing requires a workforce that is capable of critical thinking and has a natural drive to solve problems that have never been solved before.  These traits should be emphasized to a far greater degree than the current pedagogical emphasis on material that can be learned from a manual by anyone with a curious mind.

      By definition, very few people in the world have prior experience in world-class HPC.  There are very limited opportunities to build a credible work history in extreme-scale HPC for individuals who are ineligible for student internships or postdoctoral appointments.  As a result, world-class HPC facilities rarely see qualified applicants for open positions when “qualified” is defined on the basis of relevant work experience; a mid-career developer or systems engineer working in a campus-scale HPC organization simply has no opportunities to demonstrate his or her intellectual capability in a way that is outstanding to the facilities that deliver strategic computing resources.

      Thus, an integrative approach to workforce development that (1) emphasizes problem-based learning rather than rote reiteration of manuals and standards documents in an environment where (2) representatives from NSCI constituent agencies can engage with trainees (i.e., potential employees) in a fashion with less formality and pretense than a typical “CV-phone screen-interview” pipeline may reveal a much broader potential workforce whose strengths more closely align with strategic computing.  Such an approach may manifest in the form of intensive boot camps such as the DOE ATPESC program, grants for mid-career retraining in partnership with a leadership computing facility, or sabbatical support for technical staff at the nation’s mid-scale computing facilities.

Question 4. How can technical advances in SC and other large government and private initiatives, including infrastructure advances, provide new knowledge and mechanisms for executing next generation research?
      No response.

Question 5. What are the future national-level use cases that will drive new computing paradigms, and how will new computing paradigms yield new use cases?

It is easy to claim that artificial intelligence will be the most important future national use case to drive new computing paradigms.  However, this is a very dangerous statement to make without qualification, as the actual level of readiness for applying AI to solve scientific problems is very low, and the actual scales, aggregate demand, and algorithmic motifs required by such workloads for scientific discovery are poorly defined.  More generally, the requirements of AI workloads at large remain uncertain; for example, Facebook uses a variety of AI techniques in production and has found that each application area requires different computational, storage, and network resources (see Applied Machine Learning at Facebook: A Datacenter Infrastructure Perspective).  Outside of the large hyperscale datacenters, industry consensus suggests that production AI workloads remain largely at single-server scales.  As such, it is difficult to confidently assert what the rate of scale-out AI will be for strategic computing.

The current leading technique for AI at scale is deep learning, yet scientific discovery is at odds with the black-box nature of this method.  Alternative methods such as decision trees offer much more insight into why a trained model behaves as it does and are more compatible with applying physical constraints to the physical systems being modeled (e.g., see Iterative random forests to discover predictive and stable high-order interactions).  However, the relative importance of such non-black-box learning techniques in HPC is completely unknown, as are the general optimization points for such techniques in the context of scientific computing.  There is a danger that the similarities between deep learning and many HPC problems (GEMM-heavy workloads) place an artificially high importance on the role of deep learning in SC.  It may be the case that deep learning is the most effective method for applying AI to address problems in scientific computing, but caution must be taken to ensure that major challenges in SC not all look like deep-learning nails simply because GPUs are a very effective hammer.

      From a domain science perspective, there are very few domain sciences where AI can replace traditional simulation-driven workflows wholesale.  As such, the role of AI in SC will be largely supplementary; scientific workflows may integrate an AI component to generate starting conditions, replace humans in the loop during steering, or identify areas of interest in the results of a primary simulation.  However, it is very unlikely that AI will grow to be of greater significance to scientific computing than modeling and simulation.  Instead, it will be the source of new computational resource requirements that simply did not exist in the past because those tasks were carried out by humans.  The road towards integrating AI into scientific workflows will also be a long and tortuous one, as the field is evolving far more rapidly in industry than scientific computing traditionally has.  Care must be taken that SC not tie itself too closely to a method (and its associated hardware configurations) that may be deprecated in short order.

Question 6. What areas of research or topics of the 2016 NSCI Strategic Plan should continue to be a priority for federally funded research and require continued Federal R&D investments? What areas of research or topics of the 2016 Strategic Plan no longer need to be prioritized for federally funded research?
      The five objectives outlined in the 2016 NSCI Strategic Plan all gravitate around elements of topics that require continued federal R&D investments, but they do require realignment with the technological, scientific, and economic landscape as it exists now.

Objective 1: accelerating the development of capable exascale by the mid-2020s

The 2016 NSCI report correctly stated that capable exascale technologies would not be available until the mid-2020s, but DOE pulled its exascale system deliveries into the early 2020s.  As a result, the delivery of exascale had to be accelerated at significantly higher costs: there have been significant capital costs (the first US exascale systems will cost between 2x and 10x their immediate predecessors, either setting a new bar for the cost of future leadership HPC systems or resulting in a bubble in funding for all post-exascale machines), operational costs (the power budgets may exceed the original 20 MW goal by 50%), and opportunity cost (only two of the three CORAL labs actually deployed a CORAL-1 machine).

      Notably absent here is a commensurate increase (2x-10x, 1.5x, or 1.3x as above) in R&D efforts towards making these exascale systems widely accessible to applications that do not fall under the umbrella of ECP funding.  As such, NSCI must continue to emphasize the importance of funding R&D to enable the “capable” component of this objective through the mid-2020s at minimum.

Objective 2: Developing a coherent platform for modeling, simulation, and data analytics

The convergence of HPC and Big Data was a popular point of discussion when the 2016 report was written, but there has yet to be a compelling, quantitative analysis that demonstrates the difference between a “Big Data” system and an “HPC” system despite the best efforts of several leadership-scale HPC facilities.  The challenge is not one of technology and system architecture; rather, the principal design point for “Big Data” systems outside of the HPC world has simply been one of cost (e.g., scaling out cheap hardware over a cheap network for a very well-defined bulk data access pattern) over performance.  There is absolutely nothing that stops the typical “Big Data” application stacks, both old (e.g., Hadoop and Spark; see this paper) and new (e.g., TensorFlow; see this paper) from running at scale on any modern HPC systems, and both have been demonstrated at scale on systems that were sensibly designed.

      As such, this objective need not be emphasized in the future.  Rather, engineering work is required to enable the “Big Data” stacks in use outside of HPC to work efficiently on the HPC systems of tomorrow.  This remains a software, not architectural, problem, and very much an engineering, not research, challenge.

Objective 3: R&D towards post-CMOS technologies and new paradigms

It is not the role of NSCI constituent agencies to fund the development of new materials systems explicitly for post-CMOS computing, because these agencies, their review committees, and the academic researchers they fund do not have the insight into the realities of logistics, material costs, and manufacturing required to predict what combination of materials and microarchitectures could actually be turned into a marketable product that can be sustained by the larger technology industry.  In the absence of this insight, R&D towards post-CMOS technologies is likely to produce interesting demonstrations that are impractical for the purposes of actually developing leadership-scale computing systems.  Instead, such research should be funded using facility-industry partnerships as discussed previously in Question 2.

Investing in R&D towards new paradigms in computing should also be considered not with respect to enabling new scientific applications, but rather accelerating existing scientific workloads that are incompatible with exascale technologies (GPUs).  As discussed in response to Question 1, there is a very real risk of leaving entire domains of computational science behind as the definition of leadership computing (when equated to exascale) becomes increasingly narrow in scope.  Developing new accelerator technologies that are of benefit to complex application workflows (e.g., multiphysics simulations) is of critical importance in the coming years, lest missions such as stockpile stewardship and climate science fall by the wayside.

Objective 4: Improving application development and workforce development

The DOE Exascale Computing Project (ECP) has demonstrated a highly effective way of integrating researchers, application code teams, and facilities towards improving application development.  Providing a coherent ecosystem of recommended methods (such as its IDEAS project; e.g., see ECP-IDEAS), development tools (funded under its Software Technologies area), algorithm-application partnerships (through its co-design centers), and application integration efforts (funded under its Hardware and Integration area) is an excellent blueprint for improving application development.  Developing a more generic model for establishing and supporting this style of development beyond the timeline of the ECP funding should be pursued.

      Improving workforce development should reduce its focus on basic technical training and more on improving critical thinking as described in the response to Question 3 above.

Objective 5: Broadening public-private partnership

As described in the response to Question 2 above, public-private partnership is absolutely critical to sustain SC in the coming years.  The financial incentives driving technology development from the world outside of HPC have come to outstrip the resources available to HPC to exist independently.  SC efforts must engage with both technology providers and the primary market forces (the enterprise and hyperscale computing industries) to better understand where technologies, solutions, and opportunities can be pursued in partnership rather than in parallel.

Question 7. What challenges or objectives not included in the 2016 NSCI Strategic Plan should be strategic priorities for the federally funded SC R&D? Discuss what new capabilities would be desired, what objectives should guide such research, and why those capabilities and objectives should be strategic priorities?

The mission of providing capable exascale as described in the 2016 NSCI Strategic Plan is proving not to be a sustainable long-term path.  As described in the response to Question 1 above, the first exascale machines stand to accelerate scientific problems that can be cast as dense matrix-matrix multiplication problems, but there are large swaths of scientific problems to which this does not apply.  If one considers the Graph500 BFS list, three of the top five systems are over seven years old and will be retired in 2019.  While graph problems are not prolific in SC, the fact that so little progress has been made in accelerating extreme-scale graph traversal during the seven years that exascale has been aggressively pursued is indicative of some classes of HPC problems being abjectly left behind.

      Thus, a primary objective towards capable exascale must be examining the opportunity costs of the current strategic direction.  If it is determined that there is simply no way to bring forward those types of computational problems that are incompatible with GPU-based acceleration, then a clearer strategy must be formulated to ensure that the scientific challenges being solved by those computational problems do not stagnate.  As it stands, the public discourse surrounding the first-generation US exascale architectures is not universally positive because of this perceived scientific exclusivity of the chosen architectures, and such exclusivity is at odds with both capable computing and computing leadership.

diff --git a/2020/extending-the-spectrum-lsf-gui-to-display-job-gpu-metrics/index.html b/2020/extending-the-spectrum-lsf-gui-to-display-job-gpu-metrics/index.html
new file mode 100644
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      Extending the Spectrum LSF GUI to display job GPU metrics


I’ve previously written about accounting for GPU workloads in Spectrum LSF using Nvidia DCGM to collect granular metrics including energy consumed, memory used, and overall GPU utilization. Spectrum LSF collects the information and it is made available through the familiar bhist and bacct commands.


How can one go about displaying this information in the web-based job management interface that is provided by Spectrum LSF Application Center or as part of the Spectrum LSF Suites? Here we will provide a simple example showing how:

• Administrators can customize the navigation in the Spectrum LSF web-based job management interface
• Display the same GPU accounting information in the Spectrum LSF web-based job management interface

The following assumes that DCGM support has been enabled in Spectrum LSF and that you are running an edition of the Spectrum LSF Suite or Spectrum LSF Application Center.


The Spectrum LSF web-based job management interface enables GUI administrators to create new tabs with a user-specified URL or command. Here we will create a new tab which runs a command (script) that invokes the Spectrum LSF bhist command to display the GPU metrics for a given job. The script must be able to distinguish between a GPU and non-GPU job.


A. To begin, we’ll require a simple script to display the detailed historical data of a given jobID, including GPU metrics, using the Spectrum LSF bhist command. A simple example script is provided below, saved with the filename gpu_acct.sh.

#!/bin/sh
# gpu_acct.sh - display the detailed historical data for a given jobID,
# including GPU metrics, using the Spectrum LSF bhist command.
if [ -z "$1" ]
then
   echo "Usage: $0 <jobID>"
else
   OUTPUT=$(bhist -a -l -gpu "$1")
   # Only jobs that ran on GPUs report energy consumption in bhist output.
   if echo "$OUTPUT" | grep -q 'GPU Energy Consumed'
   then
      echo "$OUTPUT"
   else
      echo "Not a GPU job."
   fi
fi

      As the Spectrum LSF administrator, create the above script in the $LSF_BINDIR directory with permissions 755.


B. Next, login to the Spectrum LSF web-based interface as a user with administrative privileges and navigate to Workload > Workload. Note that the user must have the Application Center Administrator privilege.


C. It’s now necessary to select one of the jobs in the job list in order to display the job detail view. This is the page where we will be adding the GPU accounting tab.


D. Click the edit (pencil) dropdown that can be found at the top right of the Spectrum LSF web-based interface and select Edit Page.


      This will display the Create New Tab window which will be filled in during the next step.


      E. In the Create New Tab window, specify the following:

• Tab Label: GPU accounting
• Content From: Command, and specify the command gpu_acct.sh %J

      Click the Apply button to complete the addition of the new tab on the job detail page.


F. Finally, click the Edit Page dropdown on the top right corner of the interface and select Apply and exit Pages Editing to make the changes take effect. You will now see a new GPU accounting tab in the job detail view. Here I’ve selected a GPU job that was run previously through Spectrum LSF. We see the full bhist output displayed, including the detailed GPU accounting.


As a final note, for jobs that have not requested a GPU resource through Spectrum LSF, we will see the message “Not a GPU job” displayed when the GPU accounting tab is selected.


      That concludes this simple example showing how the Spectrum LSF web-based interface can be customized.

diff --git a/2020/gracc-transition-visualization/index.html b/2020/gracc-transition-visualization/index.html
new file mode 100644
This is a crosspost from Derek’s Web (thoughts from Derek). See the original post here.

      GRACC Transition Visualization


The OSG is in the process of transitioning from an older ElasticSearch (ES) cluster to a new version. Part of this process is reindexing (copying) data from the old cluster to the new. Unfortunately, it’s not easy to capture the status of this transition. For this, I have created the GRACC Transition page.


The goal is to transition when both the old and new ES have the same data. A simple measure of this is whether they share the same number of documents in all of the indexes.


      Source for this app is available on github: GRACC Transition


      Data Collection


Data collection is performed by a probe on each of the new and old ElasticSearch clusters. Upload is performed with a POST to the gracc transition website. Authorization is performed with a shared random token between the probe and the website.


      The probe is very simple. It queries ES for all indexes, as well as the number of documents and data size inside the index.
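
A minimal sketch of such a probe in Python (not the actual GRACC code; the upload URL and token header here are illustrative assumptions):

import requests

ES_URL = "http://localhost:9200"
UPLOAD_URL = "https://gracc-transition.example.com/upload"  # hypothetical endpoint
TOKEN = "shared-random-token"                               # hypothetical shared token

# _cat/indices reports, per index, the document count and store size -
# exactly the numbers being compared between the old and new clusters.
indices = requests.get(f"{ES_URL}/_cat/indices?format=json&bytes=b").json()
snapshot = {
    i["index"]: {"docs": int(i["docs.count"]), "bytes": int(i["store.size"])}
    for i in indices
}
requests.post(UPLOAD_URL, json=snapshot,
              headers={"Authorization": f"Bearer {TOKEN}"})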


      There are also many indexes that the OSG is not transitioning to the new ES. In order to ignore these indexes, a set of regular expressions is used to remove the indexes from consideration. Those regular expressions are:

/^osg.*/,           // Start with osg.*
/^ps_.*/,           // Start with ps_*
/^shrink\-ps_.*/,   // Start with shrink-ps_*
/^glidein.*/,       // Start with glidein*
/^\..*/,            // Start with .
/^ps\-itb.*/        // Start with ps-itb*
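
For illustration, the same exclusion filter sketched in Python (the index names in the checks below are made up):

import re

# Patterns matching indexes that are not being transitioned.
EXCLUDE = [r"^osg.*", r"^ps_.*", r"^shrink-ps_.*",
           r"^glidein.*", r"^\..*", r"^ps-itb.*"]

def keep(index_name):
    return not any(re.match(p, index_name) for p in EXCLUDE)

print(keep("gracc.osg.summary"))        # True - hypothetical name, not excluded
print(keep("osg-connect-job-details"))  # False - matches ^osg.*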

      The Website


      GRACC Transition Website


The gracc transition app is hosted on Heroku. I chose Heroku because it provides a simple hosting platform with a database for free.


The website pushes a lot of the data processing to the client. The data is stored in the database as JSON and is sent to the client without any transformation. The client pulls the data from the website for both the new and old ES and processes it in JavaScript.


      The website breaks the statistics into three visualizations:

1. Progress Bars: Comparing the total documents and total data size of the old and new. The progress is defined as new / old. The bars provide a very good visualization of the progress of the transition, as they need to reach 100% before we are able to fully transition.
2. Summary Statistics: The summary statistics show the raw number of either missing or mismatched indexes. If an index is in the old ES but is not in the new ES, it is counted as missing. If the index is a different size in the old vs. the new, it is counted as mismatched. (A small sketch of these checks follows the list.)
3. Table of Indices: Finally, a table of indices is shown with the number of documents that are missing, or simply Missing if the index is missing in the new ES.
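
A small Python sketch of those checks (field names assumed; the real processing happens client-side in JavaScript):

def compare(old, new):
    # Progress: total documents in the new cluster over total in the old.
    progress = (sum(v["docs"] for v in new.values())
                / sum(v["docs"] for v in old.values()))
    missing = [name for name in old if name not in new]
    mismatched = [name for name in old
                  if name in new and new[name]["docs"] != old[name]["docs"]]
    return progress, missing, mismatched

# Hypothetical index names and counts:
old = {"gracc.osg.raw-2019": {"docs": 100}, "gracc.osg.raw-2020": {"docs": 50}}
new = {"gracc.osg.raw-2019": {"docs": 90}}

progress, missing, mismatched = compare(old, new)
print(f"{progress:.0%} complete; missing={missing}, mismatched={mismatched}")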

      In addition to the table, I also provide a button to download the list of indexes that are missing or mismatched. This can be useful for an administrator to make sure it matches what they expect or to process with elasticsearch.


      Improvements and Future


In the future, I would like to generate a weekly or even daily email showing the progress of the transition. This would provide a constant reminder of the state of the transition.

diff --git a/_posts/dursi/2020-3-24-quickstart-remote-one-on-oneshtml.md b/2020/how-to-quickly-start-one-on-ones-with-your-research-computing-team-a-one-week-plan-of-action/index.html
rename from _posts/dursi/2020-3-24-quickstart-remote-one-on-oneshtml.md
rename to 2020/how-to-quickly-start-one-on-ones-with-your-research-computing-team-a-one-week-plan-of-action/index.html

This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

How To Quickly Start One-on-Ones with your Research Computing Team: A One-Week Plan of Action


      Research computing teams around the world are finding themselves working completely remotely suddenly. As a manager, you’ve gotten over the first hump and made sure everyone has the tools they need - software, VPN access, accounts on whatever chat and videoconferencing tools you’ll need. Now what?

      We all know that remote teams need more communication than on-site teams, so you’ll need to start communicating more. This is a perfect time to start doing one-on-ones if you haven’t been doing them already.


      What follows is a one-week plan to get started doing one-on-ones with your newly-remote research computing team. For each weekday, there’s about 10 minutes of reading and another 10-15 minutes of homework to do to get you started doing one-on-ones with your team starting one week from when you begin. There’s follow-up activities in weeks two and three to take stock, make tweaks, and start thinking about tools that will help.


      This document is available in pdf and epub formats. You can also sign up below to get the material sent to you one day at a time in a series (your email won’t be used for anything else except sending you the material below.)


      Day 1 - Background and Planning

      Even on-site, one of the most important things a manager can do with their teams is to have regular one-on-one meetings with each of their team members. This practice is almost ubiquitous in tech companies and many other industries. The fact that there are tools, websites, podcasts, and videos about it might lead you to think they’re complicated; they’re not. They’re super simple. Those resources all exist because one-on-ones are important and people are trying to help. Some of those resources are quite good, and I’ll provide some pointers to some of them that I think are particularly relevant in our research computing context; but you don’t need any of them.


      One-on-ones are just meetings with each individual team member and you; they get a half hour of your completely undivided attention, every week (or at worst, every other week). The basic principles of successful one-on-one meetings are:

      • The meeting is scheduled and at a regular time every week.
      • This is about building working relationships.

And that’s it. There’s no tricks or particularly hard parts! If you follow the principles above when making decisions, and are disciplined enough to keep the meetings going even (especially) when things are busy, pay attention to what you’re hearing, and follow up, you’re going to have successful one-on-ones.

Simple as they might be, these meetings are going to be the most effective way to achieve four important things:

        • Understand your team member better and so build solid working relationships.
        • Build trust with your team member.

          On top of those benefits, most managers find that these meetings actually save them time - people will save up questions they have for the one-on-ones rather than asking them as they come up, you’ll be able to better match people to tasks (and be able to find out how better to direct them on those tasks), and if anything starts to go sideways you’ll find out about it faster.


          Your assignment: Let’s get things started by finding potential one-on-one slots on your calendar starting a week from today; in a couple of days you’ll be sending the list of those timeslots out to your team members to sign up for their one-on-ones. Look for a few more slots than team members - if you have 8 team members, aim to find say 12 slots. Identify 30-minute slots on your calendar for that week, ideally with a bit of padding on either side to prepare and review your notes. Prioritize slots that are normally free in coming weeks, and start doing what you can to fence those times off for a bit. Normally we’d be doing this for three weeks out or so when our calendars are mostly empty except for recurring meetings - here we’re doing them in a hurry, and I know you already have a bunch of things lined up for next week. But this is important, so if you have to, consider rescheduling or cancelling some other low priority meetings next week to make room. List the slots in a google spreadsheet, or start a doodle poll with them as the entries, and in a couple of days we’ll get people to sign up for them.


          Also: if you’re having a weekly team meeting today, give them a heads up that because you’re now all remote you’d like to start meeting with them one-on-one every week, and you’ll be sending an email out.


          Resources

          • Your Team Just Went Remote, Now What by Stephen Younge talks about the importance of communications and one-on-ones in the current moment

            FAQs

            Q: I have 12 people directly reporting to me - I just don’t think I can find 6 hours a week to spend on this.
A: I promise you that this is one of the most productive ways you can be spending your management time — certainly in difficult periods like this, but even when things get back to normal. You don’t have to take my word for it - ask around if you know anyone doing one-on-ones in their team, and see what they say. Doing these meetings will mean you’ll be less stressed as a manager, have a better handle on what’s going on in your team, be able to put fires out sooner, have a forum for giving your team members feedback and coaching, be better able to guide your team members’ skills and career development, and your team members will know that you’re listening to them. If you’re still skeptical, phase it in - start every-other week and move to weekly after you have a few rounds under your belt.


            Q: Ok, 12, sure, but I’m a PI with a lab of 30 people. How’s that supposed to work?
            A: Thirty people is just too many people to give effective management to - you aren’t meaningfully tracking the needs, career development, and progress of thirty people now on top of everything else you need to do, and your lab members already know it. One-on-ones aren’t magic; they can’t fix that. So you’ll have to pick and choose between some options.

Perhaps you’ll prioritize trainees and some senior staff: have one-on-ones with them, and after a couple of rounds so that they understand how it works, have the senior staff start one-on-ones with more junior staff, even if there’s no formal reporting relationship there. That’s not as good as them having one-on-one time with you, but it’s better than no one having one-on-one time with anyone, and it starts preparing senior staff or trainees for future management duties. Every so often you could make sure you have “skip-level one-on-ones” with the staff or trainees who are having one-on-ones with these senior lab members - individually or as a group meeting - to make sure things are going well.

            -

Alternatively, you could just have bi-weekly one-on-ones with everyone; that’s 7.5 hours a week of providing direct, hands-on management to your team members. Again, it’s not as good as weekly one-on-ones, but it is significantly better than not having them.

            Q: So if biweekly is ok, can I do these monthly?
A: A useful way to think about one-on-ones from your team members’ point of view is to imagine the situation with you having one-on-ones with someone more senior to you - your boss if you’re a staff manager, or a department chair or dean if you’re a PI. How would you treat a regular monthly half-hour one-on-one meeting with someone you report to?

            I don’t think that this is a stretch to imagine. You’d want this precious 30 min a month to go as well as possible. You’d spend some time preparing a dog-and-pony show, probably some slides or something, and prioritize and hone a small number of questions you need answers to. It would be A Big Deal, and so kind of stressful, each time.


Your boss would get a sanitized view of what’s going on. You’d get a chance to look good to a boss who’s been around a while and recognizes this as the highly polished view that it is. You’d maybe get a few answers you needed, or a promise to get back to you on those questions, which is good - but at the cost of significant preparation and stress.

            Monthly just isn’t worth doing. Bi-weekly isn’t great - you won’t save as much time on interruptions and questions, because short questions that can wait three days for an answer often can’t wait a week or more - but it’s not bad. Weekly is the best.


            Q: My schedule gets changed at the last minute a lot - I’m not sure I can always keep the same time every week for these meetings!
            A: It’s ok! Your team members know you’re busy. Stuff gets rescheduled. They understand, really. The important thing isn’t that you’re meeting Frieda at 2pm every Thursday come what may; the important thing is that Frieda knows she always has 30 minutes on your schedule every week. Just reschedule them as needed, just like you’d do with any other important meeting - for the same week, and as soon as you know something’s coming up.


Day 2 - Understanding What The One-on-Ones Will Look Like

            You know the basic principles of each one-on-one meeting now; what does this mean for how the meetings will go?


            First, let’s talk about the medium. This is about building working relationships, and while it’s probably not literally impossible to do that over text-based chat, it’s hugely slower and more prone to missteps. On top of that, there are going to eventually be sensitive topics discussed in these one-on-ones: you want the team member to be able to tell you about problems they’re having with co-workers, or that they didn’t do so great at something, or the stress of working at home with the uncertainty of the pandemic, and they might reasonably be reluctant to put these concerns into writing and send them off into the internet.


So the options are some kind of video/teleconference, or just phone calls. Videoconferencing is better, because it lets you show facial expression and some body language; that goes a long way towards conveying intent and reducing miscommunications. You probably already use some tools for videoconferencing, and whatever you use is fine. I personally have no problem recommending:
            • Zoom.us - rock solid, requires installation of an app on desktop or mobile, free for 40-minute one-on-one calls which works perfectly here.
            • Slack videoconference - requires a paid plan which is extremely reasonably priced for academic or non-profit work; less solid but works reasonably well and if you’re already using slack it’s one fewer application to juggle. A downside is that you can’t do video on mobile.

But really, if you already use something else and your team members have access to it, just use it, it’s fine. If you’re not already using something, pick Whereby or Zoom. And if for some reason none of those are available, just plan to make phone calls.

              So that’s the medium, what’s the message? There are a number of suggested high-level agendas for one-on-ones out there. I’m going to recommend going with the Manager Tools One-on-Ones agenda: it’s super simple, I’ve seen it work very well in a number of research computing contexts, it’s well motivated by empirical data, and I think it makes the best starting point. If you’ve seen and used a different agenda that’s worked well, feel free to use it instead; otherwise, use theirs.


The Manager Tools agenda is:
              • 10 minutes for what your team member wants to talk about
              • 10 minutes for items on your agenda
• 10 minutes for professional and career development

                Let’s tackle these in order:


                Their agenda: 10 minutes for what they want to talk about.


                It’s hard to give you much information here, since your various team members will have different topics they want to cover. A number will misunderstand and try to give updates on their tasks; try to gently discourage that and nudge them toward higher-level topics.


                They will also likely have questions about the effort, their role, what’s coming next; this is a great opportunity for discussion. They’ll have questions about the current situation that you are unlikely to be able to answer, and you’ll just have to be honest about what you do and don’t know. They’ll talk about how things are currently working with the team and may bring up things you want to follow up on.


                But there will also be a lot of not particularly work-related discussion they want to have with you. Maybe it’ll be about their new home-office setup, or their pets, or their family, or the world situation. They may want to lament the loss of sports (and talk about their favourite team), or talk about a new dish they’re learning to cook now that they can’t go to one of their neighbourhood restaurants. Remember that the purpose of this meeting is to build a working relationship with your team member, to understand them better and to open up lines of communications. It’s a privilege to have them sharing this with you. Take notes and ask lots of questions — you’ll learn a lot from the answers — and share in kind if you feel comfortable doing so.


                Your agenda: 10 minutes for your topics.


                Remember, the one-on-one isn’t a status update meeting, and using it as one wastes an opportunity. You’re the boss; you can ask for status updates any time you want, and it’s something easily covered over chat or email. These meetings are better for higher level topics - talking about why they’re having troubles, or great successes, with particular tasks, what’s coming next, and the like. If you want, you can routinely check on status of tasks before the one-on-one, and then that can inform the discussion. The goal here isn’t to get a weekly update on what tasks are slipping behind - the goal here is in the medium term to have fewer tasks slipping behind because you better understand the work and your team members.


This is a good opportunity to give positive or corrective feedback on things you’ve seen over the past few days; in times of change like now, this feedback can be a useful way to reinforce expectations around things like code reviews or meetings that may look a little different now than they did several weeks ago when there was more in-person interaction.

You might well want to follow up on things from previous meetings, or things that have come up in the past week’s work, and ask questions about things you saw. In general, even during your time, the more questions you can be asking that draw information from the team member, rather than just telling them stuff, the better.

It’s also a great time to share some updates about the big-picture view of how things are going in your department or effort, and how it will affect them and their work, or opportunities it might give them in the future. It’s a great opportunity to gauge their level of enthusiasm for new aspects of upcoming work.

Don’t use one-on-ones to share information that you’re going to be sharing with several members of the team, though.
1. It’s not a good use of this time. This meeting is about the individual team member, so it should cover things specifically about them. (Covering how something you’ve updated the entire team about will affect them in particular, however, is completely on-topic.)
                2. It’s not a good use of your time. If it’s something several team members need to know about, cover it at your weekly team meeting and save yourself some time.

                  Professional Development: 10 minutes


                  This isn’t something that comes out of your time or their time, because professional development is a shared responsibility between the two of you. A first one-on-one is a great chance to update your baseline on what their long-term career goals are, and give them any guidance or resources they ask for; and to find out if there are particular aspects of the research they’d like to get more involved in or new technologies they’d like to learn. In future one-on-ones, you can cover upcoming conferences or presentation opportunities, try to find work opportunities that will help them build experience they’re looking for, or coach them on particular skill development.


Once you’ve been doing these for a while, you’ll find that there usually isn’t going to be 10 minutes’ worth of things to say about their career or skills development - their development needs or long-term career goals just aren’t going to change much week-to-week. So after a while it’ll more typically be 15 minutes for them, 15 minutes for you. But it’s still hugely valuable to have a recurring slot for these topics to be discussed, and in your first few one-on-ones there will probably be more than enough such topics to fill the time.

Your assignment: For each of your team members, start a document (or a page in a document) listing them, what you know of their career plans, what they’re working on, things you’ve been meaning to talk with them about, your current understanding of their strengths and weaknesses, how you imagine their role might change over the coming year… anything that would be useful to touch on in a one-on-one. It doesn’t need to be anything like comprehensive - it’ll get added to over the course of many one-on-ones - but it’ll be a good starting point for preparing for the first round of meetings.

Also: if today’s your weekly team meeting, give them a heads-up that because you’re now all remote you’d like to start meeting with them one-on-one every week, and you’ll be sending an email out.

                  Resources

                  • The entire Manager Tools Basics series, and especially their one-on-ones podcasts, are extremely widely followed and are definitely worth listening to.

                    FAQs

                    Q: What if they don’t have anything to say or ask for their 10-15 minutes?
A: Team members who are kind of distrustful of this new practice might be reticent to talk or ask questions. It may take them several meetings to really start trusting this whole one-on-one thing and start opening up. That’s ok; one of the purposes of having these meetings is exactly to open up lines of communications.

                    Long silences can be helpful here; ask them what they’d like to talk about, smile patiently and look at the camera, and count out 10 seconds in your head. That 10 seconds will feel like a while to you, and an absolute eternity to them, but it’s very convincing that you’re interested in waiting to hear what they have to say. If they’re still not coming up with anything, you can try prompting them with very broad questions — “How is this whole remote work going for you?” “How are you dealing with the COVID-19 situation?” “Do you feel our approach to working remote is going well?”. It’s better if the topics and questions come unprompted, but that may take some time. It’s alright.


                    Frankly though, by far the more common issue is:


                    Q: What if they go long?
                    A: This is way more common than there being not enough to say, especially at the beginning. If they’re not used to having your undivided attention, they’re going to have a lot to say, especially for the first few meetings. My suggestion is just let them go long for the first few. If there are things you really want to cover on your agenda, gently interrupt them towards the end with time to cover your priorities - but remember, you’re the boss, you can talk to them whenever you want about stuff; this is their half hour to bring things to you. If they keep going long week after week, start giving them feedback about that and gradually over the weeks get them to 10-15 minutes. But don’t be too forceful about it; these meetings are about opening channels of communication.


                    Q: I just have something quick to ask, can we start with that before we get into their stuff?
                    A: Absolutely, categorically, no. I don’t blame you, I’ve slipped and done this myself, but it sabotages the whole meeting - now it’s just another meeting with the boss quizzing the team member about stuff they want to know about. Seriously, you can wait 15 minutes - or just ask them on slack (or whatever) before the meeting.


                    Q: Can I ask about status of their tasks at all?
A: Talking about tasks’ status isn’t completely off-limits, but it’s really easy for status questions to slowly take over the one-on-ones. If there’s a task or project you two haven’t talked about for a while, by all means take this opportunity to ask for a quick update, but try to make sure that’s the exception, not the rule. This is a better venue for identifying problems that keep coming up and coaching them on dealing with those, or building on strengths they’ve shown in dealing with other tasks - high-level things rather than quick updates. Again, you’re the boss - you can ask for status updates any time you want. This time is for them.

                    Q: What if they offer status updates during their time?
                    A: Listen, and take notes; and pay attention to anything they’re saying beyond the status update (are they pointing out things they had to overcome that you didn’t know about? Are they talking in a way that suggests they’re not normally getting enough credit?). If it is really just a status update, thank them but explain that this isn’t a status update meeting, you really want to focus on higher-level issues — whether they have what they need to succeed, whether there are things they want to know about the program as a whole, how things are going with their coworkers, what have they learned working on project X, are there things that they’re concerned about for the coming weeks. If a few weeks of gentle redirection isn’t enough, you can be more direct (or you can try to short-circuit things by directly asking for status updates before the one-on-one).


                    Q: What if I don’t have a full 10-15 minutes of things to cover?
A: That’s ok! The main purpose of this meeting is for them to talk. We’ll introduce later some general questions you can raise in your time if you don’t have specific things you need to address; but otherwise, if your list is short this week, give them a heads-up so they can take more of the time - and if the meetings sometimes run short, that’s fine.

                    Day 3 - Sending Out The Invitations

You’ve done the hardest parts of preparation now - found spots in your calendar, and identified the starting points for discussions with each of your team members. Today’s easy, but it’s the key - you send out the email (or Slack message, or…) announcing that this is coming, and get people to start signing up for the slots you set aside two days ago.

Make sure that the times you identified two days ago are ready to be signed up for. Pick your favourite way of doing that, in rough order of least work-for-you to most:
                    • Google doc sign-up spreadsheet: Set up a google doc spreadsheet (or Office 365 Excel sheet or whatever you use) with the times in one column and the next column being who gets that slot. Make it nice-looking if you want; you now have a signup sheet.
                    • Doodle poll - In Doodle under Poll Settings you can “Limit the number of choices per option” to one, so first person to pick the option gets it.
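However you host it, the sheet itself needs nothing more than two columns - something like this, with made-up times and names:

```
Slot                         Who
Mon Mar 23, 10:00-10:30      Frieda
Mon Mar 23, 14:30-15:00
Tue Mar 24, 11:30-12:00      Lawrence
Wed Mar 25,  9:30-10:00
...
```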

                      With that done you’re ready to send off the announcement and begin the signups!


                      Here’s a perfectly good email announcing the one-on-ones and asking for signups. If the letter strikes you as good enough, then fill in the blanks and send it off. If you want to rework or rewrite it to be in your own voice, absolutely do so: but send it off.


                      Hi team:


With us working remotely now, we don’t get as many opportunities to talk with each other, hear how things are going, and ask questions of each other.

                      So I’d like to start having weekly half-hour one-on-one meetings with each of you individually. These aren’t status update meetings; it’s a meeting for you to tell me how things are going, ask questions about our projects, let me know what you need help with, or to tell me things you need from me. I’ll have some questions for you about how things are going, and will give any input I have on what’s gone on in the past week. And we’ll have an opportunity to spend some time each meeting talking about your professional development and career goals.


                      We’ll have the same agenda each meeting:


First 10 minutes - your agenda. Whatever you want to talk about or ask questions about. If there’s a question I don’t know the answer to offhand, I’ll have it for you by the next meeting. If you want to ask me about something you’re doing and it would help to screen-share a plot or diagram, by all means do - but this isn’t something to prepare slides for. It’s just us talking.

                      Second 10 minutes - my topics for the week. I’ll talk about things that have come up in the past week, share updates on efforts that have specific relevance to the work you’re doing, and follow up on things we discussed in earlier one-on-ones.


                      Third 10 minutes - your professional and career development. We’ll talk about your career goals and what we can both do to help you get there. Maybe there are areas of research you’d like to be more involved in, or technologies you’d like to learn more about; if we’re discussing these regularly then I’ll be better able to take them into account when opportunities come up. After our first few meetings this may not be something we have items to discuss every single week; for those weeks it’ll be 15 minutes/15 minutes.


                      We’ll have these meetings over [videoconference]. [Details].


I’ve set up a sign-up sheet for time slots at [google doc/doodle poll/here in the rest of the email]; let me know your first choice for times (first come, first served!). After a couple of weeks we can adjust the times if we need to.

We’ll begin on [start date]; I’m looking forward to having these conversations with you.

                      Let me know if you have any questions.


                      Best wishes, [You]


Your assignment: If today is your weekly team meeting, cover the content of the letter before sending it out. Then get the time-slot signup ready in whatever format you like. Rework the email as necessary, and send it off. You’re done!

                      Resources

• How to manage one to ones - One-on-ones from the team member’s point of view; you could include this in the email if you wanted.

                        FAQs

                        Q: Isn’t this… not a lot of notice for my team?
                        A: Honestly, yes - we’re ramping this up in a week total. Normally I’d suggest starting a couple of weeks out to socialize the idea and answer questions (and to make sure your calendar was less full already), but you’re working remotely now and need this a little more quickly.


                        And it’s not really that sudden. There’s a 60% chance your weekly team meeting has already happened and you’ve gotten a chance to give them a heads-up it’s coming; otherwise you’ll have one coming up and have a chance to talk with them about it.


Finally - your team knows that things are very uncertain right now, and there have been (and will continue to be) a lot of changes coming up. That both makes this more reasonable and means your team will be motivated to want more face-to-face time with you.

                        If you really think it’s going to be an issue, bump the first few meeting times to the following week to give you and your team an extra couple of days to talk it through. But it’s not like they have to prepare a presentation or something for the first meeting. It’s you and each of them talking.


                        Q: What if one of my team members won’t sign up?
                        A: So imagine you - possibly younger you - getting an offer for more and regular face time with your boss, especially during a time of uncertainty. Do you run away, or jump on it instantly?


                        Once you send this out, most if not all of your team members are going to sign up before the electrons in your email have cooled down. We’ll talk tomorrow and the next day about how to handle the theoretically-possible occasional reluctant team member; but remember that asking your team members to attend one half-hour meeting a week is a perfectly reasonable thing to ask, even with short notice, and even under completely normal circumstances.


                        Q: I’m still not sure about this scheduled meetings thing - can’t I just play it by ear every week and schedule them on the fly?
                        A: I mean, you can do anything you want. But if you want valuable one-on-one meetings, this is the way to do it.


                        “Playing it by ear” just isn’t efficient; if you thought finding gaps in your calendar and matching them to people took some time during this process, imagine doing it every single week.


                        But more importantly, these meetings are about your team members, and what matters to them is the certainty of knowing they have time on your schedule.


                        Yes, your schedule might get moved around if things get busy — but when things are busy, stuff that’s scheduled probably gets done (even if it gets done at a different time than originally planned), and stuff that isn’t on your schedule likely just gets dropped.


                        These meetings are for and about your team members, and for them to be valuable, your team members need the certainty of having first dibs on a slot in your calendar. Catch-as-catch-can is probably how you’re doing things now; the value in this is setting up something more regular.


                        Day 4 - Answering Signups and Preparing Notes

So you sent out the email yesterday, and you’re starting to get answers and maybe some questions. Today you’ll respond to some of those questions and get ready to start.

                        First, notice how many of your slots are already filled. People by and large want time with you, and are happy to have it. If you’re using google docs for a signup sheet, check out in “version history” the time that first slot got snapped up. Fast, right?


                        Start putting the signups in your calendar as recurring events, with no end date. (You can always move around individual events, or add an end date later). If you use email invitations yourself, include the team member on the event and send them an invite, even if you don’t usually do that internally, so they know they’re on your calendar. Thank them for signing up so quickly, and tell them you look forward to your conversation.
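Under the hood, a recurring event is just a weekly recurrence rule with no end date - RRULE:FREQ=WEEKLY in iCalendar terms. Your calendar app handles this for you, but if you happen to script your calendar, a sketch using the third-party dateutil library (the name and time below are made up) would be:

```python
from datetime import datetime
from dateutil.rrule import rrule, WEEKLY  # third-party: python-dateutil

# Hypothetical slot: Frieda, Thursdays at 2pm, starting next week.
first_meeting = datetime(2020, 3, 26, 14, 0)
series = rrule(WEEKLY, dtstart=first_meeting)  # no count/until: no end date

for occurrence in series[:4]:  # peek at the first month of meetings
    print(occurrence)
```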


You might be getting some questions from some team members: what to expect from these meetings since they’re so new, whether they need to prepare anything. Those could come over email/Slack, or, if you have your weekly team meeting today, in the meeting itself. These questions are (qualitative) data! Make note of them in the document you started two days ago for each team member. These are team members who might need help with change — and there’s a lot of change going on right now, so that’s good to know about — or maybe they are team members you’ll need to work a little harder with to develop trust, and the one-on-ones will help with that. On the other hand, they were comfortable enough with you to raise the question to you directly. Don’t try to interpret the data yet - just note that they had questions about the one-on-ones, and what they were.

                        Answer the questions that you get (and if you get several versions of the same question, it might also be useful to send out the answer to the whole team). Also, it might be good to send out a reminder note to nudge the stragglers.


The other thing to do today is to get some note-taking forms ready for use during the one-on-one. The details of the form don’t matter much - there needs to be a place to take notes on what they told you, a place to put reminders for things you want to talk about, and a place to write down things that have to be done for follow-up (either by you or by them). We’ll populate the “things you want to talk about” portion tomorrow.
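As a sketch - a starting point, not a prescription - a plain-text form with those three places could be as simple as:

```
One-on-one with: ____________        Date: ____________

Their items (they go first):

My items (prepared beforehand):

Professional development:

Follow-ups (who / what / by when):
```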


                        The key to taking the notes once you’re in the meeting is to (a) take notes like it’s a meeting, not like it’s a class - more about this in a minute - and (b) take notes on paper.


                        Yes, on paper. Like an animal.


                        Or on electronic paper - I use my iPad + stylus.


                        The key here is to take notes in a way that show you’re paying attention. Even remotely via teleconference, typing notes in a document doesn’t do that. Again, when in doubt, imagine yourself in one-on-ones with someone you report to. You’re talking to them, they’re nodding and typing along; you stop talking, and they keep typing for a minute or so, then look up. How paid-attention-to do you feel? I mean, sure, they’re probably taking notes - or maybe they’re firing off a quick response to an email or sending someone a slack message.


Remember, a principal goal of these meetings is the team member understanding that they have your undivided attention for a non-trivial amount of time every week. You leaning off to the side, scribbling something down, and coming up with a pen in your hand occasionally is completely unambiguous.

On my iPad, I have a notebook for each team member (I use Goodnotes; there are a lot of other good ones out there too), and before each meeting I set up one page with the date on it for stuff they’re telling me about, one page for me where I can put things that I want to talk about, and I use the bottom of each page for follow-up items.

                        The other key note-taking thing is to take notes like you would for a meeting, not like you would for a class. We in academia-adjacent fields can be really bad about this. We’ve spent a lot of time in class and lectures and symposia and colloquia writing down all the information being presented like any of it might be on the exam. That’s easier when you’re doing the note taking - you don’t have to make decisions about what’s important and what’s not - but it means that it’s more work to use the notes later, since everything’s there no matter how important it was. And in our line of work we know that stuff is read many more times than it’s written.


                        So while you’re taking notes, try to focus on the important things; specific things they say or do that you want to remember, things that need to be followed up on, surprises, etc. This will make you a more careful listener, too. If you’re not sure if something was important, ask a followup question about it and you’ll find out pretty quickly. Useful multi-purpose followup prompts every research computing manager should have in their toolbox include: “Tell me more about that.” “That sounds tricky.” and “What are some options for handling that?”


Your assignment: As above; add the meetings to your calendar as the signups come in, and answer any questions that you get. If there are people who haven’t signed up, send a reminder message. Get some note-taking forms ready for your meetings; we’ll populate them with some initial questions and topics tomorrow. Just make sure there’s some room at the start of the form for what they tell or ask you (because they go first), a place for you to write down things you want to talk about (we’ll put stuff there tomorrow), and a place to take note of things that need to be followed up on, by either of you.

                        Resources

Manager-Tools podcast - At the end of the page (look for downloads) there’s a template form for taking notes and question prompts: the “1-on-1 Key Points and Prep Form”, in PDF or Word. It’s a good starting point for your notes form if you want to use it.

                        FAQs

                        Q: Seriously, on paper?
                        A: Like an animal, yes.


                        Q: C’mon.
                        A: You c’mon.


                        Q: Ok, fine. What if one of the filled slots has already been stomped on by a meeting that’s come up for me?
                        A: It happens, and there’s no sense in pretending it won’t. Just let the team member know; it’ll give them a model of what might happen in the future. “Hey Lawrence - I’ve got you in my calendar for 11:30am on Tuesdays now - thanks for signing up! Something just came up for our slot this week - it’s a one-off and I tried to have it rescheduled but I can’t avoid it. Can we do 3pm on Thursday for just this week?” This is one of the reasons you made sure there were a few extra slots. Maybe X-out that replacement slot from the signup sheet if people are still signing up.


                        Q: Should we set up some kind of shared agenda so we can each see what topics we each have?
A: Some teams find that extremely useful, and some don’t. Tools inevitably shape the work they’re used for, so you and your team still need to figure out the best way to run these for your particular work first. Hold off on integrating tooling into these meetings until you have a better sense of what you need. After a couple of weeks of one-on-ones is a good time to take stock and see what might be helpful.

                        Day 5 - Preparing for the First Week of One-On-Ones

This is almost it! All, or at least most, of your team has signed up for one-on-ones; you’ll have had your weekly team meeting at some point this week, where you either gave the team members a heads-up or answered some questions; and you have note sheets ready to be filled out. Today you’ll handle anyone who hasn’t signed up yet, and figure out how to fill out the forms with your topics for the first one-on-ones.

It’s really unlikely that, more than a full day in, you still have someone who hasn’t signed up for a one-on-one slot after you’ve answered questions and sent out a reminder. I’m including what to do if it does happen, because people will ask otherwise, but understand that this isn’t the common case.

                        If you do have someone dragging their feet, this is the time to follow up with them. (And the fact that you have to follow up with them is also data which is worth recording. Maybe it’s reluctance, maybe they’re overwhelmed, maybe it’s a lot of things — you don’t know yet, but there’s something there). Find out if they have questions; be genuinely curious about their reasons for not signing up, don’t assume a reason (this is a time of a lot of change and disruption, we’re all going through a lot right now). Try to get to the reason it hasn’t been done yet, do what you can to address it, and directly ask them to choose one of the remaining times by the end of the day.


If by the end of the day they still haven’t signed up, sign them up for one of the remaining slots - the latest one that’s still available, ideally - and send them the invitation. Yes, this is a little heavy-handed, but you’ve asked them three times now within 48 hours to complete a very simple and reasonable task. Either they are reluctant for work-related reasons, or overwhelmed for potentially non-work-related reasons; either way, having individual conversations with them is the right thing to do, and your duty as their manager.

                        Ok, so by the end of the day you will have everyone signed up for one-on-one slots. Now it’s time to make sure you know what you’ll be talking about in your time.


                        Pull out the document you wrote three days ago covering each team member, and the forms from yesterday, and pick a few easy things you’d like to cover in your first one-on-one together with each of the team members. Maybe it’s about something coming up that’s relevant to them, maybe it’s a check in on a project that hasn’t come up in a while, maybe you’ve been thinking about getting them to give more presentations and you wonder if that’s something they’d be up for. Make a note to ask how they’re doing, and whether they think the team is working well in this new mode, and if they have any suggestions for things the team could do differently or start doing.


                        You may also want to review some one-on-one question lists (there’s some in the resources section below) to get some ideas for big picture questions you could ask that would draw out information that would help you. Open-ended questions that can lead into followup questions are often very eye-opening, and you’ll find that ten minutes flies by.


                        Note that this is really not the time to bring up that thing that really bugs you but you haven’t mentioned yet because you’ve been waiting to have “the talk”. This is not the time for “the talk”. If you start a new practice like one-on-ones and then immediately drop something like that on them, especially if it’s from a while ago and they had no idea, they’ll feel ambushed — and they won’t be wrong.


                        Now I’m a big believer in giving corrective feedback (even though I still find it really hard!) and in research computing, not giving enough negative feedback is a much bigger problem than giving too much. But this meeting series is about them, and developing a solid working relationship and trust. When that’s done, then among other things it will be easier for you to give and them to receive respectful, helpful, negative feedback, and have it taken in the spirit it’s intended. But building that relationship and trust, especially with everything else changing around us, will take time.


                        Lean way into the positive for now. (Again, big believer in the usefulness of good negative feedback, but people often forget how powerful and effective positive feedback is in encouraging the behaviour that the team needs and helping a team member grow to reach their potential.) If they’ve been working at anything even approaching their normal productivity these past weeks, there’s lots to give positive feedback about! It’s ok to point out some small negative things you saw in the last week or so — nudge them about being better with “mute” on Zoom, remind them about that code review that’s still pending, ask them to be on time to the next team call, whatever — but don’t poison these meetings for the team member by introducing some big negative topic early on, and do not dig up anything that’s more than a couple of weeks old.


                        For this week, preparing for the career development section is really easy. Unless you’ve had this conversation very recently, just make a note to ask them what their medium and long term career goals are now, and what skills and experience they’d like to develop to get there.


                        And that’s it. Everyone’s signed up; the one-on-one forms are ready and waiting; next week the one-on-ones start. You’re all ready — you can do this!


                        Resources

• How to Coach Employees? Ask these One-on-One Meeting Questions - Claire Lew, KnowYourTeam
• The Ultimate 1-on-1 Meeting Questions Template - PeopleBox
• One-on-Ones app - Random one-on-one questions

                        FAQs

                        Q: More of a comment than a question - This whole thing is a lot of work?
                        A: Kind of, yeah. But it’s a lot more work getting them started than keeping them going. Once everything is set up, it only takes a few minutes a week per team member in addition to the meetings to get all of the benefits of one-on-ones — which will help your team members and help you.


                        Q: I’ve gone back and forth with one team member and answered their questions and they still seem reluctant; what should I say?
                        A: “I look forward to speaking more about this with you Wednesdays at 1pm.”


Seriously, there are very few times when “because I’m the boss” is a good enough reason; this sort of process detail about how the team will work together, when you’re now all suddenly working remotely, is exactly one of those times. It’s good to hear their concerns if they have any, you should respect those concerns, and you should expect them to show up for the one-on-one.

Day 6 - Last-Minute Reminders

                        Congratulations - this is the day the one-on-ones start! You’ve done the hardest part, trust me.


The first few one-on-one meetings may seem a little awkward and stilted, but they’ll quickly grow more comfortable as you get the hang of them.

                        Keep in mind the principles:

                        • The meeting is scheduled and at a regular time every week.
                        • This is about building working relationships.

You’ve already got the first one done, by creating the schedule, and you’ve got the note-taking sorted. Now you just have conversations with your team members.

                          If I could recommend any tactics for the conversations, I’d just say:

                          1. They go first. Kick off with “What would you like to talk about?” or something similar to hand over the agenda to them.
                          2. Listen a lot more than you speak, and ask a lot of high-level questions and followup questions.

                            Your assignment: Have some one-on-ones!


                            Day 8 - How’s It Going?

                            Hey, it’s the middle of one-on-ones week — congratulations! You got them started, and fast! How’s it going?


                            This is a good time to glance through the notes from the first few one-on-ones. What are you learning that you didn’t know this time last week? Have you already helped some of your team members solve a problem they were having?


                            Be sure to add the things you said you’d followup on to whatever task list system you use. Having the conversations with your team members builds a good working relationship; but it’s following up on the things you said you’d do that builds trust. Did the team member ask you for some information? To contact someone for them? To get something un-stuck in administration? Add it to your list and get them done before the next one-on-one. That, more than anything else, will prove to them you were listening and care about what you heard.


                            Day 10 - Reviewing Week One - What Went Well, and Planning for Week Two

You’ve finished your first week of one-on-ones, just 10 work days after starting the process. Congratulations - this is a big milestone.

                            So the benefits I listed on day one of getting started with one-on-ones were:

                            • Understand your team member better and so build solid working relationships.
                            • Build trust with your team member.

                              Building trust will take more than a week; setting that one aside, do you feel that there are team members you already understand a bit better? How did the team members seem to react to having your attention for 30 minutes? Did you learn anything about the work being done that surprised you?


                              Scan those one-on-one notes again and update the document on your team members with things you learned. It could be very work-related things like career goals, or it could be things like the names of their pets - if they told you about it, it’s important to them, so it’s important to you.


                              Of the team members, who had a lot to say? Did some go better than others? How are you doing with the follow up tasks? Do those followup tasks suggest new topics of discussion for next week?


                              Now start putting together the one-on-one notes for next week. Have you learned something - maybe a question - that worked really well with one team member and you want to try with others? What’s come up over the last week that you’d like to talk about?


Congratulations - you’re done with week one, and already ready for week two! Preparing this does take a little time each week, and it always will, but it will get easier as the weeks go on.

                              Day 15 - Reviewing Week Two - What Went Well, and Thinking of Future One-on-Ones

                              Fantastic. Two weeks of one-on-ones!


                              You’re now starting to get the hang of this, and seeing what works and what doesn’t.


                              This might be a good time to take stock and see if there are things that would help the process go more smoothly. Have there been topics that have come up that some preparation would have been good for? Maybe it would be useful to have some kind of shared agenda. Some groups just have a google doc for each team member shared with the manager, and that can work nicely.


If that would be useful, consider raising it at the next team meeting. But for heaven’s sake, before you put any item on it, think about it from the direct’s point of view. If your one-on-one is on a Thursday, and on Monday you enter an agenda item like “Performance on Project X” or “Your future in the department”, your team member is going to have a very bad week. Be much more explicit and include context: “Project X review: 5 things that went well, one thing to tweak for next time”, or “Opportunities opening up elsewhere in the department”.

If you’d like more specialized tools, there are a bunch of promising-seeming ones; I’ll list some I’ve heard of below in the resources.

Those tools might be helpful to you, or might not; our team doesn’t even use a shared agenda, but research computing is incredibly diverse and your team’s needs will be different from ours. If there are any tools you find that make your team’s one-on-ones easier and more successful, by all means use them (and let me know!)

                              You’re done! This is the end of the 3-week run of emails on starting one-on-ones in a hurry. If this was valuable to you, consider signing up for the Research Computing Teams Newsletter; any time something like this is posted it will show up in the newsletter, along with a weekly roundup of management, community, and technology news relevant to you as the manager of a research computing team.


                              Congratulations again, and best of luck to you and your research computing team!


                              Resources


                                FAQs

                                Q: So this isn’t as bad as I thought it was going to be, but I’m still not convinced. Should I just drop them?
A: Do me a favour? Keep them going for two months. Have them become part of the routine way you manage. Get input from your team members. Then do what you think is best.


NUMA on NINE with Spectrum LSF

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
NUMA (non-uniform memory access) has been written about ad nauseam. For those fans of POWER processors out there, we’ll show briefly in this blog what the NUMA looks like on a dual-socket POWER9 development system. For those not familiar with NUMA there are many resources that can be found on the Internet describing NUMA systems in detail. In a nutshell, a NUMA system is made up of a single planar board on which memory is divided among the processor sockets, so that each processor accesses its own local memory faster than memory attached to the other socket.

This is just a quick example of affinity jobs in IBM Spectrum LSF. You can find out much more about the capabilities of Spectrum LSF in the documentation, which is available on the IBM Knowledge Center.


PDSW'20 Recap

This is a crosspost from Glenn K. Lockwood - Personal thoughts and opinions of a supercomputing enthusiast. See the original post here.
This year was the first all-virtual Parallel Data Systems Workshop, and despite the challenging constraints imposed by the pandemic, it was remarkably engaging.  The program itself was contracted relative to past years and only had time for three Work-In-Progress (WIP) presentations, so it was a little difficult to pluck out high-level research trends and themes.  However, this year's program did seem more pragmatic, with talks covering very practical topics that had clear connection to production storage and I/O. The program also focused heavily on the HPC side of the community, and the keynote address was perhaps the only talk that focused squarely on the data-intensive data analysis side of what used to be PDSW-DISCS.  Whether this is the result of PDSW's return to the short paper format this year, shifting priorities from funding agencies, or some knock-on effect of the pandemic is impossible to say.

    Although there weren't any strong themes that jumped out at me, last year's theme of using AI to optimize I/O performance was much more muted this year.  Eliakin del Rosario presented a paper describing a clustering and visual analysis tool he developed that underpins a study applying machine learning to develop an I/O performance model presented in the main SC technical program, but there was no work in the direction of applying AI to directly optimize I/O.  Does this mean that these ideas have climbed over the hype curve and are now being distilled down into useful techniques that may appear in production technologies in the coming years?  Or was the promise of AI to accelerate I/O just a flash in the pan?

    In the absence of common themes to frame my recap, what follows are just my notes and thoughts about some of the talks and presentations that left an impression.  I wasn't able to attend the WIP session or cocktail hour due to non-SC work obligations (it's harder to signal to coworkers that you're "on travel to a conference" when you're stuck at home just like any other workday) so I undoubtedly missed things, but all slides and papers are available on the PDSW website, and anyone with an SC workshop pass can re-watch the recorded proceedings on the SC20 digital platform.


    Keynote - Nitin Agrawal

    This year’s keynote by Nitin Agrawal was a long-form research presentation on SummaryStore, an “approximate storage system” that doesn't store the data you put in it so much as it stores the data you will probably want to get back out of it at a later date.  This notion of a storage system that doesn't actually store things sounds like an affront at a glance, but when contextualized properly, it makes quite a lot of sense:


    There are cases where the data being stored doesn't have high value.  For example, data may become less valuable as it ages, or data may only be used to produce very rough guesses (e.g., garbage out) so inputting rough data (garbage in) is acceptable.  In these cases, the data may not be worth the cost of the media on which it is being stored, or its access latency may be more important than its precision; these are the cases where an approximate storage system may make sense.


    The specific case presented by Dr. Agrawal, SummaryStore, strongly resembled a time series database to feed a recommendation engine that naturally weighs recent data more heavily than older data.  The high-level concept seemed a lot like existing time series telemetry storage systems where high-frequency time series data are successively aggregated as they age so that new data may be sampled every few seconds while old data may be sampled once an hour.

For example, LMT and mmperfmon are time series data collection tools for measuring the load on Lustre and Spectrum Scale file systems, respectively.  The most common questions I ask of these tools are things like:

• What was the sum of all write bytes between January 2018 and January 2019?
• How many IOPS was the file system serving between 5:05 and 5:10 this morning?

By comparison, it's very rare to ask "How many IOPS was the file system serving between 5:05 and 5:10 two years ago?"  It follows that the storage system underneath LMT and mmperfmon can be "approximate" to save space and/or improve query performance, and Dr. Agrawal's presentation included a pictorial representation of exactly this aging-based aggregation.
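To make the aging idea concrete, here is a minimal sketch (my own illustration, not SummaryStore's actual design) of a telemetry store that keeps recent samples at full resolution and collapses anything older into coarse per-window sums:

    from collections import deque

    # Recent samples are kept at full resolution; anything older than
    # AGE_LIMIT seconds is folded into WINDOW-sized sums and the detail
    # is discarded. Queries over old ranges become approximate but cheap.
    AGE_LIMIT = 3600          # keep one hour of full-resolution samples
    WINDOW = 300              # older data survives only as 5-minute sums

    recent = deque()          # (timestamp, write_bytes) samples
    archive = {}              # window_start -> summed write_bytes

    def record(ts, write_bytes):
        recent.append((ts, write_bytes))
        while recent and recent[0][0] < ts - AGE_LIMIT:
            old_ts, old_bytes = recent.popleft()
            window = old_ts - (old_ts % WINDOW)
            archive[window] = archive.get(window, 0) + old_bytes

    def total_write_bytes(start, end):
        # Exact over recent data, window-granular over archived data.
        total = sum(b for w, b in archive.items() if start <= w < end)
        return total + sum(b for t, b in recent if start <= t < end)

A query like "sum of all write bytes over a year" loses nothing under this scheme, while a question about a five-minute window from two years ago can only be answered to WINDOW granularity--which is exactly the trade being made.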


    This notion of approximate storage is not new; it is preceded by years of research into semantic file systems, where the way you store data is driven by the way in which you intend to access the data.  By definition, these are data management systems that are tailor-made for specific, high-duty cycle I/O workloads such as web service backends.

    What I took away from this presentation is that semantic file systems (and approximate storage systems by extension) aren't intrinsically difficult to build for these specific workloads.  Rather, making such a system sufficiently generic in practice to be useful beyond the scope of such a narrow workload is where the real challenge lies.  Tying this back to the world of HPC, it's hard to see where an approximate storage system could be useful in most HPC facilities since their typical workloads are so diverse.  However, two thoughts did occur to me:

1. If the latency and capacity characteristics of an approximate storage system are so much better than generic file-based I/O when implemented on the same storage hardware (DRAM and flash drives), an approximate storage system could help solve problems that traditionally were limited by memory capacity.  DNA sequence pattern matching (think BLAST) or de novo assembly could feasibly be boosted by an approximate index.
2. Since approximate storage systems are purpose-built for specific workloads, the only way they fit into a general-purpose HPC environment is using purpose-built composable data services.  Projects like Mochi or BespoKV provide the building blocks to craft and instantiate such purpose-built storage systems, and software-defined storage orchestration in the spirit of DataWarp or the Cambridge Data Accelerator would be needed to spin up an approximate storage service in conjunction with an application that would use it.

    I'm a big believer in #2, but #1 would require a forcing function coming from the science community to justify the effort of adapting an application to use approximate storage.


    Keeping It Real: Why HPC Data Services Don't Achieve I/O Microbenchmark Performance

Phil Carns (Argonne) presented a lovely paper full of practical gotchas and realities surrounding the idea of establishing a roofline performance model for I/O.  The goal is simple: measure the performance of each component in an I/O subsystem's data path (application, file system client, network, file system server, storage media), identify the bottleneck, and see how close you can get to hitting the theoretical maximum of that bottleneck.

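In spirit, the roofline bookkeeping is as simple as the sketch below (all component names and numbers are made up for illustration): microbenchmark each stage in isolation, let the slowest one set the ceiling, and score the real workload against it.

    # Roofline-style I/O accounting in miniature (made-up numbers):
    components_gbs = {
        "application buffering": 12.0,
        "file system client":     9.0,
        "network":               11.5,
        "file system server":     8.5,
        "storage media":         10.0,
    }
    measured_end_to_end = 6.1   # GB/s observed by the real workload

    bottleneck = min(components_gbs, key=components_gbs.get)
    ceiling = components_gbs[bottleneck]
    print(f"bottleneck: {bottleneck} at {ceiling} GB/s")
    print(f"roofline efficiency: {measured_end_to_end / ceiling:.0%}")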


The thesis of the paper was that even though this sounds simple, there's a lot more than meets the eye.  I won't recite the presentation (see the paper and slides--they're great), but I thought some of the more interesting findings included:

    1. There's a 40% performance difference between the standard OSU MPI bandwidth benchmark and what happens when you make the send buffer too large to fit into cache.  It turns out that actually writing data over the network from DRAM (as a real application would) is demonstrably slower than writing data from a tiny cacheable memory buffer.
    2. Binding MPI processes to cores is good for MPI latency but can be bad for I/O bandwidth.  Highly localized process placement is great if those processes talk to each other, but if they have to talk to something off-chip (like network adapters), the more spread out they are, the greater the path diversity and aggregate bandwidth they may have to get out of the chip.
    3. O_DIRECT bypasses page cache but not device cache, while O_SYNC does not bypass page cache  but flushes both page and device caches.  This causes O_DIRECT to reduce performance for smaller I/Os which would benefit from write-back caching when used by itself, but increase performance when used with O_SYNC since one less cache (the page cache) has to be synchronized on each write. Confusing and wild.  And also completely nonstandard since these are Linux-specific flags.
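Finding #3 is easier to appreciate with the flags in hand.  Here's a tiny Linux-only sketch (mine, not from the paper) that combines both flags; note that O_DIRECT demands block-aligned, block-sized buffers, which an anonymous mmap conveniently provides:

    import mmap, os

    # O_DIRECT bypasses the page cache (but not the device cache) and
    # requires block-aligned buffers; an anonymous mmap is page-aligned,
    # satisfying typical 512B/4KiB alignment rules. O_SYNC flushes each
    # write through to stable media. The target file must live on a file
    # system that supports O_DIRECT (tmpfs, for example, does not).
    ALIGN = 4096
    buf = mmap.mmap(-1, ALIGN)       # page-aligned 4 KiB buffer
    buf.write(b"x" * ALIGN)

    fd = os.open("direct-test.dat",
                 os.O_WRONLY | os.O_CREAT | os.O_DIRECT | os.O_SYNC, 0o644)
    os.write(fd, buf)                # cache-bypassing, synchronous write
    os.close(fd)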

    Towards On-Demand I/O Forwarding in HPC Platforms

    Jean Luca Bez (UFRGS) presented a neat userspace I/O forwarding service, FORGE, that got me pretty excited since the field of I/O forwarding has been pretty stagnant since IOFSL came out ten years ago.

Because the forwarding layer is provisioned on demand, the number of FORGE nodes can be tailored to each job's workload:
• Have a very IOPS-heavy, many-file workload?  Since these tend to be CPU-limited, it would make sense to allocate a lot of FORGE nodes to this job so that you have a lot of extra CPU capacity to receive these small transactions, aggregate them, and drive them out to the file system.
• Have a bandwidth-heavy shared-file workload?  Driving bandwidth doesn't require a lot of FORGE nodes, and fewer nodes means fewer potential lock conflicts when accessing the shared file.
    This intelligent I/O forwarding naturally maps to file system architectures that incorporate I/O forwarding and stateless components--like VAST--where more network and computational parallelism can be sloshed into a compute node's data path to deal with more complex or adversarial I/O patterns.


    Fractional-Overlap Declustered Parity

    Huan Ke (U Chicago) presented a paper that tried to bridge the gap between RAID implementations that use declustered parity, which has really fast rebuild but a huge failure domain, and traditional (clustered) parity which has very slow rebuilds but a very small failure domain.

The special sauce proposed by Ke is being judicious about how stripes are laid out across a declustered group.  Using Latin squares to map RAID blocks to physical drives, one can control how many unique stripes would be affected by a failure (termed the overlap fraction).

    In the engineering context, you essentially never repeat an experiment if you can infer the result of varying one parameter using a combination of other experiments.  In the parity placement scheme, you never use a block mapping if a combination of drive failures will break all your RAID stripes.  The neat idea behind what Ke presented is a method to vary this constraint so that you can find layout schemes that have any mix of blast radius (how many stripes are lost on an unrecoverable failure) against rebuild time.
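A toy version of the placement scheme (my own illustration, not Ke's actual construction) shows how the layout controls the blast radius of a multi-drive failure:

    # Toy Latin-square placement: stripe s puts its i-th block on drive
    # (s + i) % n, so no drive carries two blocks of the same stripe.
    # Varying the mapping changes how many stripes share any pair of
    # drives--the knob behind the paper's "overlap fraction."
    n = 7   # drives in the declustered group
    k = 3   # blocks per stripe (e.g., 2 data + 1 parity, tolerating 1 loss)

    layout = {s: [(s + i) % n for i in range(k)] for s in range(n)}

    def broken_stripes(failed_drives):
        # Stripes losing more blocks than a single parity can rebuild.
        return [s for s, drives in layout.items()
                if sum(d in failed_drives for d in drives) > 1]

    print(broken_stripes({0, 1}))   # -> [0, 6]: 2 of 7 stripes unrecoverable

Count the unrecoverable stripes for every possible drive pair under different mappings, and the overlap fraction becomes a tunable design parameter rather than an accident of layout.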


    NVIDIA GPUDirect Storage Support in HDF5

    John Ravi presented his work implementing support for NVIDIA's brand new GPUDirect Storage (which allows data transfer between GPU memory and an NVMe device without ever touching host memory using peer-to-peer PCIe) in HDF5.  Much of the talk focused on the implementation details specific to HDF5, but he did present some performance results which I found quite interesting:


In the presented results, "SEC2" refers to the default POSIX interface, "DIRECT" is POSIX using O_DIRECT, and "GDS" is GPUDirect Storage.  What surprised me here is that all of the performance benefits were expressed in terms of bandwidth, not latency--I naively would have guessed that not having to bounce through host DRAM would enable much higher IOPS.  These results made me internalize that the performance benefits of GDS lie in not having to gum up the limited bandwidth between the host CPU and host DRAM.  Instead, I/O can enjoy the bandwidth of HBM or GDDR to the extent that the NVMe buffers can serve and absorb data.  I would hazard that in the case of IOPS, the amount of control-plane traffic that has to be moderated by the host CPU undercuts the fast data-plane path enabled by GDS.  This is consistent with literature from DDN and VAST about their performance boosts from GDS.


    Fingerprinting the Checker Policies of Parallel File Systems

The final PDSW talk that struck a chord was by Runzhou Han, who presented a methodology for exercising parallel file systems' fsck tools using targeted fault injection.  He intentionally corrupted different parts of the data structures used by BeeGFS and Lustre to store metadata, then ran fsck to see how well those mistakes were caught.  I think the biggest intellectual contribution of the work was formalizing a taxonomy of different types of corruption events (junk data, zeros written, duplicate data, and out-of-sync data) and the ways in which fsck does or does not cope with them.


The practical outcome of this work is that it identified a couple of data structures and corruption patterns that are particularly fragile on Lustre and BeeGFS.  Alarmingly, two cases triggered kernel panics in lfsck, which raises the question: why isn't simple fault injection like this part of the regular regression testing performed on Lustre?  As someone who's been adjacent to several major parallel file system outages that resulted from fsck not doing a good job, hardening the recovery process is a worthwhile investment since anyone who's having to fsck in the first place is already having a bad day.
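The core methodology is simple enough to sketch in a few lines (illustrative only--the paper targets Lustre's lfsck and BeeGFS's checker, but the same idea is shown here against a scratch ext4 image): corrupt a controlled region of on-disk metadata, then see whether the checker notices.

    import subprocess

    # Build a throwaway file system image, overwrite part of its metadata
    # region with junk, and check whether fsck detects the damage. A real
    # study would target specific structures (inodes, directories, the
    # journal) and classify the checker's response to each corruption type.
    IMG = "scratch.img"
    subprocess.run(["truncate", "-s", "64M", IMG], check=True)
    subprocess.run(["mkfs.ext4", "-Fq", IMG], check=True)

    with open(IMG, "r+b") as f:
        f.seek(4096)                       # just past the superblock region
        f.write(b"\xde\xad\xbe\xef" * 64)  # 256 bytes of junk metadata

    # -f forces a full check; -n answers "no" to all repairs (read-only).
    # A nonzero exit code means fsck found, or choked on, the corruption.
    result = subprocess.run(["fsck.ext4", "-fn", IMG])
    print("fsck exit code:", result.returncode)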

That said, this paper seemed much more practical than foundational, and it was unclear where this goes once the immediate issues discovered are addressed.  To that end, I could see why hardening fsck isn't getting a lot of research attention.
This is a crosspost from Glenn K. Lockwood (Personal thoughts and opinions of a supercomputing enthusiast). See the original post here.
    SC'20 Recap


The HPC industry's biggest conference, SC, was held virtually over the last two weeks. Although the original plan to hold it in Atlanta was supplanted by an all-virtual format, it still managed to be a whirlwind show full of product showcases, research presentations, and interesting talks, panels, and workshops. The virtual format certainly wasn't the same as attending in-person, but some of the conference buzz and tone could still be sensed by following the #SC20 tag on Twitter.

As with SC'19, the conference seemed subdued in part due to the fact that many attendees were still being pulled away by their daily lives while attending and in part because the HPC community is still waiting for exascale to finally get here. The community's conversion to remote work has also smeared a lot of the usual vendor briefings and big announcements out over the entire five-month period since ISC'20, causing most of the hot news at SC this year to seem incremental over years past.

    Still, I picked up on a few themes that I thought were noteworthy, and what follows is a recap of some of the highlights from the conference as I saw them.

    All the standard disclaimers apply to the remainder of this post: these are just my personal opinion and do not represent the viewpoint of anyone other than me. I'm not an expert on many (most?) of these topics, so my observations may be misinformed or downright wrong--feel free to get in touch if I stand to be corrected. Also bear in mind that what I find interesting is colored by my day job as a storage architect; I don't pay close attention to the scientific or application spaces in HPC and instead focus on hardware, architecture, systems design, integration, and I/O. As such, I'm sure I missed all sorts of topics that others find exciting.


    Table of Contents

    1. Big Splashes
      1. What's new
      2. What's missing
    2. High-level Themes
      1. Computing Technologies Futures
      2. Storage Technologies Futures
    3. Actual Future Directions
      1. The Relationship of HPC and AI
      2. Disaggregation in Practice
    4. Spectrum Scale User Group vs. Lustre BOF
      1. Enterprisey features that organizations may care about
      2. Manageability features that administrators may care about
      3. Performance, scalability, and reliability features that end users may care about
      4. Interface features that platform developers may care about
      5. Overall Impressions
    5. IO-500 BOF
    6. Concluding Thoughts

    Big Splashes

    Although there weren't any earth-shattering announcements this year, there were a few newsworthy developments that received a healthy amount of press attention.


    What's new

RIKEN's Fugaku machine made its debut at ISC'20 in June this year, but I felt a lot of its deserved fanfare was muted by the newness of the pandemic and the late-binding decision to convert ISC'20 to an all-remote event. SC'20 was when Fugaku got to really shine; it improved benchmark results for HPL, HPCG, and Graph500 relative to its ISC'20 numbers:


    Fugaku performance improvements since July 2020


NVIDIA also announced a new SKU of its Ampere A100 data center GPU with a whopping 80 GB of HBM2e. This was surprising to me since the A100 with 40 GB of HBM2 was only first unveiled two quarters ago. The A100 chip itself is the same, so there's no uptick in flops; they just moved to HBM2e stacks, which allowed them to double the capacity and get an incremental increase in memory bandwidth.

So, who's this part for? Doubling the HBM capacity won't double the price of the GPU, but the A100-80G part will undoubtedly be more expensive despite there being no additional FLOPS. My guess is that this part was released for:

1. People who just want to fit bigger working sets entirely in GPU memory. Larger deep learning models are the first thing that come to my mind.
2. People whose applications can't fully utilize the A100's flops due to suboptimal memory access patterns; higher HBM2e bandwidth may allow such apps to move a little higher along the roofline.
3. People who may want to purchase AMD's next-generation data center GPU (which will undoubtedly also use HBM2e and will probably be released before the follow-on to Ampere is ready).

    NVIDIA also upgraded its Selene supercomputer to include these A100-80G parts, moving its Top500 position to #5 and demonstrating that these parts exist and deliver as advertised.


    What's missing

    HPE/Cray was pretty quiet on announcements, especially after two SCs in a row with Shasta (now "Cray EX") news. HPE undoubtedly has its head down readying its first large Shasta installations, and given the fact that the primary manufacturing facilities for Cray Shasta are located in a COVID hotspot in the US, maybe this was to be expected--this autumn has not been the time to rush anything.


That said, we know from EPCC's announcement on Twitter (July 31, 2020) that Cray EX systems have been shipping since July 2020. So it is a little surprising that HPE was not promoting any early customer or science success stories yet, and the only Cray EX/Shasta system to appear on Top500 was Alps, a modest 4.6 PF Rome-based system at CSCS. Next year--either at the all-virtual ISC'21 or at SC'21--will likely be the year of Cray EX.

    Intel was also pretty quiet about Aurora, perhaps for the same reason as HPE/Cray. The fact that Intel's biggest hardware news was around Ice Lake suggests that Intel's focus is on fulfilling the promises of disclosures they made at SC'19 rather than paving new roads ahead. There was a healthy amount of broad-stroke painting about exascale, but aside from the oneAPI buzz I mentioned above, I didn't see anything technically substantive.

Sadly, IBM was the most quiet, and perhaps its most prominent appearance in this year's official program was in winning the Test of Time Award for the Blue Gene/L architecture. It was almost a eulogy of IBM's once-dominant position at the forefront of cutting-edge HPC research and development, a feeling underscored by the absence of perhaps the most noteworthy IBMer involved in the creation of Blue Gene. This isn't to say IBM had no presence at SC'20 this year; it's just clear that their focus is on being at the forefront of hybrid cloud and cognitive computing rather than supercomputing for supercomputing's sake.


    High-level Themes

The most prevalent theme that I kept running into was not the technology on the horizon, but rather the technology further off. There were a few sessions devoted to things like "Post Moore's Law Devices" and "Exotic Technology" in 2035, and rather than being steeped in deep technical insight, they leaned more towards either recitations of similar talks given in years past (one speaker presented slides that were literally five years old) or outlandish claims that hinged on, in my opinion, incomplete views of how technology evolves.


    I found the latter talks a bit disturbing to find in the SC program since they contained very little technical insight and seemed more focused on entertainment value--the sort of thing usually relegated to post-conference hotel bar conversation. So rather than repeat their predictions as gospel, I'll present my critical take on them. I realize that it's far easier for me to throw stones at people at the top of the hill than to climb there myself, and I'm perfectly willing to accept that my opinions below are completely wrong. And, if you'd like to throw stones at me yourself, I contributed my position to a panel on tiered storage this year against which all are welcome to argue.


    Computing Technologies Futures

    This year's focus on far-flung technologies at SC made me wonder--are these sorts of talks filling out the program because there's no clear path beyond exascale? Is it possible that the HPC community's current focus on climbing the exascale mountain is taking our minds off of the possibility that there's nothing past that mountain except desert?


    For example, Shekhar Borkar gave his five-year outlook on memory technologies:

Moore's Law will survive as long as we change what it means, according to Borkar

    The only way the future of HPC will be predictable is if you're willing to define what HPC is to fit whatever the available technologies are. Yelick expressed the same sentiment with her "Not sure, but it will be called OpenMP" bullet, and to his credit, Sterling himself did this with his Beowulf cluster. If the market just gives you a pile of parts, strap it together and call it HPC. And if transistor scaling has no more steam, find something that still has legs and call it Moore's Law.


    Storage Technologies Futures

On the storage front, the predictions from 2006 for 2020 storage technology were pretty reasonable as well. Dr. Mark Kryder (of Kryder's Law fame) predicted that Kryder's Law would hold:


Mark Kryder's vision for HDDs in 2020, as told in 2006

    Kryder essentially acknowledged that his projection relies on something intrinsically special about HDDs; he commented that the technological advancements required to reach 1.8 PB HDDs will happen because HDD engineers don't want to lose their jobs to the flash industry. Personally, I'd take a new job with an exciting future over a gold watch any day of the week. Maybe that's the millennial in me.

    I found this general theme of wildly projecting into the future rather yucky this SC, and I won't miss it if it's gone for another fifteen years.  By their very nature, these panels are exclusive, not inclusive--someone literally has to die in order for a new perspective to be brought on board.  There was an element to this in the Top500 BOF as well, and one slide in particular made me cringe at how such a prominent good-ol-boys club was being held up before the entire SC community.  These sorts of events are looking increasingly dated and misrepresentative of the HPC community amidst the backdrop of SC putting diversity front and center.


    Actual Future Directions

    Although wild projections of the future felt like fashionable hot topics of the year, a couple of previous hot topics seemed to be cooling down and transitioning from hype to reality. Two notable trends popped out at me: the long-term relationship between HPC and AI and what disaggregation may really look like.


    The Relationship of HPC and AI

As has been the norm for a few years now, deep learning (now more broadly "AI") was peppered across the SC program this year. Unlike previous years, though, the AI buzz seemed to be tempered by a little more pragmatism, as if it were coming down the hype curve. Perhaps the best talk that captured this was an invited talk by Cliff Young of Google about the possibility of a Virtuous Cycle of HPC and AI.


    The "convergence of HPC and AI" has been talked about in the supercomputing community since HPC-focused GPUs were reinvented as an AI accelerator. If you look at who's been selling this line, though, you may realize that the conversation is almost entirely one-way; the HPC industry pines for this convergence. The AI industry, frankly, doesn't seem to care what the HPC industry does because they're too busy monetizing AI and bankrolling the development of the N+1th generation of techniques and hardware to suit their needs, not those of the HPC industry.

Dr. Young's talk closed this loop by examining what the AI industry can learn from HPC; the so-called "Cambrian explosion" of accelerators is somewhere near its peak, which has resulted in a huge architectural design space to explore.

Liquid cooling in hyperscale - one of few areas in which HPC is ahead

    Google's TPU v3 was its first foray into direct liquid cooling, a data center technology that HPC has been using for decades (think: Cray-2's waterfall). While this may not seem spectacular to any PC enthusiast who's done liquid cooling, the difficulty of scaling these systems up to rack-, row-, and data center-scale are not always linear. Young explicitly acknowledged HPC's expertise in dealing with liquid-cooled infrastructure, and if hyperscale is driven in this direction further, HPC will definitely benefit from the advances that will be enabled by a new and massive market driver.


    Disaggregation in Practice

The promise of disaggregation--having pools of CPU, persistent memory, GPUs, and flash that you can strap together into a single node--has been around for a long time and had steadily gained attention as a potential candidate for an exascale technology. However, I don't think there was a realistic hope for this until IBM's AC922 node--the one that comprises the Summit and Sierra systems--hit the market and demonstrated a unified, hardware-enabled coherent memory space across CPUs and GPUs.


    The actual story there wasn't great though; coherence between CPU and GPU was enabled using NVIDIA's proprietary NVLink protocol while the CPU and NIC were connected via a different coherence protocol, OpenCAPI, over the same physical interface. CCIX and GenZ also emerged as high-speed protocols for cache coherence and disaggregation, and the story only got worse when Intel put forth CXL as its standard for coherence and disaggregation.

    Fortunately, the dust is now settling and it appears that CXL and GenZ are emerging at the front of the pack. There was an amicable panel session where members of these two consortia presented a unified vision for CXL and GenZ which almost appeared credible: CXL would be the preferred protocol for inside a chassis or rack, and GenZ would be the preferred protocol between chassis and racks. Key features of the finalized CXL 2.0 standard were unveiled which largely revolved around support for CXL switches:

Latency of CXL in the context of storage devices
(2) CXL will not replace PCIe as a host I/O interface; it is a superset of PCIe, and many devices will remain happy with PCIe's load/store semantics. Of course, this is what I would say too if I had effective control over both the CXL standard and the PCIe SIG.

    When asked directly if Intel had joined the GenZ consortium though, Sharma gave a terse "no" followed by "no comment" as to why. He then immediately followed that with a very carefully crafted statement:

    "While we have not joined the GenZ consortium, we are fully supportive of making the CXL enhancements that will help GenZ."


    The panelists also commented that the MOU was designed to make transitioning from CXL to GenZ protocols smooth, but when asked exactly how the CXL-to-GenZ bridge would be exposed, Tim Symons (representing Microchip and GenZ) could not offer an answer since this bridging function is still being defined. These sorts of answers left me with the impression that CXL is in the driver's seat and GenZ has been allowed to come along for the ride.


    Reading between the lines further, there was a striking absence of HPE people on the panel given the fact that GenZ originated within HPE's "The Machine" project. It remains unclear where GenZ fits now that HPE owns Slingshot, a different high-performance scale-out switched fabric technology. What would be the benefit of having a three-tier Slingshot-GenZ-CXL fabric? If CXL 2.0 adopted a single-hop switch and fabric manager, what's to stop CXL 3.0 from expanding its scope to a higher radix or multi-hop switch that can sensibly interface directly with Slingshot?

    Given that CXL has already eaten a part of GenZ's lunch by obviating the need for GenZ host interfaces, I wouldn't be surprised if GenZ eventually meets the same fate as The Machine and gets cannibalized for parts that get split between future versions of Slingshot and CXL. CXL has already effectively killed CCIX, and IBM's decision to join CXL suggests that it may be positioning to merge OpenCAPI's differentiators into CXL after Power10. This is pure speculation on my part though.


    Spectrum Scale User Group vs. Lustre BOF

Because SC'20 was smeared over two weeks instead of one, I got to attend both the Lustre BOF and one of the Spectrum Scale User Group (SSUG) sessions. I also came equipped with a much more meaningful technical understanding of Spectrum Scale this year (I've spent the last year managing a group responsible for Spectrum Scale at work), and it was quite fascinating to contrast the two events and their communities' respective priorities and interests.


    The Spectrum Scale User Group featured a presentation on "What is new in Spectrum Scale 5.1.0" and the Lustre BOF had its analogous Feature Discussion. I broadly bucketize the new features presented at both events into four categories:


    1. Enterprisey features that organizations may care about

    For Spectrum Scale, this included support for newer releases of RHEL, SLES, Ubuntu, AIX(!), and Windows (!!). IBM also noted that Spectrum Scale also now supports the zEDC hardware compression unit on the z15 mainframe processor:


Spectrum Scale 5.1 platform updates (slides: https://www.spectrumscaleug.org/wp-content/uploads/2020/11/episode-11-what-is-new-in-5-1.pdf)


    New improvements to Spectrum Scale's security posture were also presented that were a little alarming to me. For example, one no longer has to add scp and echo to the sudoers file for Spectrum Scale to work (yikes!). There was also a very harsh question from the audience to the effect of "why are there suddenly so many security fixes being issued by IBM?" and the answer was similarly frightening; Spectrum Scale is now entering markets with stringent security demands which has increased IBM's internal security audit requirements, and a lot of new vulnerabilities are being discovered because of this.

It's ultimately a good thing that Spectrum Scale is finding and fixing a bunch of security problems, since the prior state of the practice was simply not performing stringent audits. I assume that Lustre's approach to security audits is closer to where Spectrum Scale was in years past, and should Lustre ever enter these "new markets" to compete with Spectrum Scale, I expect a similarly uncomfortable quantity of security notices would come to light. This is all speculative though; the only definite is that IBM is moving GPFS towards role-based access control, which is a positive direction.

    Overall, Spectrum Scale seemed considerably more focused on developing these enterprisey features than Lustre.


    2. Manageability features that administrators may care about

Spectrum Scale also revealed a bunch of smaller features that are nice to have for administrators, including:

    • Faster failing of hung RDMA requests - you can now set a maximum time that an RDMA request can hang (e.g., if an endpoint fails) before its thread is killed by Spectrum Scale itself. This avoids having to wait for lower-level timeouts and seems like a nice-to-have knob for a file system that supports a lot of path and endpoint diversity. Lustre may be ahead on this front with its lnet_transaction_timeout parameter, but it's unclear exactly how these two settings differ.
    • Safeguards against administrator error - Spectrum Scale added features that warn the administrator about doing something that may be a mistake, such as accidentally breaking quorum by downing a node or mapping incorrect drive slots to RAID groups. There's not really equivalent functionality in Lustre; these are the places where Lustre solution providers (think HPE/Cray ClusterStor) get to value-add management software on top of open-source Lustre (think cscli)
    • GUI and REST API changes - you can do an increasing amount of management operations using the Spectrum Scale GUI or its underlying control-plane REST API. Lustre has the IML GUI, but it isn't treated as a first-class citizen in the same way that Spectrum Scale does and it was not mentioned at the Lustre BOF at all. Again, this is an area where vendors usually value-add their own management on top of community Lustre.
    • Improved monitoring, reporting, and phone-home - a framework called "MAPS" was recently introduced to essentially do what Nagios does in most DIY environments--raise alarms for crashes, resource exhaustion, misconfiguration, and the like. It also does performance monitoring and historical data aggregation. As with the other manageability features mentioned, Lustre relies on third-party tools for these features.


    For resilience, Spectrum Scale announced new tunable parameters to improve parallel journal recovery:

    Spectrum Scale's latest advancements in improving recovery performance

    This is actually somewhere that Lustre is quite a bit ahead in some regards because it has long had features like UID/GID mapping and subdirectory mounts that allow for a greater degree of isolation that maps well to untrusted containers.

That all said, Lustre's focus is not on taking on more of these nice-to-have manageability features. When asked about adding basic manageability features like supporting easy addition/removal of Lustre OSTs and OSSes to enable evergreen Lustre systems analogous to Spectrum Scale's mmrestripefs command, the answer was effectively "no." The reason given is that (1) Lustre clients are where files get stitched together, so migration will always have to involve client access, and (2) lfs find and lfs migrate already provide the tools necessary to move data files in theory. From this, I take away that stitching those two lfs commands together into a tool that actually does what mmrestripefs does is an exercise left to the reader--or a company who can value-add such a tool on top of their Lustre offering.
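For the curious, that exercise might look something like the sketch below (my own illustration, not a supported tool; it assumes the hypothetical OST being drained has already been deactivated on the MDS so that new layouts avoid it):

    import subprocess

    # Stitch "lfs find" and "lfs migrate" into a crude OST drain: list
    # every regular file with objects on the target OST, then rewrite each
    # one with a fresh layout. A production tool would add parallelism,
    # throttling, open-file checks, and real error handling.
    FSROOT = "/mnt/lustre"   # hypothetical mount point
    DRAIN_OST = "12"         # hypothetical index of the OST being drained

    files = subprocess.run(
        ["lfs", "find", FSROOT, "--ost", DRAIN_OST, "--type", "f"],
        capture_output=True, text=True, check=True).stdout.splitlines()

    for path in files:
        # -c -1 restripes the file across all active OSTs.
        if subprocess.run(["lfs", "migrate", "-c", "-1", path]).returncode:
            print("failed to migrate:", path)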


    3. Performance, scalability, and reliability features that end users may care about

Spectrum Scale didn't have a huge amount to offer in the way of user-facing performance, scalability, and reliability features this year. They improved their support for QOS (which is admittedly fantastic when compared to Lustre's Token Bucket Filter QOS, which cannot limit IOPS like Spectrum Scale can) from an administrator standpoint, and they have begun to think about how to incorporate TRIMming into flash-based Spectrum Scale deployments to offer reliable performance.


    By comparison, Lustre's new features really shine in this department. Andreas Dilger presented this slide near the beginning of his talk:

    Some of Lustre's many upcoming performance improvements

which reflects significant attention being paid to improving the performance of emerging noncontiguous and otherwise adversarial I/O patterns--perhaps motivated by storage-hungry AI and genomics markets.

    Lustre is also introducing features aimed at both scale-up and scale-out, with a 30x speedup in the time it takes to mount petabyte OSTs (likely in preparation for the exascale Lustre installations coming in the next year or two) and automated directory metadata sharding, shrinking, and balancing. From this, it's clear that the primary focus of Lustre continues to be extreme scale and performance above all else, but it's unclear how much of this effort is putting Lustre ahead of Spectrum Scale as much as it is catching up to all the effort that went into making Spectrum Scale scale out to 250 PB for the Summit system.


    4. Interface features that platform developers may care about

    The newest release of Spectrum Scale introduces improvements to NFS (by adding v4.1 support), CSI (incremental improvements), SMB (incremental improvements), and most surprising to me, HDFS. By comparison, I don't think Lustre directly supports any of these interfaces--you have to use third-party software to expose these protocols--and if they are supported, they aren't under active development.


    Overall Impressions

These two presentations pointed to a sharp contrast between how Spectrum Scale and Lustre position themselves as storage systems; IBM's vision for Spectrum Scale is as a high-capacity data lake tier against which a diversity of apps (HPC, containerized services, map-reduce-style analytics) can consume and produce data. They even said as much while talking about their HDFS support:


Spectrum Scale's vision as a hub for all data in the enterprise

Lustre, by comparison, appears to be focused squarely on performance and scale. There was no logo soup or architectural vision presented at the Lustre BOF itself. This is likely a deliberate effort by the Lustre community to focus on being an open-source piece of a larger puzzle that can be packaged up by anyone with the need or business acumen to do so. Just as Linux is a community effort around which companies like Red Hat (IBM) or SUSE build and market a solution, Lustre should be just one part of an organization's overall data management strategy, whereas Spectrum Scale is trying to be the entire answer.

    This isn't a value judgment for or against either; Lustre offers more architectural flexibility at the cost of having to do a lot of day-to-day lifting and large-scale architectural design oneself, while Spectrum Scale is a one-stop shop that likely requires fewer FTEs and engineering effort to build infrastructure for complex workflows. The tradeoff, of course, is that Spectrum Scale and its surrounding ecosystem is priced for enterprises, and absent a new pricing scheme that economically scales cost with capacity (hypothetically referred to as "data lake pricing" at the SSUG), the choice of whether to buy into Spectrum Scale or Lustre as a part of a larger data strategy may come down to how expensive your FTEs are.

    On a non-technical note, the Lustre BOF certainly felt more community-oriented than the Spectrum Scale UG; the dialog was more collegial and there were no undertones of "customers" demanding answers from "vendors." This is not to say that the SSUG wasn't distinctly more friendly than a traditional briefing; it just felt a bit more IBM-controlled since it was on an IBM WebEx whose registration was moderated by IBM and where all the speakers and question answerers were IBM employees. Perhaps there's no other way in a proprietary product since the vendor ultimately holds the keys to the kingdom.


    IO-500 BOF

    The IO-500 BOF is one of my favorite events at both ISC and SC each year, but as with the rest of SC'20, this year's IO-500 BOF felt like a quiet affair. I noticed two noteworthy themes:


    +

    +
    1. I/O performance is being awarded in dimensions beyond just peak I/O bandwidth. There are six awards now being given for first place: 10-node bandwidth, 10-node metadata, 10-node overall, total bandwidth, total metadata, and total overall. This contrasts with Top500 which treats performance in a single dimension (peak HPL) and implicitly perpetuates the position that HPL performance is the only aspect of performance that defines "#1." I quite like the IO-500 approach because it makes it easier to see a multidimensional picture of I/O performance and apply your own value system to the list to decide what combination of hardware and storage system software qualifies as #1.
    2. The importance of system configuration is elevating in the IO-500 community--defining a system hardware schema, presenting the data uniformly, and establishing standard tools and techniques for collecting this data from the systems running the IO500 benchmark are all on the roadmap for the IO-500 benchmark. Again, this makes the list much more valuable for the purposes of learning something since a properly annotated set of submissions would allow you to understand the effects of, for example, choosing NVMe over SAS SSDs or declustered parity over RAID6 on nonvolatile media.


    The final IO-500 list for SC'20 itself didn't change much this time; experimental and proof-of-concept file systems remain dominant in the top 10 positions, and DAOS, WekaFS, and IME carry most of the weight. However the #1 position was a surprise:

    Overall winner for the IO-500 full list was Pengcheng Laboratory's MadFS

    A new file system called "MadFS" took the top spot with some ridiculous performance numbers, and frustratingly, there have been no public disclosures about what this file system is or how it works. The IO-500 committee said that they spoke privately with the submitters and felt comfortable that the entry was legitimate, but they were not at liberty to disclose many details since Pengcheng Laboratory is preparing to present MadFS at another venue. They did hint that MadFS drew inspiration from DAOS, but they didn't offer much more.

    Peeling the MadFS submission apart does reveal a few things:

    • It is a file system attached to Pengcheng Laboratory's Cloudbrain-II system, which is a Huawei Atlas 900 supercomputer packed with Huawei Kunpeng 920 ARM CPUs and Huawei Ascend 910 coprocessors. Cloudbrain-II is a huge system with a huge budget, so it should have a very capable storage subsystem.
    • 72 processes were run on each of the 255 client nodes, reaching a peak of 2,209,496 MiB/second. This translates to 73 Gbit/sec out of each 100 Gb/s node--pretty darned efficient (the back-of-the-envelope check after this list shows the arithmetic).
    • The MadFS file system used is 9.6 PB in size, and the fastest-running tests (ior-easy-*) ran for a little over six minutes. This corresponds to 863 TB read and written in the best case, which is reasonable.
    • The ior-easy tests were run using a transfer size of 2,350,400 bytes, which is a really weird optimization point. Thus, it's unlikely that MadFS is block-based; it probably runs entirely in DRAM or HBM, is log-structured, and/or relies on persistent memory to buffer byte-granular I/O from any underlying block devices.
    • The submission indicates that 254 metadata nodes were used, and each node had six storage devices. The submission also says that the data servers (of an undefined quantity) have 2 TB NVMe drives.
      • Since 255 clients and 254 metadata servers were used, this may suggest that metadata is federated out to the client nodes. This would explain why the metadata rates are so astonishing.
      • If the 9.6 PB of NVMe for data was located entirely on the 255 clients, each compute node would have needed over 37 TB of NVMe after parity. This seems unlikely.
      • From this, we might guess that MadFS stores metadata locally but data remotely. This would be a very fragile architecture for important data, but a reasonable one for ephemeral storage akin to UnifyFS.
    • MadFS is not ready for prime time, as its statfs(2) returns nonsense data. For example, the MadFS ior-easy-* runs report that the file system has zero inodes, while the ior-hard-* runs reported 268 trillion inodes, all of which are used.
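
    For the curious, the arithmetic behind those bullets is easy to reproduce. The only number assumed below is the exact runtime, since "a little over six minutes" is all the submission pins down:

        # Back-of-the-envelope checks of the MadFS figures quoted above.
        MIB = 2**20
        total_mib_s = 2_209_496              # peak MiB/s from the submission
        clients = 255

        per_node_gbit = total_mib_s / clients * MIB * 8 / 1e9
        print(f"{per_node_gbit:.1f} Gbit/s per client")    # ~72.7 of 100 Gb/s

        runtime_s = 6.2 * 60                 # assumed: "a little over six minutes"
        moved_tb = total_mib_s * MIB * runtime_s / 1e12
        print(f"~{moved_tb:.0f} TB per ior-easy phase")    # ~862 TB, i.e. the ~863 TB above

        per_client_tb = 9.6e15 / clients / 1e12
        print(f"{per_client_tb:.1f} TB of NVMe per client")  # ~37.6 TB if data were client-local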


    Until more disclosures are made about MadFS and the Cloudbrain-II system, though, there's little intellectual value in this IO-500 submission. However, the waters are definitely chummed, and I for one will be keeping an eye out for news about this Chinese system.

    Finally, although not part of the IO-500 BOF, Microsoft Azure released benchmark results shortly afterwards about their successful demonstration of over 1 TB/sec using BeeGFS in Azure. This wasn't run to the IO-500 spec, so it wouldn't have been a valid submission, but it is the single fastest IOR run in the cloud of which I am aware. This bodes well for the future of parallel file systems in the cloud, as a blessed BeeGFS/Azure configuration would compete directly with Amazon FSx for Lustre.


    Concluding Thoughts

    Virtual SC this year turned out to be far more exhausting than I had anticipated despite the fact that I never had to leave my chair. On the upside, I got to attend SC with my cat for the first time:


    Harriet dialing into the Women in HPC Workshop with me

    In my past SC recaps I remarked that I get the most out of attending the expo and accosting engineers on the floor, and the complete absence of that made SC feel a lot less whole. As a speaker, the lack of engagement with the audience was very challenging too. The 45-second delay between live video and Q&A made dialog challenging, and there was no way to follow up on questions or comments using the virtual platform. I suppose that is the price to be paid for having an otherwise robust virtual event platform.

    Although COVID forced us all into a sub-optimal SC venue this year, I think it also took away a lot of advancements, discussions, and dialog that would've fed a richer SC experience as well. With any luck SC can be in-person again next year and the community will have bounced back and made up for the time lost this year. When SC'21 rolls around, we should have at least one exascale system hitting the floor in the US (and perhaps another in China) to talk about, and the Aurora system should be very well defined. We'll have a few monster all-flash file systems on the I/O front to boot (including one in which I had a hand!), and the world will be opening up again--both in the technological sense and the literal sense. The future looks bright.

    As always, I owe my sincerest thanks to the organizers of SC this year for putting together the programs that spurred this internal monologue and the dialogues in which I engaged online these past two weeks. I didn't name every person from whom I drew insight, but if you recognize a comment that you made and would like attribution, please do let me know.


    Finally, if you'd like to read more, see my recaps of the PDSW'20 workshop, my tiered storage panel, and the forthcoming DAOS User Group.


    This is a crosspost from Computing – thinking out loud (works in progress and scattered thoughts, often about computers). See the original post here.

    Sketching out HPC clusters at different scales


    High-performance computing (HPC) clusters come in a variety of shapes and sizes, depending on the scale of the problems you’re working on, the number of different people using the cluster, and what kinds of resources they need to use.

    However, it’s often not clear what kinds of differences separate the kind of cluster you might build for your small research team:

    Note: do not use in production

    From the kind of cluster that might serve a large laboratory with many different researchers:

    The Trinity supercomputer at Los Alamos National Lab, also known as “that goddamn machine” when I used to get paged at 3am

    There are lots of differences between a supercomputer and my toy Raspberry Pi cluster, but also a lot in common. From a management perspective, a big part of the difference is how many different specialized node types you might find in the larger system.


    Just a note: in this post I’m assuming we’re talking about compute clusters of the type that might be used to run simulations or data analysis jobs. This probably won’t help if you’re designing a database cluster, a Kubernetes cluster to serve a web infrastructure, etc.


    Let’s start with one of the simplest ways you can build a cluster: a collection of compute nodes, all connected to a network, with a single “head node” that coordinates work between them:

    Diagram showing a single head node connected to five compute nodes with a single network

    With this design, the head node performs most of the functions that coordinate work or provide shared services on the cluster. The compute nodes are then free for the actual compute jobs on the cluster, like simulating the weather or analyzing telescope data!


    Some of the shared services that most clusters provide from the head node include:

    • Running a job scheduler that accepts requests from the users and queues them up to run on the compute nodes
    • Exporting a shared filesystem to the other machines, so they can all access the same storage space
    • Accepting user logins so that the people who want to run on the cluster have an access point to the cluster
    • Acting as a management node that the cluster sysadmins can use to help maintain the rest of the cluster

    This kind of design can scale remarkably well, and it’s probably the most common kind of cluster out there. But at some point, you might find that the head node is doing too much, and you need to split its functions across multiple machines.
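
    To make the starting point concrete, here is a toy Python inventory of the all-in-one layout; the hostnames and role names are invented for illustration:

        # All shared services live on head01; compute nodes only run jobs.
        cluster = {"head01": ["scheduler", "shared_filesystem", "user_logins", "management"]}
        cluster.update({f"compute{i:02d}": ["compute"] for i in range(1, 6)})

        # Scaling up, as described below, amounts to moving roles off head01
        # and onto dedicated login, management, and storage nodes.
        for host, roles in sorted(cluster.items()):
            print(host, roles)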


    The first thing you’ll often see is moving user logins onto their own dedicated login node:

    Diagram showing a login node, a management node, and five compute nodes connected on the same network

    All the other functions are still on the head node (which is often explicitly called a management node at this point). But by moving user logins to their own node, it becomes easier to do maintenance or make changes to the larger system without disturbing your users.


    (It also means that if your users accidentally crash the login node, they’re less likely to take down all those shared services on the management node…)


    If you have lots of users, you can also easily add more login nodes! These scale pretty well because the shared services are all still on the management node, but your users get more interactive nodes for their development work.

    Diagram showing three login nodes, a management node, and five compute nodes on the same network

    At this point, you might also set up a second management node in order to provide redundancy or failover in case your primary management node fails:

    Diagram showing three login nodes, two management nodes, and five compute nodes

    At this point we have a lot of compute nodes, redundant management nodes, and a nice collection of login nodes for the users to use for their work. What else might we need as we scale up?


    Well, for one thing, the shared filesystem is still on the management node. We might want to split it off onto its own machine to provide better performance:

    Diagram showing three login nodes, two management nodes, a storage node, and five compute nodes on the same network
    Following tradition, storage is represented as a poorly-drawn cylinder to match the shape of a hard drive platter

    Or if we want to scale our performance higher than a single storage server can provide, we might want to use a distributed filesystem like Lustre, BeeGFS, or GPFS and provide a whole tier of dedicated storage machines:

    Replace single storage node with three storage nodes in a cluster

    You might also notice that we’re using the same network for everything! Communication between compute nodes, access to storage, and management services are all competing to send messages over the same network. This could be a problem if, for example, the application wants to simultaneously read lots of data from storage and exchange messages with neighboring compute nodes.


    At this point we may want to split these different types of traffic onto their own networks:

    Same diagram, but add a separate application network connecting only the compute nodes, and a separate storage network connecting storage and compute only

    Depending on how much you need to optimize (or how much you want to spend!), you may have several different networks connecting all the machines in the cluster, separated by function. You may have dedicated networks for functions like:

    • High-speed network (or application network): This is a dedicated network for user applications to communicate between compute nodes, and is often built using specialized hardware like Infiniband or a vendor-proprietary technology. This is especially important if you use technologies like MPI in your applications, which rely heavily on inter-node communication.
    • Storage network: This is a dedicated network for access to storage. If you rely on especially fast network storage, you might use Infiniband or another very fast network here too.
    • Management network: This is often the “everything else” network, used for job scheduling, SSH, and other miscellaneous traffic. This is often a less-performant network, using 1Gb or 10Gb Ethernet, because we expect the heavier usage to be on the application or storage networks.
    • Out-of-band management network: Many datacenter environments have methods for managing individual servers outside their operating systems, such as accessing the baseboard management controllers. However, this kind of access can be a security risk, and it’s often put on its own network to restrict access.

    All these different networks may be on their own hardware, for the best performance; or they may be virtual networks (VLANs) sharing the same physical connections.
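
    As a sketch of what that separation might look like on paper--the VLAN IDs and subnets below are invented for illustration:

        # One illustrative VLAN/subnet plan for the traffic classes above.
        import ipaddress

        networks = {
            "application":    {"vlan": 100, "subnet": ipaddress.ip_network("10.10.0.0/24")},
            "storage":        {"vlan": 200, "subnet": ipaddress.ip_network("10.20.0.0/24")},
            "management":     {"vlan": 300, "subnet": ipaddress.ip_network("10.30.0.0/24")},
            "oob_management": {"vlan": 400, "subnet": ipaddress.ip_network("10.40.0.0/24")},
        }

        # A compute node typically has a leg on every network except the
        # out-of-band one, which connects to its BMC instead.
        compute01 = {name: str(list(net["subnet"].hosts())[10])
                     for name, net in networks.items() if name != "oob_management"}
        print(compute01)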


    Once you get past this point, there are many different ways to continue splitting off or adding special-purpose functions, but these are less common outside of very large sites.


    For example, you may have multiple independent storage systems you want to access:

    Add a second storage cluster, separate from the first, on the storage network

    Or your cluster may depend on fast access to an external resource, and you want to attach a dedicated tier of network routers:

    Add a pair of router nodes on the management network. The router nodes also have connections to the internet

    Or you may even have some slower tier of storage that you need to move data in and out of, such as S3 or a tape system, and build a set of dedicated machines for data movement:

    Add a pair of data movement nodes connected to the management nodes. The data movement nodes also have a connection to an external storage system

    In other words, you can add as much complexity as you like! Or, as much as your users and workloads require. Very complex environments serving many researchers may have many different tiers of dedicated machines, for data movement, network routing, managing software licenses, and more. But not every environment will need this type of complexity.


    In all cases, the general strategy is the same: if your work is being bottlenecked by some special-purpose function, you may consider moving that work to dedicated machines to get better performance.


    This needs to be balanced, though, against the costs of doing so, in money, power, rack space, or other constraints. Frequently, there’s a trade-off between adding special-purpose machines and adding more compute machines, and your users might prefer to just have more compute!

    This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

    Things I Learned from Looking at 500 Research Computing Manager Jobs over 10 Months


    I write a weekly newsletter for research computing managers, team leads, or those aspiring to those roles. One of the things I've wanted to emphasize in the newsletter is that managing research computing teams is a profession in and of itself, and worth doing well. Part of that is emphasizing the existence of career opportunities.


    So since the beginning I've included job listings and maintained a job board, posting about 500 such jobs over the past 10 months and removing them as they become filled or otherwise unavailable. My main criterion for such jobs is whether or not I would describe the work as principally about managing or leading a research computing team - admittedly a fuzzy definition.


    Over the course of examining those 500 jobs - and looking through many, many more that never made it to the board - I've learned some things:


    There are a lot of jobs out there for people managing research computing teams. I've never had any trouble finding some each week to put on the job board, or finding highlights interesting enough to list at the end of the newsletter.


    There are certainly many more I'm missing. As the field matures there are starting to be job boards for research software development or for particular sub-fields of research computing like bioinformatics. But, consistent with research's neglect of management as something that needs to be done and done well, no such resources exist for the managers of those important roles. So I have a go-to list of Google and other searches for jobs which I go through a couple of times a week.


    In research, when you're doing a literature search and you start hitting the same papers again and again, you're pretty sure you've got a mostly complete list of references as a starting point. I'm nowhere near that with my managing-research-computing-teams job list, largely because the names we use for these roles vary so widely. So I'm confident that I only see a fraction of these jobs. (You can help out by submitting any jobs you know about.)


    Research computing teams are broadening, and so is the need for managers. Where this is most obvious is in data science or data engineering teams, which have spread to every sector and every industry. Generic "Manager, Data Science" jobs are so plentiful that I don't list most of them - many are more operational than "jobs leading research computing teams" - but even the ones that make the cut are in sectors from health to transportation to retail to engineering. There are increasingly data engineering, cloud architecture, etc. roles for supporting research computing efforts, to say nothing of ML/AI jobs. And there are countless management/team lead jobs for specialist research computing in health, biology, and biomedicine.


    Research data management is increasingly employable. As the initial data science and data engineering work in organizations matures, many institutions are realizing that they now need principled approaches to data governance, stewardship, and modelling. This is happening most rapidly in heavily regulated industries — health, finance — but is starting to percolate outwards. Those who have maintained and curated data resources for research, or who have supported those that do, will be surprised at the number of jobs in the private sector for doing similar work.


    “Traditional” research computing team management jobs remain, and they take forever to fill: There are definitely still routinely “Director of Research Computing, University of Somethingorother” jobs out there. And I don't know whether it's because of the pandemic, or because of the competition from other sectors, but such jobs are taking forever to fill this year. I routinely see them open for months, and then reposted one or more times. I see this both for managers of teams running on-premises hardware and for teams mainly doing software development.


    Despite the talk of RSE units, most research computing jobs within academic institutions are lone outposts: While in companies research computing - data science, computing resource management, software development - tends to be centralized (even if it is matrixed out or embedded into other teams), in academia we're definitely not there - most of the team lead/manager jobs I see in universities are for small teams embedded in a single institute or project. I think that's a shame; it greatly reduces the opportunity for cross-pollination, learning, and developing best practices; makes work less efficient and less satisfying; and it makes teams more management-heavy than they need to be.

    This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

    What will Post-Pandemic Academic Research Computing Look Like?


    We’re nowhere near the endgame yet. But even now in the middle of the COVID-19 times it is not too soon to think about what research computing will look like when the threat of infection by SARS-CoV-2 no longer shapes our work lives. While the future looks good for research computing team individual contributors who are willing to learn on the fly, the coming years will be treacherous for teams as organizations, and their managers.


    What hath 2020 wrought

    There are a few pretty unambiguous “inputs” from 2020 that will have consequences for years to come:


    Institutional and governmental coffers are depleted

    Entire sectors of the economy are in bad shape. Institutional budgets have suffered across the board. There have been large unforeseen costs for dealing with the pandemic, while normal operating costs haven’t gone down much except in tiny budget lines like travel.


    At universities, international student tuitions have dropped less than expected, but there are well-founded worries that they will continue dropping and not bounce back. In a lot of jurisdictions, dollops of one-off support for educational institutions came from governments. Those governments will be tightening their budgets as soon as they can, and reducing rather than increasing payouts over the course of many years to claw their way back to budget balance.


    Clients are now used to research computing teams being distant

    We’ve all been working from home over the course of months. A lot of previously unquestioned assumptions about how important it is to have certain groups or equipment “here” with the research groups so that they could be accessible are now known to be mistaken. Researchers, VPRs, and funders are seeing that virtual teams for research computing can support research perfectly well with some controls in place. Yes, it’s handy to sit down beside someone to get things sorted sometimes but we’ve learned we can do pretty well without that.


    Primacy of health research

    Life sciences has been an increasingly important part of research computing since quantitative molecular methods took off, and even more since the human genome project’s completion. During the pandemic, centres have dramatically shifted towards prioritizing various kinds of health research workloads, which in turn has boosted capacity (and expectations) of lots of health related research groups and their funders.


    Importance of data and data sharing better understood

    With most of the world daily monitoring case counts, “excess deaths”, case fatality rate vs infection fatality rate and the like, the importance of clean, high-quality data has never been more widely understood. And the limits of what “AI” or advanced analysis techniques can do with poor quality data is very clear.


    And as data’s importance becomes clearer the importance of pooling data has never been more obvious, even in disciplines typically very reluctant to do so (sometimes for good reasons, sometimes not). That’s very unlikely to rapidly change back.


    The best research computing teams have learned to communicate a lot better

    The research computing and data teams that have come through this pretty well and with satisfied clients have really had to up their games in communications - internally and externally, synchronous and asynchronous. Many of these teams already had experience successfully working with distributed collaborators and partners, and built on those strengths.


    But not all research computing and data teams have come through this experience with satisfied client researchers.


    Consequences: 2021 and beyond

    None of the changes I’ve described above are particularly subtle or ambiguous, and I think the short-term consequences are almost as clear. Some short and mid-term consequences will be, roughly in order of certainty:


    Research computing teams are never going back to 100% work-from-office

    This one is so obvious it hardly needs to be said, but let’s say it. Space on University campuses has always been tight, and 2020 has shown us that research computing teams don’t need to be on campus. While each team will have to figure out its own approach - fully distributed, rotating with hot-desking, hybrid - we’re never going back to routinely being all together on campus.


    Research budgets are mostly going to shrink, except in health

    Governments worldwide will start trying to get their finances back into balance after the huge COVID-19 expenditures and shrunken tax revenues of 2020 and early(?) 2021. While research budgets probably won’t be drastically cut, they certainly won’t grow.


    On the other hand, even once the pandemic is well and truly over, funding for health and health research will be extremely popular, voters will be wary of another pandemic, and COVID-19 long-term effects will still need to be studied and monitored. Health and health research will have an even larger claim to priority over stagnant research funding than before, and institutions will be eager to support such efforts.


    Research support budgets are going to shrink

    With research budgets flat and institutions facing declining government funding and possibly international enrolments, there is going to be pressure to make cuts wherever possible. “Overheads” for the basic missions of teaching and research are going to be under increasing scrutiny.


    Any research computing team that can't communicate very clearly its value to VPRs and university administration in terms of research dollars and other outcomes the administration cares about is going to be facing a lot of very uncomfortable questions. Any cuts to research support services that won't result in months and months worth of angry phone calls are going to look pretty attractive to administrations trying to figure out what to cut without firing faculty or teaching staff.


    Research computing teams will consolidate

    VPRs have long eyed various kinds of core facilities and wondered if they could be contracted out. A year from now, with VPRs earnestly looking for budget cuts, researchers increasingly comfortable with getting research computing support over Zoom and Miro, an increased emphasis on data-sharing and thus remote data infrastructures, and some research computing teams better able to communicate their value than others, there will be consolidation and sorting of research computing and data teams.


    Very small groups - a couple of specialists embedded in a large (especially health-related) research group, or a handful of research computing experts in a large corporate IT shop - are likely safe as long as they support research that continues to be funded as they’re too small a target to be worth cutting. But medium-sized centres with vague goals and priorities who can’t communicate the value they bring are going to be called upon to justify their existence.


    As this shakes out, funding will favour small, hyper-specialized teams who deeply understand some segment of local needs, and large regional centres with diversified funding sources, excellent communications, and clear goals and priorities that enter contracts with other institutions and groups.


    There isn’t going to be a dramatic “big bang” of closures, dissolutions, or mergers. Instead, straitened circumstances and very broad acceptance of virtual research support and data infrastructure will accelerate trends that have already been visible. And it’s going to be led by individual contributors who are about to realize their employment options have significantly increased.


    More adoption of industry best practices for running computer systems

    Research software quality takes a lot of (unjustified) guff, but the truth is that with version control, unit tests, CI/CD, and packaging, research software development is much closer to industry best practices than research computing systems operations is.


    With health data applications becoming increasingly important, that will have to change. Privacy restrictions around PHI will require better controls, documentation, and processes, including security incident reporting. Emphasis on data sharing and availability will push teams towards higher availability SLAs, which will push towards on-calls and practices like, if not chaos-engineering, at least routine testing of failures as with “disasterpiece theatre”.


    Portfolios of research computing systems are going to be rebalanced away from “big metal”

    As with research computing teams, this isn’t going to be a big bang or a sudden pivot, but an acceleration of trends already in place.


    With greater emphasis on data and health applications, very large-scale physical science simulations (my own background) will be an even smaller, while still important, use case for research computing. With greater emphasis on remote data infrastructures, remote teams, and data sharing, commercial cloud adoption in research computing will continue to grow. On-premises infrastructure is going to continue to tilt away from being able to support small numbers of large simulations towards architectures which can provide more flexibility for a wider range of computing and data applications.


    What does it mean for us?

    Like the mainstreaming of telemedicine, many of the consequences of the pandemic will just be pushing forward something that was always going to happen eventually but had lacked an impetus until now. And for many (most?) research computing team individual contributors, things will look pretty good - work-from-home will open up more job opportunities, even if the portfolio of projects they support starts looking different.


    But for research computing teams as organizations, and for their managers, the coming years will be treacherous. If the research computing team supporting university research groups doesn't have to be on campus any more, why do they have to be university employees at all? If a neighbouring centre has better-run systems with better availability and already handles PHI, why not just use it for research software development support too?


    It is not too early to start upping your game when it comes to the administration, your researchers, and your team members. For the administration, you're going to have to ensure that you can justify every budget item in terms the administration recognizes and values, and that you have clear and focussed goals and priorities. For researchers, you can start making sure that your systems, processes, and practices are as high-quality and researcher-focussed and -friendly as possible. For your team members, if you're not regularly communicating with them to make sure they're happy in their current roles and with their career development, this is the time to start.

    This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

    When Research Infrastructure Is and Isn't Maintained


    (Note: This post is adapted from #53 of the Research Computing Teams Newsletter)


    There were two big stories in the news this week (as I write this, at the end of 2020) about what’s possible with sustained research infrastructure funding and what happens when research infrastructure isn’t sustained.


    In the first, you’ve probably read about AlphaFold, DeepMind’s effort to bring deep learning to protein folding. It did very well in the 14th annual Critical Assessment of (protein) Structure Prediction (CASP) contest. Predictably but unfortunately, Google’s press releases wildly overhyped the results - “Protein Folding Solved”.


    Most proteins fold very robustly in the chaotic environment of the cell, and so it’s expected that there should be complex features that predict how proteins’ folded configurations look. We still don’t know anything about the model AlphaFold used - other than that it did very well on these 100 proteins - or how it was trained. There are a lot of questions about how it will work with more poorly behaved proteins - a wrong confident prediction could be much worse than no prediction. But it did get very good results, and with a very small amount of computational time to actually make the predictions. That raises a lot of hope for the scope of near-term future advances.


    But as Aled Edwards points out on Twitter, the real story here is one of long-term, multi-decadal investment in research infrastructure, including research data infrastructure, by the structural biology community. The Protein Data Bank was set up 50 years ago (!!), and a culture of data sharing of these laboriously solved protein structures was set up, with a norm of contributing to (and helping curate) the data bank. That databank has been continuously curated and maintained, and new techniques developed, eventually leading to the massive database on which methods can now be trained and results compared.


    It’s the sustained funding and support - monetarily but also in terms of aligning research incentives like credit - which built the PDB. The other big story we heard this week tells us that you can’t just fund a piece of infrastructure, walk away, and expect the result to be self-sustaining. On December 1st, the iconic Arecibo Radio Telescope in Puerto Rico collapsed. The telescope was considered important enough to keep running - there was no move to decommission it until late November - but not important enough to keep funding the maintenance to keep it functioning.


    Overhead image of a broken Arecibo Telescope


    Digital research infrastructure - software, data resources, computing systems - fall apart at least as quickly without ongoing funded effort to maintain them. It’s not about whether these digital pieces of infrastructure are “sustainable”; it’s whether or not they are sustained. Too many critical pieces of our digital research infrastructure are not being sustained.

    This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    When you got time on your side - create something


    As we all settle down into the new norm of being housebound during this global epidemic, it’s given me the opportunity to work on projects which would have remained on the back burner for an indefinite period.


    As the eternal tinkerer, I’ve lately turned my attention to the Arduino community and all of the very interesting projects and possibilities that exist. One wearable electronics project that caught my eye a number of months back was a wristwatch project which I spotted on the Adafruit site here. Of course, ordering the parts was the easy part. What I found in the meantime is that my soldering iron was also kaput and I could not for the life of me find any of the wires, solder and other electronics tools. So alongside the box full of electronics components, I ordered a shiny new soldering iron, essentials for soldering and a few different types of glue.


    And the last important piece of this jigsaw puzzle was the watch band. I had been scouting around for some time for a suitable band - something high quality, yet fashionable. I managed to purchase a fantastic Kapital (Japan) indigo velcro band from Grailed.


    As all of the pieces were finally in my hands, what was missing was time. This past weekend, I was able to devote some time to prototyping and ultimately soldering together all of the pieces with some younger helping hands. Definitely my soldering skills were not what they used to be. But there was something special about sitting on my back porch in the spring sunshine stripping wires and soldering. The most challenging part for me was not assembling the watch; rather, it was gluing the straps to the back of the watch face in order to be able to mount it to the watch band. I had to try a few different glues with a lot of patience. I wasn’t keen on using E6000 glue due to its toxicity…and rather opted to use a non-toxic glue from Aleene’s. Not sure how it will hold up in the long term though - time will tell (pun intended). Above is a photo of the watch connected to its USB “umbilical cord” for power and to load the sketch (code).


    And this is how it looks on my arm running off of a mini LiPo battery (also courtesy of Adafruit).


    Tinkering is fun!

    This is a crosspost from Jonathan Dursi (R&D computing at scale). See the original post here.

    White Managers in Research Computing, We Need to be Speaking Out About Racism, then Listening and Advocating


    Many people in our research computing community — and in the broader research community we serve — are in pain this week. There’s another video of another Black man, George Floyd, begging for his life while being murdered by a police officer in Minneapolis. Here in Toronto a Black woman, Regis Korchinski-Paquet, died when what should have been a routine call resulted in a mystifying number of police officers showing up. With only police officers present in her apartment, she went over her high-rise balcony to her death, with her last words being, repeatedly, “Mom, help”. This is all taking place during a pandemic which is disproportionately killing and incapacitating Black people, Indigenous people, and people of colour because they have less access to jobs that can be worked from home, and are more likely to be living in overcrowded multi-generational homes.


    So with news and social media being dominated by the consequences of systemic racism, anti-Black violence in particular, and police violence in reaction to anti-police-brutality protests, a lot of people are feeling despair and anguish.


    As managers, we are leaders of communities. Small communities, but nonetheless. We have a responsibility to members of those communities to let them know we support them and are here for them. It doesn’t take much to be small bit of genuine help to someone really struggling. But we have to initiate the conversations. Our community members won’t open up to us about these topics until we’ve demonstrated we can have some kind of adult conversation about racism.


    Doing or saying something is scary for many of us in research computing — who are overwhelmingly not Black and mostly white, which is a related conversation we need to have — because we are worried, reasonably, about getting it wrong. And it’s easy to make the excuse that because we don’t have Black team members (which… you know, same) it’s not something we need to address.


    Most of us don’t have team members who have gotten sick with COVID-19 either, but we’ve certainly been addressing that. It’s been hard and uncomfortable and we didn’t get it all right the first time around and we did it anyway. You don’t necessarily know who’s hurting in your team and community or why. Not addressing a topic dominating the news and social media now doesn’t project professionalism, it just suggests discomfort or indifference.


    I do not have great suggestions about what to say or do. I can offer some articles and collections of resources I’m finding useful:


    I can also tell you what I’m doing at work. I’ve raised the issue at our all hands meeting using words much like the above, and let people know they can talk to me about it if they need to. Unhelpfully, I sounded a bit awkward, even after practicing, but the next conversation will be easier. I’ve made a point of checking in a little deeper with people during one-on-ones, and doing a lot of listening, I’m listening for feedback even when it’s uncomfortable, and I’ll keep reading those materials, and others, to see what I can do better and how I can support change.


    That’s not the best or even a particularly good way to address what’s going on now and what’s been going on for a very long time. It’s the bare minimum, and started too late. The challenge will come when making changes, then advocating for more change to peers and upwards. But it’s a start.


    From issue #27 of the Research Computing Teams newsletter

    This is a crosspost from Dereks Web (Thoughts from Derek). See the original post here.

    XRootD Client Manager


    The validation project for XRootD Monitoring is moving to phase 2, scale testing. Phase 1 focused on correctness of single-server monitoring. The report is available.


    We are still forming the testing plan for the scale test of XRootD, but a component of the testing will be multiple clients downloading from multiple servers. In addition, we must record exactly how much data each client reads from each server in order to validate the monitoring against the clients’ real behavior.


    This level of testing will require detailed coordination and recording of client actions. I am not aware of a testing framework that can coordinate and record accesses of multiple clients and servers, so I spent the weekend developing a simple framework for coordinating these tests.


    Some requirements for the application are:

    • Easy-to-use interface
    • Easy to add clients and servers
    • Authenticated access for clients, servers, and interface
    • Storage of tests and results

    I chose Heroku for prototyping this application.


    Interface


    The web interface is available at https://xrootd-client-manager.herokuapp.com/. I chose to host it on Heroku as it is my go-to for pet projects. I will likely move this over to OSG’s production Kubernetes installation soon. The entire application is only the web interface and a back-end Redis data store.

    Screenshot of the simple web interface

    The web interface shows the connected clients and servers. The web interface also connects to the web server with a persistent connection to update the list of connected clients.


    Client Communication


    Client communication is handled through a Socket.IO connection. Socket.IO is a library that creates a bi-directional, event-based communication channel between the client and the server. The communication is over WebSockets if possible, but will fall back to HTTP long polling. A good discussion of long polling vs. WebSockets is available from Ably. A Socket.IO connection is established between each worker, server, and web client and the web server.


    The difficult part is authenticating the Socket.IO connections. We discuss this in the security section.


    Security


    Securing the commands and web interface is required since the web interface is sending commands to the connected worker nodes and servers.


    Socket.IO Connections


    The Socket.IO connection is secured with a shared key. The communication flow for a non-web client (worker/server) is:

    1. A JWT is created from the secret key. The secret key is communicated through a separate secure channel; in most cases, it will be through the command-line arguments of the client. The JWT has a limited lifetime and a scope.
    2. The client registers with the web server, with an Authentication bearer token in the headers. The registration includes details about the client. It returns a special (secret) client_id that will be used to authenticate the Socket.IO connection. The registration is valid for 30 seconds before the client_id is no longer valid.
    3. The client creates a Socket.IO connection with the client_id in the request arguments (a sketch of this flow follows below).
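
    A minimal sketch of that flow in Python is below. The endpoint path, claim names, and payload fields are my guesses for illustration; only the overall sequence follows the description above:

        # Illustrative worker-side flow; endpoint and field names are assumed.
        import time

        import jwt        # PyJWT
        import requests
        import socketio   # python-socketio client

        BASE = "https://xrootd-client-manager.herokuapp.com"
        SECRET = "shared-key-from-the-command-line"   # delivered out of band

        # 1. Short-lived, scoped JWT derived from the shared secret.
        token = jwt.encode({"scope": "register", "exp": int(time.time()) + 60},
                           SECRET, algorithm="HS256")

        # 2. Register with a bearer token; the response carries the (secret)
        #    client_id, which is only valid for a short window.
        resp = requests.post(f"{BASE}/register",      # hypothetical endpoint
                             headers={"Authorization": f"Bearer {token}"},
                             json={"type": "worker", "host": "worker01"})
        client_id = resp.json()["client_id"]

        # 3. Open the Socket.IO connection with client_id as a request argument.
        sio = socketio.Client()
        sio.connect(f"{BASE}?client_id={client_id}")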

    Web Interface


    The web interface is secured with an OAuth login from GitHub. There is a whitelist of allowed GitHub users that can access the interface.


    The flow for web clients connecting with Socket.IO is much easier since they are already authenticated with OAuth from GitHub:

    1. The user authenticates with GitHub.
    2. The Socket.IO connection includes cookies such as the session, which is signed by a secret key on the server. The session’s github key is compared to the whitelist of allowed users (a sketch follows below).
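
    Assuming the server side is Flask with Flask-SocketIO (the post doesn't say, so treat this as a guess), the connect-time whitelist check might look roughly like this:

        # Rough sketch; the whitelist and session key names are invented.
        from flask import Flask, session
        from flask_socketio import SocketIO

        app = Flask(__name__)
        app.secret_key = "server-secret"   # signs the session cookie
        socketio_server = SocketIO(app)

        ALLOWED_GITHUB_USERS = {"some-github-user"}

        @socketio_server.on("connect")
        def accept_only_whitelisted():
            # The cookie is signed, so session["github"] can be trusted here;
            # returning False rejects the Socket.IO connection.
            return session.get("github") in ALLOWED_GITHUB_USERS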

    Storage of tests and results


    Storage of the tests and results is still being designed. Most likely, the tests and results will be stored in a database such as Postgres.


    Conclusions


Heroku provides a great playground for prototyping these web applications. I hope to eventually find an alternative that will run on OSG’s production Kubernetes installation.


The web application is still being developed, and there is much to be done before it can be fully utilized for the scale validation. But many of the difficult components are complete, including the communication and eventing, the secure web interface, and the clients.


    The GitHub repos are available at:

diff --git a/2021/10-4-to-the-ten64-with-rockstor/index.html b/2021/10-4-to-the-ten64-with-rockstor/index.html
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    10-4 to the Ten64 with Rockstor


I love it when a plan comes together! And this time, I’m not referring to a daring rescue by the A-Team, but rather something just slightly more mundane - network attached storage (NAS).


I wrote back in March of this year about my experience setting up an Arm based NAS for home use running Rockstor on my venerable SolidRun macchiatoBIN board. Although the macchiatoBIN served in this role well, one limiting factor is the 3 onboard SATA ports. When used as a desktop, this wasn’t an issue, but as a NAS it would limit things down the road in terms of adding storage. Yes, I know I could have purchased a PCIe SATA card to add additional ports, but decided against going this route given the various foibles I encountered with PCIe support over the years with the macchiatoBIN.


My mind had been set a number of months earlier on purchasing a Traverse Ten64 network appliance and using it primarily as a NAS. The Ten64 was attractive to me because of its expandability, ECC RAM support, abundance of network ports and an interesting capability known as DPAA2, courtesy of the NXP Layerscape LS1088A processor. A little bit more about DPAA2 later in the writeup. Furthermore, the Ten64 could stand in for home router duties should my (also venerable) Turris Omnia router decide to give up the ghost.


Through all of this, I heard the chants of QNAP and Synology from my friends, who all thought that I was making things overly complicated for myself. For me, it was a no-brainer. The Ten64 would provide unprecedented flexibility and would give me a handy appliance which could take on NAS duties as well as other tasks such as tinkering with K8s (k3s) clusters. And who could deny the additional cool factor of the red PCB of the Ten64! Ultimately, I just love messing around with technology, and I’m always looking for unique and flexible solutions. Plug and play? Nein Danke!


Back in March, after assessing that an Arm based NAS was indeed a workable solution, I started to seek out the necessary bits and pieces in anticipation of the arrival of the Ten64 board. Of course, with COVID still in the air I was quite worried about being able to get all of the bits I needed in time. Over the summer, I diligently got all of the following pieces ready:

• 1 x Kingston KSM268SE8/16ME 16GB DDR4 2666 MHz ECC SODIMM
• 1 x IOCrest IO-M2F585-5I M.2 B/M 5-port SATA controller
• 2 x Seagate Ironwolf 2 TB NAS drives
• 1 x Seagate Ironwolf 240 GB NAS SSD
• 1 x Fractal Design Array R2 mini-ITX NAS case

And the plan was slowly coming together. At this stage only the Ten64 itself was missing. And then, one fateful day in August, the Ten64 arrived at long last! And it was rock 'n' roll time. The Traverse Ten64 online documentation and forum turned out to be invaluable sources of information to help me get up and running. In fact, if you search the forum you’ll find my name there in a few threads, in particular around DPAA2, which was the most thorny issue to resolve. Full disclosure: DPAA2 support in Linux distros is a bit hit and miss.


The Ten64 shipped in its own small form factor case. I set up the Ten64 on my workbench, installed the RAM and M.2 SATA controller, and connected the 240GB SATA SSD. The end game was to get the system booting the openWrt-based muvirt from the 240GB SATA SSD and to run Rockstor as a virtual machine under muvirt, with network interfaces managed by DPAA2.


Once the software side of the house was figured out, it was time to install the Ten64 board into the Fractal Design NAS case. This is what it looked like during the installation phase.


There are tons of resources on NXP DPAA2 which can be found on the Internet. The Ten64 online documentation includes a useful overview and details. It’s effectively a way to represent network objects on the NXP LS1088A processor of the Ten64 and pass those securely into the VM running on the system - which in my case was going to be Rockstor running on an OpenSUSE LEAP 15.3 VM. With DPAA2 I can avoid using virtualized networking for the VMs, for better performance. Again, I’m very far from being an authority on DPAA2, but it was definitely an important selling point for me, given my use case.


DPAA2 took some effort to get working, but I’m very pleased with the outcome. Ultimately, it required updated versions of muvirt, re-compilation of the VM guest kernel to include the necessary DPAA2 patches, and flashing a new data path layout to the Ten64 board. You can find all of the nitty-gritty details about this in the following Ten64 forum thread.


Here is a view of the Rockstor dashboard showing activity on the system. I’m a fan of the dashboard as it gives important details at a glance about the state of the NAS.


So what does the future hold? At the moment I’m migrating data to the Rockstor NAS. I’ve not done extensive performance tests, but suffice it to say that the performance reading and writing to the NAS is what I would expect over Gigabit Ethernet. I’ve installed both the Jellyfin and Netdata rock-ons as well, to provide media server capabilities and detailed metrics on the system load. I anticipate that I’ll be looking more closely at k3s in the coming weeks.


So this is a bit of a pat-myself-on-the-back moment. I’m very pleased with the outcome and the capabilities of the Ten64 now, and the room it will provide to grow in the future. And what also matters to me is that, in the end, I did it my way.

diff --git a/2021/booting-hifive-unmatched/index.html b/2021/booting-hifive-unmatched/index.html
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    Booting HiFive Unmatched


For those of you who like system bootup videos, here is the latest addition to my collection. Here I’ve captured the bootup of Ubuntu 21.04 on a SiFive HiFive Unmatched developer board. This is a capture of the bootup messages over the serial console, using minicom and the appropriate USB serial cable.

diff --git a/2021/ceph-crimson-2021-q2-project-update/index.html b/2021/ceph-crimson-2021-q2-project-update/index.html
This is a crosspost from Mark Nelson's Blog. See the original post here.
    Ceph Crimson 2021 Q2 Project Update


    Slides are available here.

diff --git a/2021/ceph-crimson-2021-q3-project-update/index.html b/2021/ceph-crimson-2021-q3-project-update/index.html
This is a crosspost from Mark Nelson's Blog. See the original post here.
    Ceph Crimson 2021 Q3 Project Update


This is the first time we’re seeing Bluestore in Crimson beating Bluestore in Classic in some (low core count) tests. We're starting to see lower tail latency as well, which is a really good sign. Top-end performance will be contingent on multi-reactor support though. Slides are available here.

diff --git a/2021/crimson-vs-classic-1-nvme-multi-osd-analysis/index.html b/2021/crimson-vs-classic-1-nvme-multi-osd-analysis/index.html
This is a crosspost from Mark Nelson's Blog. See the original post here.
    Crimson vs Classic 1 NVMe Multi-OSD Analysis


    Spreadsheet looking at Crimson vs Classic performance when scaling multiple OSDs on one NVMe drive. Done to simulate what we can hopefully expect from multi-reactor down the road. Includes cycles/OP comparisons as well.

diff --git a/_posts/gaborsamu/2021-3-5-aarch64_nas.md b/2021/fun-with-an-aarch64-nas/index.html


This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    Fun with an AArch64 NAS


If you’re anything like me, managing the data that is produced by our modern lifestyles is a chore. I’m the designated archival person in the family and as such I’m always looking for better ways to manage the huge volumes of data, from family photos and video to all of my music which I’ve digitized from my

real 10m25.537s
user 0m1.316s
sys 0m14.994s
diff --git a/_posts/gaborsamu/2021-12-15-lsf_openshift_mpi.md b/2021/hello-operator-i-need-an-hpc-cluster-fast/index.html

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    Hello operator. I need an HPC cluster – fast


    As users of HPC look to build new workflows that go beyond traditional simulation and modeling, cloud native development models that rely upon Kubernetes (K8s) and Docker are front of mind. K8s provides the framework and a large ecosystem of key applications and technologies which can help to facilitate this transformation of HPC. This naturally leads to HPC centers looking at approaches to use their infrastructure to run their traditional HPC workloads alongside K8s workloads.

To this end, there is a K8s/OpenShift integration with IBM Spectrum LSF, available as a tech preview on the Spectrum Computing GitHub. There are a few parts to the integration. Firstly, LSF can act as a scheduler for K8s/OpenShift pods. Secondly, an operator is available that makes it easy to deploy an LSF cluster on top of a K8s/OpenShift cluster. Note that the integration is a technical preview.


This December I’ve had the opportunity to revisit the K8s operator for LSF. The motivation in this case was the need to quickly spin up LSF test clusters in order to run some Intel MPI workloads. And as we’ll see, although getting the LSF clusters spun up on demand using the operator is very straightforward, a bit of fine tuning was needed in order to be able to successfully run the Intel MPI workloads.

The GitHub page where the K8s/OpenShift and Spectrum LSF integration is hosted contains documentation on how to set up the operator and deploy an LSF cluster on K8s/OpenShift. Spinning up the LSF cluster is quite simple once you’ve followed the steps in the above noted documentation. We’ve configured the deployment to include an LSF management pod and 4 LSF compute pods. LSF Suite for HPC v10.2.0.11 is the version that was deployed, and the target OpenShift cluster is hosted in the IBM Cloud.


In summary, the LSF operator for K8s/OpenShift makes it very easy to spin up an LSF cluster for your HPC needs. For specific types of workloads, the default shared memory setting for the pods is not sufficient; currently there is a post-deployment procedure to address this. And Intel MPI jobs run through LSF transparently use the blaunch task starter – as expected.


    In an upcoming blog, I plan to devote a bit more time discussing the Spectrum LSF operator for K8s/OpenShift.

diff --git a/_posts/gaborsamu/2021-9-14-riscv_benchmarking.md b/2021/hifive-unmatched-some-benchmarking-results/index.html


This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    HiFive Unmatched - some benchmarking results


No sooner did I receive my SiFive HiFive Unmatched board than the questions about the performance of the board started to come in - from far and wide. Prior to receiving the board, articles about the performance of the Freedom U740 SOC compared it with Arm Cortex-A55 cores. I’ve had a range of Arm


    With the folly of my MPI ways better understood, I ran HPL to completion with a result of 2.5 GFlops. I’ve not devoted more time to squeeze more performance from the system yet, as this result still seems low to me.

That concludes this post. What struck me was how easily I could compile things such as HPL and HPCG on the system. Certainly, there are no record breaking results to report here, but that was anticipated. Personally, I'm

Initiative (EPI) where the (vector) accelerator will rely upon RISCV. So I'd wager that the future is bright for RISCV in HPC - and as the saying goes, variety is the spice of life.

diff --git a/_posts/glennklockwood/2021-10-24-tagbloggercom1999blog-4307061427721284246post-7905659352474005091.md b/2021/iops-are-dumb/index.html
This is a crosspost from Glenn K. Lockwood's Blog. See the original post here.
    IOPS are dumb

    This post is a long-form dump of some thoughts I've had while testing all-flash file systems this past year, and bits of this appear in a presentation and paper I'm presenting at PDSW'21 about new benchmarking techniques for testing all-flash file systems.

    "How many IOPS do you need?"

I'm often asked this by storage vendors, and the question drives me a little bonkers.  I assume they ask it because their other customers bring them black-and-white IOPS requirements, but I argue that anyone would be hard-pressed to explain the scientific value of one I/O operation (versus one gigabyte) if ever called on it.  And yet, IOPS are undeniably important; the illustrious Rob Ross devoted a whole slide to this at a recent ASCAC meeting:

Rob Ross' perspective on why IOPS are now important for HPC I/O

    I agree with all of Rob's bullets and yet I disagree with the title of his slide; IOPS are dumb, and yet ignoring them when designing a performance-optimized parallel file system is even more dumb in contemporary times.  So let's talk about the grey area in between that creates this dichotomy.


First, bandwidth is pretty dumb

    If there's one constant in HPC, it's that everyone hates I/O.  And there's a good reason: it's a waste of time because every second you wait for I/O to complete is a second you aren't doing the math that led you to use a supercomputer in the first place.  I/O is the time you are doing zero computing amidst a field called "high performance computing."

    That said, everyone appreciates the product of I/O--data.  I/O is a necessary part of preserving the results of your calculation, so nobody ever says they wish there was no I/O.  Instead, infinitely fast I/O is what people want since it implies that 100% of a scientist's time using an HPC is spent actually performing computations while still preserving the results of that computation after the job has completed.

Peeling back another layer of that onion, the saved results of that computation--data--have intrinsic value.  In a typical simulation or data analysis, every byte of input or output is typically the hard-earned product of a lot of work performed by a person or machine, and it follows that if you want to save a lot of bytes while spending as little time as possible performing I/O, the true value of a parallel storage system's performance is in how many bytes per second it can read or write.  At a fundamental level, this is why I/O performance has long been gauged in terms of megabytes per second, gigabytes per second, and now terabytes per second.  To the casual observer, a file system that can deliver 100 GB/s is more valuable than a file system that can deliver only 50 GB/s assuming all things are equal for this very reason.  Easy.

    This singular metric of storage system "goodness" quickly breaks down once you start trying to set expectations around it though.  For example, let's say your HPC job generates 21 TB of valuable data that must be stored, and it must be stored so frequently that we really can't tolerate more than 30 seconds writing that data out before we start feeling like "too much time" is being spent on I/O instead of computation.  This turns out to be 700 GB/s--a rather arbitrary choice since that 30 seconds is a matter of subjectivity, but one that reflects the value of your 21 TB and the value of your time.  It should follow that any file system that claims 700 GB/s of write capability should meet your requirements, and any vendor who can deliver such a system should get your business, right?

    Of course not.  It's no secret that obtaining those hero bandwidths, much like obtaining Linpack-level FLOPS, requires you (the end-user) to perform I/O in exactly the right way.  In the case of the aforementioned 700 GB/s file system, this means

1. Having each MPI process write to its own file (a single shared file will get slowed down by file system lock traffic)
2. Writing 4 MiB at a time (to exactly match the size of the network transmission buffers, remote memory buffers, RAID alignment, ...)
3. Using 4 processes per node (enough parallelism to drive the NIC, but not too much to choke the node)
4. Using 960 nodes (enough parallelism to drive all the file system drives, but not too much to choke the servers)

I've never seen a scientific application perform this exact pattern, and consequently, I don't expect that any scientific application has ever gotten that 700 GB/s of performance from a "700 GB/s file system" in practice.  In that sense, this 700 GB/s bandwidth metric is pretty dumb since nobody actually achieves its rated performance. Of course, that hasn't prevented me from saying these same dumb things when I stump for file systems.  The one saving grace of using bandwidth as a meaningful metric of I/O performance, though, is that I/O patterns are a synthetic construct and can be squished, stretched, and reshaped without affecting the underlying scientific data being transmitted.

    The value of data is in its contents, not the way it is arranged or accessed.  There's no intrinsic scientific reason why someone should or shouldn't read their data 4 MiB at a time as long as the bits eventually get to the CPU that will perform calculations on it in the correct order.  The only reason HPC users perform nice, 1 MiB-aligned reads and writes is because they learn (either in training or on the streets) that randomly reading a few thousand bytes at a time is very slow and works against their own interests of minimizing I/O time.   This contrasts sharply with the computing side of HPC where the laws of physics generally dictate the equations that must be computed, and the order in which those computations happen dictates whether the final results accurately model some physical process or just spit out a bunch of unphysical garbage results.

    Because I/O patterns are not intrinsically valuable, we are free to rearrange them to best suit the strengths and weaknesses of a storage system to maximize the GB/s we can get out of it.  This is the entire foundation of MPI-IO, which receives I/O patterns that are convenient for the physics being simulated and reorders them into patterns that are convenient for the storage system.  So while saying a file system can deliver 700 GB/s is a bit disingenuous on an absolute scale, it does indicate what is possible if you are willing to twist your I/O pattern to exactly match the design optimum.


But IOPS are particularly dumb

    IOPS are what happen when you take the value out of a value-based performance metric like bandwidth.  Rather than expressing how many valuable bytes a file system can move per second, IOPS express how many arbitrary I/O operations a file system can service per second.  And since the notion of an "I/O operation" is completely synthetic and can be twisted without compromising the value of the underlying data, you might already see why IOPS are a dumb metric of performance.  They measure how quickly a file system can do something meaningless, where that meaningless thing (an I/O operation) is itself a function of the file system.  It's like saying you can run a marathon at five steps per second--it doesn't actually indicate how long it will take you to cover the twenty six miles.

    IOPS as a performance measure was relatively unknown to HPC for most of history.  Until 2012, HPC storage was dominated by hard drives which which only delivered high-value performance for large, sequential reads and writes and the notion of an "IOP" was antithetical to performance.  The advent of flash introduced a new dimension of performance in its ability to read and write a lot of data at discontiguous (or even random) positions within files or across entire file systems.  Make no mistake: you still read and write more bytes per second (i.e., get more value) from flash with a contiguous I/O pattern.  Flash just raised the bottom end of performance in the event that you are unable or unwilling to contort your application to perform I/O in a way that is convenient for your storage media.

To that end, when a vendor advertises how many IOPS they can deliver, they really are advertising how many discontiguous 4 KiB reads or writes they can deliver under the worst-case I/O pattern (fully random offsets).  You can convert a vendor's IOPS performance back into a meaningful value metric simply by multiplying it by 4 KiB; for example, I've been presenting a slide that claims I measured 29,000 write IOPS and 1,400,000 read IOPS from a single ClusterStor E1000 OST array:

    Performance measurements of a single ClusterStor E1000 NVMe Lustre OST

    In reality, I was able to write data at 0.12 GB/s and read data at 5.7 GB/s, and stating these performance metrics as IOPS makes it clear that these data rates reflect the worst-case scenario of tiny I/Os happening at random locations rather than the best-case scenario of sequential I/Os which can happen at 27 GB/s and 41 GB/s, respectively.
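For what it's worth, the conversion between the two views is just arithmetic (a minimal sketch using the numbers quoted above):

    # IOPS × 4 KiB → bytes per second, using the measurements quoted above
    KIB = 1024
    print(29_000 * 4 * KIB / 1e9)       # ≈ 0.12 GB/s of random writes
    print(1_400_000 * 4 * KIB / 1e9)    # ≈ 5.7 GB/s of random reads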

    Where IOPS get particularly stupid is when we try to cast them as some sort of hero number analogous to the 700 GB/s bandwidth metric discussed above.  Because IOPS reflect a worst-case performance scenario, no user should ever be asking "how can I get the highest IOPS" because they'd really be asking "how can I get the best, worst-case performance?"  Relatedly, trying to measure the IOPS capability of a storage system gets very convoluted because it often requires twisting your I/O pattern in such unrealistic ways that heroic effort is required to get such terrible performance.  At some point, every I/O performance engineer should find themselves questioning why they are putting so much time into defeating every optimization the file system implements to avoid this worst-case scenario.

    To make this a little more concrete, let's look at this slide I made in 2019 to discuss the IOPS projections of this exact same ClusterStor E1000 array:

Projected performance of a ClusterStor E1000 NVMe Lustre OST based on a PCIe Gen3 platform

Somehow the random read rate went from a projected 600,000 to an astonishing 1,400,000 read IOPS--which one is the correct measure of read IOPS?

It turns out that they're both correct; the huge difference in measured read IOPS is the result of the 600 KIOPS estimate coming from a measurement that

1. ran for a much longer sustained period (180 seconds vs. 69 seconds)
2. used fewer client nodes (21 nodes vs. 32 nodes)
3. wrote larger files (1,008× 8 GiB files vs. 1,024× 384 GiB files)
    Unlike the IOPS measurements on individual SSDs which are measured using a standard tool (fio with libaio from a single node), there is no standard method for measuring the IOPS of a parallel file system.  And just as the hero bandwidth number we discussed above is unattainable by real applications, any standardized IOPS test for a parallel file system would result in a relatively meaningless number.  And yes, this includes IO-500; its numbers have little quantitative value if you want to design a parallel file system the right way.

    So who's to say whether a ClusterStor E1000 OST is capable of 600 kIOPS or 1,400 kIOPS?  I argue that 1,400 kIOPS is more accurate since I/O is bursty and a three-minute-long burst of completely random reads is less likely than a one-minute long one on a production system.  If I worked for a vendor though, I'm sure this would be taken to be a dishonest marketing number since it doesn't reflect an indefinitely sustainable level of performance.  And perhaps courageously, the official Cray ClusterStor E1000 data sheet doesn't even wade into these waters and avoids quoting any kind of IOPS performance expectation.  Ultimately, the true value of the random read capability is the bandwidth achievable by all of the most random workloads that will realistically be run at the same time on a file system.  Good luck figuring that out.


Write IOPS are really dumb
    As I said at the outset, I cannot disagree with any of the bullets in the slide Rob presented at ASCAC.  That first one is particularly salient--there are a new class of HPC workloads, particularly in AI, whose primary purpose is to randomly sample large datasets to train statistical models.  If these datasets are too large to fit into memory, you cannot avoid some degree of random read I/O without introducing biases into your weights.  For this reason, there is legitimate need for HPC to demand high random read performance from their file systems.  Casting this requirement in terms of 4 KiB random read rates to have a neat answer to the "how many IOPS do you need" question is dubious, but whatever.  There's little room for intellectual purity in HPC.

    The same can't be said for random write rates.  Write IOPS are a completely worthless and misleading performance metric in parallel file systems.

    In most cases, HPC applications approximate some aspect of the physical world, and mathematics and physics were created to describe this physical world in a structured way.  Whether you're computing over atoms, meshes, or matrices, there is structure to the data you are writing out and the way your application traverses memory to write everything out.  You may not write data out in a perfectly ordered way; you may have more atoms on one MPI process than another, or you may be traversing an imbalanced graph.  But there is almost always enough structure to scientific data to squish it into a non-random I/O pattern using middleware like MPI-IO.

    Granted, there are a few workloads where this is not true.  Out-of-core sorting of short-read DNA sequences and in-place updates of telescope mosaics are two workloads that come to mind where you don't know where to write a small bit of data until you've computed on that small bit of data.  In both these cases though, the files are never read and written at the same time, meaning that these random-ish writes can be cached in memory, reordered to be less random, and written out to the file asynchronously.  And the effect of write-back caching on random write workloads is staggering.

    To illustrate this, consider three different ways in which IOR can be run against an all-NVMe file system to measure random 4 KiB writes:

• In the naïve case, we just write 4 KiB pages at random locations within a bunch of files (one file per MPI process) and report what IOR tells us the write IOPS were at the end.  This includes only the time spent in write(2) calls.
• In the case where we include fsync, we call fsync(2) at the end of all the writes and include the time it takes to return along with all the time spent in write(2).
• In the O_DIRECT case, we open the file with direct I/O to completely bypass the client write-back cache and ensure that write(2) doesn't return until the data has been written to the file system servers.

These seemingly minor changes result in write IOPS rates that differ by over 30x (a minimal sketch of the three modes appears after the figure below):

Random write IOPS measured using IOR on an all-NVMe parallel file system
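To make the three cases concrete, here is a minimal single-process Python sketch of the same three measurement modes; the real measurements used IOR across many clients, so the file name, sizes, and counts here are arbitrary assumptions (Linux-only, since it relies on os.O_DIRECT):

    # Single-node sketch of the three write-IOPS measurement modes above;
    # it just shows where the time is (or isn't) counted, not real numbers.
    import mmap
    import os
    import random
    import time

    BS = 4096     # 4 KiB "IOP" size
    N = 65536     # number of random writes (256 MiB total)

    def measure(flags, do_fsync):
        fd = os.open("testfile", os.O_WRONLY | os.O_CREAT | flags)
        os.ftruncate(fd, N * BS)       # size the file up front
        buf = mmap.mmap(-1, BS)        # page-aligned buffer, needed for O_DIRECT
        buf.write(b"x" * BS)
        offsets = [i * BS for i in random.sample(range(N), N)]
        t0 = time.time()
        for off in offsets:
            os.pwrite(fd, buf, off)
        if do_fsync:
            os.fsync(fd)               # count the cache flush against the clock
        elapsed = time.time() - t0
        os.close(fd)
        return N / elapsed             # "IOPS"

    print("naive   :", measure(0, False))             # mostly measures page cache
    print("fsync   :", measure(0, True))              # includes the flush time
    print("O_DIRECT:", measure(os.O_DIRECT, False))   # every write hits storage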

    Again we ask: which one is the right value for the file system's write IOPS performance?

    If we split apart the time spent in each phase of this I/O performance test, we immediately see that the naïve case is wildly deceptive:

    -

    -
Breakdown of time spent in I/O calls for 4K random write IOR workload
    The reason IOR reported a 2.6 million write IOPS rate is because all those random writes actually got cached in each compute node's memory, and I/O didn't actually happen until the file was closed and all cached dirty pages were flushed.  At the point this happens, the cache flushing process doesn't result in random writes anymore; the client reordered all of those cached writes into large, 1 MiB network requests and converted our random write workload into a sequential write workload.

    The same thing happens in the case where we include fsync; the only difference is that we're including the time required to flush caches in the denominator of our IOPS measurement.  Rather frustratingly, we actually stopped issuing write(2) calls after 45 seconds, but so many writes were cached in memory during those 45 seconds that it took almost 15 minutes to reorder and write them all out during that final fsync and file close.  What should've been 45 seconds of random writes to the file system turned into 45 seconds of random writes to memory and 850 seconds of sequential writes to the file system.

    The O_DIRECT case is the most straightforward since we don't cache any writes, and every one of our random writes from the application turns into a random write out to the file system.  This cuts our measured IOPS almost in half, but otherwise leaves no surprises when we expect to only write for 45 seconds.  Of course, we wrote far fewer bytes overall in this case since the effective bytes/sec during this 45 seconds was so low.

    Based on all this, it's tempting to say that the O_DIRECT case is the correct way to measure random write IOPS since it avoids write-back caches--but is it really?  In the rare case where an application intentionally does random writes (e.g., out-of-core sort or in-place updates), what are the odds that two MPI processes on different nodes will try to write to the same part of the same file at the same time and therefore trigger cache flushing?  Perhaps more directly, what are the odds that a scientific application would be using O_DIRECT and random writes at the same time?  Only the most masochistic HPC user would ever purposely do something like this since it results in worst-case I/O performance; it doesn't take long for a user to realize this I/O pattern is terrible and reformulating their I/O pattern would increase their productive use of their supercomputer.

    So if no user in their right mind does truly unbuffered random writes, what's the point in measuring it in the first place?  There is none.  Measuring write IOPS is dumb.  Using O_DIRECT to measure random write performance is dumb, and measuring write IOPS through write-back cache, while representative of most users' actual workloads, isn't actually doing 4K random I/Os and therefore isn't even measuring IOPS.

    -

    -

    Not all IOPS are always dumb

    This all being said, measuring IOPS can be valuable in contexts outside of parallel file systems.  Two cases come to mind where measuring IOPS can be a rational yard stick.
    -

    1. Serving up LUNs to containers and VMs

    By definition, infrastructure providers shouldn't be responsible for the applications that run inside black-box containers and VMs because they are providing storage infrastructure (block devices) and not storage services (file systems).  Blocks in and blocks out are measured in IOPS, so the fit is natural.  That said, HPC users care about file systems (that is, scientific applications do not perform I/O using SCSI commands directly!), so worrying about LUN performance isn't meaningful in the HPC context.
    -

    2. Measuring the effect of many users doing many things

    While individual HPC workloads rarely perform random I/Os on purpose, if you have enough users doing many small tasks all at once, the file system itself sees a workload that approaches something random.  The more, small, independent tasks running parallel and the farther back you stand from the overall I/O load timeline, the more random it looks.  So, I argue that it is fair to measure the IOPS of a parallel file system for the purposes of measuring how much abuse a file system can take before it begins to impact everybody.
    +

    +

    Not all IOPS are always dumb

    +
    This all being said, measuring IOPS can be valuable in contexts outside of parallel file systems.  Two cases come to mind where measuring IOPS can be a rational yard stick.
    +

    1. Serving up LUNs to containers and VMs

    +
    By definition, infrastructure providers shouldn't be responsible for the applications that run inside black-box containers and VMs because they are providing storage infrastructure (block devices) and not storage services (file systems).  Blocks in and blocks out are measured in IOPS, so the fit is natural.  That said, HPC users care about file systems (that is, scientific applications do not perform I/O using SCSI commands directly!), so worrying about LUN performance isn't meaningful in the HPC context.
    +

    2. Measuring the effect of many users doing many things

    +
    While individual HPC workloads rarely perform random I/Os on purpose, if you have enough users doing many small tasks all at once, the file system itself sees a workload that approaches something random.  The more, small, independent tasks running parallel and the farther back you stand from the overall I/O load timeline, the more random it looks.  So, I argue that it is fair to measure the IOPS of a parallel file system for the purposes of measuring how much abuse a file system can take before it begins to impact everybody.

Take, for example, these IOPS scaling measurements I made on a small all-flash file system using IOR:

Scale-up IOPS benchmarking to demonstrate the saturation point of an all-flash file system
It looks like it takes about 4,096 concurrent random readers or writers to max out the file system.  This alone isn't meaningful until you consider what this means in the context of the whole compute and storage platform.


    What fraction of the cluster's compute nodes corresponds to 4096 cores?  If you've got, say, 728 dual-socket 64-core AMD Epyc processors, it would only take 32 compute nodes to max out this file system.  And if another user wanted to use any of the remaining 696 compute nodes to, say, run a Python script that needed to read in random packages scattered across the file system, there would be no remaining IOPS capacity left at this point, and everyone would experience perceptible lag.
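The arithmetic behind that claim, spelled out as a back-of-envelope sketch (the node counts are the ones assumed in the paragraph above):

    # 4,096 concurrent streams saturate the file system (figure above);
    # a dual-socket 64-core Epyc node can run 128 of them at once.
    cores_per_node = 2 * 64
    print(4096 / cores_per_node)   # => 32.0 nodes, out of 728 in the cluster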

    @@ -82,7 +160,8 @@

    2. Measuring the effect of many users doing many t
    Once you realize that a lot of the unglamorous parts of of scientific computing--reading dotfiles when you log in, loading shared objects when you launch a dynamically linked executable, or even just editing source code--are full of random-like reads, you can establish a quantitative basis for figuring out how badly an IOPS-intensive data analysis application may affect everyone else's interactive accesses on the same file system.

    This is not to say that we can easily answer the question of "How many IOPS do you need?" though.  How many IOPS a workload can drive is not how many IOPS that workload needs--it's really how fast it can compute before it has run out of data to process and needs to read more in.  The faster your compute nodes, generally, the more data they can consume.  They still want all the IOPS you can give them so they can spend as much time computing (and not waiting for I/O) as possible, and how many IOPS your application can drive is a function of how quickly it runs given the full stack between it and the storage, including CPU, memory, and networking.
    -

If everything is dumb, now what?

Give up trying to reduce I/O performance down to a single IOPS number, because it's two degrees away from being useful.  Bandwidth is a better metric in that it's only one degree away from what actually matters, but at the end of the day, the real metric of I/O performance is how much time an application has to wait on I/O before it can resume performing meaningful computations.  Granted, most storage vendors will give you a blank stare if you take this angle to them; telling them that your application spends 50% of its time waiting on I/O isn't going to get you a better file system from a storage company alone, so think about what the real problem could be.

    Is the application doing I/O in a pattern (random or otherwise) that prevents the storage system from delivering as many bytes/second as possible?  If so, ask your vendor for a storage system that delivers more bandwidth to a wider range of I/O patterns than just perfectly aligned 1 MiB reads and writes.

    Is the storage system already running as well as it can, but it only takes a few compute nodes to max it out?  If so, your storage system is too small relative to your compute system, and you should ask your vendor for more servers and drives to scale out.

    Is the storage system slow but you don't have the time to figure out why?  If so, it sounds like you work for an organization that doesn't actually value data because it's not appropriately staffed.  This isn't a storage problem!

Ultimately, if solving I/O problems was as easy as answering how many IOPS you need, storage wouldn't be the perpetual pain point in HPC that it has been.  As with all things in computing, there is no shortcut, and the proper way to approach this is to roll up your sleeves and start ruling out problems.  You can (and should!) ask for a lot from your storage vendors--flexibility in delivering bandwidth, CPU-efficient file systems, and quality of service controls are all valid requests when buying storage.  But IOPS are not.
    + +
diff --git a/2021/late-to-the-party-and-a-few-bits-short/index.html b/2021/late-to-the-party-and-a-few-bits-short/index.html
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    Late to the party and a few bits short


I recently had the opportunity to purchase a pristine Novena desktop system. For those who aren’t aware, Novena is a Freescale i.mx6 based open-hardware computing platform which began shipping in 2015. It was available as a desktop, laptop, standalone board and a really neat heirloom version with a wooden case. The Novena had always been a curiosity for me since it was announced, but back in 2015 I missed my opportunity to purchase one – probably due to a bit of procrastination and the fact that I already had a Udoo Quad board, which is powered by the same processor. Because it’s based on a 32-bit processor, I purchased it with open eyes, knowing that it would not deliver M1 performance. Remarkably, although the creators of the Novena have declared it EOL, there are still some components available to purchase on Crowd Supply, including mainboards.

    + +

    Hackable? Yes, please


    I’m a bit of a boomer when it comes to technology. I cut my teeth back in the day on highly expandable systems such as the IBM PC XT and Commodore Amiga 2000, and to this day I still do my fair share of tinkering – for example, that super cool Flora watch project which I did back in 2020. That being said, I’ve also been one to appreciate leading-edge design from Apple and the super cool NeXT and SPARCstation systems designed by the renowned team at Frog Design. But cases are designed to house and protect what’s inside of a computer when it’s operating.

    The Novena desktop and laptop versions eschew this for a design which features a pop-out screen, supported by a gas strut similar to what you’d see on a hatchback liftgate, exposing the mainboard in all its glory while the system is operating – so caution is always a good idea.

    Of course, I could tell you about the time many moons ago when I fried a system by carelessly dropping a metallic object on the mainboard while the system was running. With that hard lesson learned, I’m being super cautious with the Novena.

    Better late than never


    My Novena desktop arrived from a far-off land and survived a transatlantic voyage unscathed, thanks to impeccable packaging. So although I’m very late to the Novena party, I managed to make it, circa 2021.

    Before deciding on purchasing this previously loved Novena, one important factor I researched was OS support, which is often spotty for such non-standard systems. Luckily, an industrious person has kicked off the novena-next project, which aims to deliver OS support for the Novena for the foreseeable future. As always, your mileage may vary.

    Seeing is believing


    Opening the package, I was like a kid at Christmas. The previous owner shipped me the whole shebang – Novena-RF SDR, extra green bezel, speakers, screws, clips, power adapter, etc. I connected the system to power and it immediately sprang to life and booted an older Debian version.

    I’ve done a lot of tinkering since that first day. My Novena now has Debian 10 Buster installed (relying on support from novena-next), and it boots from a SATA SSD. The speakers have been installed along with the Novena-RF SDR (which replaces the breakout board). In fact, I’m writing this blog on the Novena running LibreOffice, while listening to music from YouTube through Chromium, along with a bunch of terminals open to some much more powerful systems humming along in my basement.

    Novena definitely won’t win any speed records and is a few bits short of 64. But it makes up for all of that in character. As I experiment with Novena, I plan a few more blogs along the way. Stay tuned for more. A computer with nine lives? It just may be the case with Novena.

    This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Nobody Else Cares About Your Tech Stack


    Focus on your researchers’ and funders’ problems, not your technical solution


    (Note: This post is adapted from #75 of the Research Computing Teams Newsletter)


    Many of us who are managing research computing and data teams come up through the ranks doing research ourselves, and have experience in grantwriting for open research calls. That can actually hold us back from succeeding with getting grants for “digital research infrastructure” — building teams and infrastructure to support research.

    The thing is, digital research infrastructure calls — the sort that support research computing and data teams and tools — are more like applying to grants as a nonprofit than as a researcher. And we can learn a lot from how the nonprofit community writes funding proposals.

    We're not proposing a research project, we're proposing to solve problems a funder sees for a research community.


    Any funder has things they want to accomplish, and the goal as a potential fundee is to find something in the intersection of “work that helps the funder accomplish their goals” and “work that we are able to do and that is aligned with our goals”. Excellent work that isn’t in that first set won’t get funding. Money attached to work that isn’t in the second set is at best a distraction, at worst a drain on your team’s credibility.

    Most of us in research got our experience in grants from open disciplinary competitions where the funders’ and fundees’ goals are aligned — be seen to be funding/doing the best research. That means you don’t have to think about the distinction very much. The funder wants a portfolio of projects that are promising and could have impact - some will pan out and some won’t, but such is research. So everyone is focussed on “the best” work. There’s a lot of focus on the methods and technology used, because those are relevant for assessing the best work. A new technology or method might be why it’s important to fund this work now - some key observation wasn’t possible before, but now it is, and the funder and team who make the observation now will get the impact. And methods can sabotage a project - a team that does great work with the wrong methods won’t get the best results.

    Special digital research infrastructure calls — like those that research computing projects typically fall under — and calls by nonprofit funders are different. The funder has some particular change they want to see in the world, some community they want to see better served. They are generally much less willing to take a flyer on projects with only a modest chance of success, because failures won’t serve the community they want to see served. Something that successfully serves the community can always be improved in future iterations; something that fails to meet the community’s needs may well be unsalvageable.

    Methods and technology matter much less to these funders. They want to know that you can credibly deliver on the proposal, and that you have a plan, but the nuts and bolts are typically much less interesting.

    A nonprofit funder absolutely wants to understand how the after-school homework tutoring program you’re proposing will interact with the community — how it will find underserved students, how the tutoring will be delivered to the students, what indicators will be used to measure success — but the behind-the-scenes tech stack, like what task management and tutor booking software you’ll use, is completely irrelevant unless it’s to justify that you’ll be able to deliver the program. (And if you are in a position where you need details like that to justify your credibility for delivering the program, you are probably not in serious contention for the funding.) Every paragraph you spend talking about the cool new tutor booking software you’re going to use is a paragraph that doesn’t get spent highlighting the funder’s goals being achieved — more underserved students doing better in school.

    A research computing funder who’s receptive to a “we’ll run a new research data management platform specifically aimed at [discipline X]” proposal absolutely wants to know that you’re familiar with the underserved area, that you’ve been successful delivering similar things before, and what metrics you’ll use for success. They do not care that your roadmap includes Kubernetes and some exciting new operators. Would they be disappointed if, mid-stream, you pivoted to running the tasks on bare metal with Ansible? If not, why draw their attention and yours to obscure and uncertain details rather than to how your work will best advance their goals?

    The thing is, this same approach applies not just to research funders, but to anyone you plan to work with; any research group that contacts your team looking for something. They have a problem; the greater the up-front focus on understanding and solving the researchers’ problem, the better the chance of success.

    How will you know what the funder’s or researcher’s problems and goals are? In the funder’s case, the call will sometimes spell it out; in the researcher’s case, they’ll usually say something. In both cases, it may require some question-asking and digging deeper; the researcher’s or even the funder’s “presenting problem” may not be the underlying issue, and the funder’s call may focus on one particular aspect rather than the overarching goals. But the solution is the same: just ask a bunch of questions.

    “Do you mean they will just tell you?” I know a team in a Hackathon who went to an open pre-hackathon info session, and approached the organizer and sponsor in a gaggle afterwards. They asked the sponsor — the lead judge — what a successful Hackathon would be from their point of view. The sponsor — who, again, was the lead judge — answered with a particular problem they’d like solved as an example. That team, and mystifyingly only that team, delivered a partial but promising solution to the exact problem described in detail and in public, and they of course won first prize. How could they not? People organize special funding calls and hackathons because they want other people to help them achieve their goals. Yes, they’ll tell you, and if you keep asking questions they’ll keep talking about it until you politely explain that you have to leave for the evening. They put that contact information there and run informational sessions for a reason.

    The stakeholder side of research computing isn’t rocket surgery. But listening, digging in, and focussing on their goals is still rare enough that doing it well is almost an unfair advantage.

    This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Research Computing Funding Should Mostly Just Go To Researchers


    Research computing and data — supporting research efforts with software, computer and data expertise and resources — is fundamentally all of a piece. Today there are fewer and fewer hard boundaries between where the system requirements end and where the software or data resource requirements begin, and teams supporting researchers must have expertise across the stack.

    This convergence is a huge opportunity for research computing, but it’s also a challenge for funders. How do they know how much to allocate to software, and how much to hardware? Within software, how many resources should go to new software development or procurement, and how much to maintenance? In hardware, what is the right balance between GPUs, CPUs and FPGAs? And within data, how much should we support curation efforts vs discovery, or archival vs near-line storage?

    Luckily, there is a simple, robust, time-tested mechanism that research computing funders can easily take advantage of, and they should do so. It would let funders for research computing and data efforts manage their portfolio effortlessly — in exactly the same way health funders know how to balance spending between reagents and lab staff, or the same way physical science funders know how much to allocate to trainee salaries vs tabletop equipment.

    Most research computing funding should go directly to researchers, via traditional funding councils, and the researchers should spend the research computing and data portion of their grants as and where they see fit.

    With research computing and data funding as an integral component of project funding, the same research review process that adjudicates the research proposal would weigh in on the computing and data resources requested to conduct it. This eliminates nonsensical but all-too-common situations where a researcher successfully wins computing cycles for a non-funded project, or gets funding for a postdoc for a project but doesn’t get enough compute or storage resources for the trainee to perform the project. It would also allow the researcher to adjust how they were using resources mid-stream; if after initial efforts it turned out that software development effort to improve the code was a better use of funding than throwing hardware at the problem, the money could be spent that way, rather than applying ahead of time for people time and computing resources separately and hoping that it all works out in the end.
    A technician validates genetic variants identified through whole-exome sequencing at the Cancer Genomics Research Laboratory, part of the National Cancer Institute's Division of Cancer Epidemiology and Genetics (DCEG).
    We fund researchers to buy all kinds of complex equipment; they can handle buying research computing services.

    In this model, a researcher would include in their grant proposal a research computing and data component where necessary. As with purchasing wet lab equipment, animal experiments, or large physical apparatus — undertakings which are no less technical or complex than research computing — research grants would include cost justifications for the proposed research computing services or equipment, and funding agencies would rate the quality of the justification and the worthiness of the proposed goals versus the cost.

    A researcher whose proposal was successful would then, as with other line items, be free to spend the research computing and data component of their grant where they wish — on software development, data management and analysis, or access to storage and compute resources. Obviously, as known entities with existing working relationships, local research computing centres — now working in a familiar core facility model — would have a huge advantage. But the researcher would not be limited to working with those centres, nor to working with only one service provider.

    This approach will work well for capacity computing, data, and expertise — those needs where there are many possible service providers. And in those areas, having the researcher in control of what services they can use where will help drive those vendors to provide the kinds and quality of services that researchers need. But not every kind of computing or expertise capability is available enough for researchers to easily buy the quantities they need. Researchers can’t conjure into existence a (say) quantum computing shared facility one investigator-led grant at a time. Those new and emerging capabilities have to be handled separately, with existing funding councils setting priorities. Once those new capabilities are operational, they can and should be sustained with the same core-facility portable-funding model; if they can’t, maybe they didn’t need to be built. Other needs like foundational infrastructures — research and education networks, advisory bodies — will also need to be handled separately by funders.

    But for the bulk of research computing — for capacity support of research using computing, data and related expertise — there’s no longer a need for endless surveys and consultations and projections to indirectly inform decision making. Parallel competitions for different kinds of support for a research project have long since stopped making sense. Internal computing organization debates about what kinds of services to offer should make way for researchers allocating the funds themselves. Let researchers decide what works best for advancing their research.

    This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    Researcher's Time Has Value, Too


    ...And Researchers Value Their Time


    (Note: This post is adapted from #102 of the Research Computing Teams Newsletter)


    If you followed HPC twitter in late 2021 at all, you will have seen a heartfelt thread by a well-known research software developer, one who was a key contributor to the Singularity project among others, lamenting the frankly appalling state of developer productivity in HPC - both in what tools exist, and in support for them (and other tools for developers) at academic centres. A lot of people chimed into the discussion, including one of the leading developers of the PETSc project, embedded software developers, and some key people at big computing centres, all agreeing that there was a problem, but typically zooming in on one or another particular technical or procedural issue and not coming to any conclusion.

    I think the issue is a lot bigger than HPC software development workflows - it comes up in too many contexts to be about specific technical issues of running CI/CD pipelines on fixed infrastructure. The only people to identify the correct underlying issue, in my opinion, were people with experience of both academia and the private sector, such as Brendan Bouffler at AWS.

    The same argument got made by R&D research staff in the private sector. Their time actually has value; as a result, it gets valued.


    In academic research computing, partly because of low salaries — especially for the endless stream of trainees — but also because we typically provide research computing systems for free, we tend to put zero value on people’s time. Thus our “lowest-cost” approach definitely does not apply to researcher or trainee effort. If researchers have to jump through absurd hoops to get or renew their accounts, or have to distort their workflows to fit one-size-fits-all clusters and queueing systems, or postdocs have to spend hours of work by hand every month because tools to automate some of that work would cost $500, well, what do they expect, right?

    It’s not that this is an indefensible position to take, but one can’t take this position and act surprised when researchers who can afford to are seriously investigating taking their projects into the commercial cloud even though it costs 2x as much. It turns out that people’s time is worth quite a lot to them, and is certainly worth some money. If we were to let researchers spend their research computing and data money wherever they pleased, I think we’d find that significantly less than 100% of researchers would use “lowest price possible” as their sole criterion for choosing providers. Core facilities like animal facilities, sequencing centres, and microscopy centres compete on dimensions other than being the cheapest option available.

    To be sure, there are process issues in academia which exacerbate the tendency to see people’s time as valueless - rules about capital vs operating costs, for instance - but those rules aren’t a law of nature. If we were paying people in academia what they pay in tech, administration would suddenly discover some additional flexibility in the thresholds and criteria for considering something a capital expense if it meant we could be a bit more parsimonious with people’s time.

    Until then, one can’t be too surprised when the most talented and ambitious staff get routinely poached by the private sector, and when research groups start considering service providers that cost more but respect their time.

    This is a crosspost from Computing – thinking out loud: works in progress and scattered thoughts, often about computers. See the original post here.

    SRE to Solutions Architect


    It’s been about two years since I joined NVIDIA as a Solutions Architect, which was a pretty big job change for me! Most of my previous work was in jobs that could fall under the heading of “site reliability engineering”, where I was actively responsible for the operations of computing systems, but my new job mostly has me helping customers design and build their own systems.


    I’m finally starting to feel like I know what I’m doing at least 25% of the time, so I thought this would be a good time to reflect on the differences between these roles and what my past experience brings to the table for my (sort of) new job.


    (Just a note: I feel like job titles for ops folks are a fraught topic. My job titles have included things like “Production Engineer”, “HPC Cluster Administrator”, and “HPC/Cloud Systems Engineer”. I tend to self-identify more with the term “sysadmin”, but I’m using “SRE” as the most current term that captures the work I’ve spent a lot of my career doing, where I generally approached ops from a software engineering perspective. Feel free to substitute your job title of choice!)


    I spent most of the past 10 years building and running large computing systems. With the exception of ~18 months working on backend storage for a fairly large website, I’ve mostly worked on large high-performance-computing (HPC) clusters. These systems are generally used by researchers and engineers to run simulations and data analysis. The teams I joined were generally responsible for building these clusters, keeping them running, helping the researchers who used them, and making sure they performed well.


    In my day-to-day work in SRE (or whatever you call it), I mostly thought about problems like:

    • Are my team’s services operating reliably and predictably, according to our defined metrics?
      • Translated: What’s broken today?!
    • Are our (internal) customers having a good qualitative experience?
    • For any current or recently-past incidents, how can we understand what went wrong and incorporate that into our future development?
    • What major features or other changes are we hoping to release soon? How can we be confident they’ll work correctly and reliably?
    • Are we expecting to have to turn up more capacity or new systems soon? Are we ready to do so?
    • What projects can I pursue to automate anything boring that I have to work on?

    My role as a solutions architect is rather different, as I don’t actually have any services I’m responsible for keeping online. Instead, I’m generally working with external customers who are working with our products and using them in their own production environments. Because I’m focused on HPC and supercomputing, my customers have generally purchased NVIDIA’s hardware products, and are operating them in their own datacenters. I’m frequently talking to the SRE teams, but I’m not part of them myself.


    In my daily work as a solutions architect, I’m thinking more about questions like:

    • Do my (external) customers have a good understanding of what our products are and how to use them?
      • This may include products they already use, or new products that they may be planning to deploy
    • What are their pain points, and how can I feed that back to the product teams?
      • And also: What new product developments can I provide pro-active advice on before it makes it to the customer?
    • What new customer deployments are coming up, and how can I help them go smoothly?
    • How are our customers doing running their current clusters, and are they feeling a lot of pain?
    • What tools can I develop, or what content can I write, to help all of the above go well?

    On the one hand, I work on a lot of the same problems as a solutions architect as I did in SRE. I still spend a lot of time thinking about the scalability, performance, and reliability of HPC systems. I still care a lot about making sure the systems I help build are useful and usable for researchers.


    On the other hand, I’m not so much on the pointy end of these problems anymore. My work is mostly focused on enabling others to run reliable systems, rather than being directly on the hook for them. And while I do help directly manage some internal lab clusters, those systems have very loose SLOs. So in practice I haven’t been on call in about two years.


    I do think my experience in SRE has been really important in doing a good job in solutions architecture. I like to think I have a pretty good instinct for systems design at this point, and I can often help identify problems and bottlenecks in early stages. My troubleshooting skills from SRE work are incredibly helpful, as a lot of my work is helping customers understand what the heck is broken on their clusters. And I also find that it really helps to have someone who “speaks the same language” as the SRE teams for our customers, especially because I feel like so many vendor relationships neglect reliability concerns in favor of features.


    The transition has been really interesting, and I’m still conflicted about which kind of job I prefer. I don’t exactly miss being on call… but I do somewhat miss the more visceral feeling of understanding a running system really well through sheer continuous contact with it. However, I really love helping my customers build cool systems, and I like the satisfaction of helping many different teams do well, versus focusing tightly on a single service.


    I’m really enjoying the solutions architect gig right now, but I also wouldn’t be surprised if I ended up doing SRE work directly again at some point.

    This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    The Easy HPC button


    We live in a results-driven world. Whether it’s an aerodynamicist waiting on simulation results to determine the efficiency of their latest model, or a doctor waiting on genomic pipeline results to determine next steps for a patient, results make the world go round. And this of course goes beyond the sciences. As any thespian will tell you, stage productions are the result of the work of many individuals behind the scenes.


    Much in the same way, the complex computational processes found in HPC rely upon many things behind the scenes being carried out. And although the devil may be in the details, consumers of HPC resources shouldn’t have to go through purgatory to get results. Organizations today rely on HPC to drive their core mission and deliver products to market faster, so the need for HPC to be easy enough to actually drive productivity is crucial. And just as the technology of HPC has changed, so have the skills of the users. Modern HPC infrastructure relies upon a myriad of technologies including containerization, accelerators and cloud. And for users, gone are the expectations of learning a complex CLI, replaced by the need for easy-to-use interfaces.

    Workload schedulers are a necessary component of any HPC cluster. Schedulers have been around for a very long time, and as they have become more sophisticated, they have grown to support an ever-increasing number of CLI and configuration options. Although these options provide greater functionality, they can be complicated for end users to use. What if you could provide an HPC easy button for your users?
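    To make that concrete, here is roughly what a single job submission can look like at the CLI - a sketch only, with the job name, queue, resource requirements and application all placeholders:

    $ bsub -J cfd_run -q normal -n 128 -R "span[ptile=32]" -o cfd.%J.out -e cfd.%J.err mpirun ./solver input.dat

    Every one of those options is something a well-designed form can capture - or hide entirely.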


    IBM Spectrum LSF is a workload management solution for HPC environments. Over the past 30 years, it has evolved from being just a workload scheduler into an entire suite of capabilities covering the lifecycle of HPC jobs. On the scheduling side, LSF has not only kept pace with the massive scale of commercial HPC environments today, but it also provides capabilities which dramatically lower the bar to accessing HPC.

    Ease of use starts with the users, and LSF provides a web-based job submission and management portal which greatly simplifies the use of your HPC cluster. Administrators define custom forms that hide the complexity, and the forms can even be customized to use application- and domain-specific language understood by your users. For users on the go, LSF has Android and iOS mobile clients so you can check on the state of your running jobs. And a RESTful API is also available to integrate LSF into your corporate infrastructure.

    With users well taken care of, LSF features many capabilities which allow administrators to take advantage of technologies such as containerization, hybrid cloud and GPUs. Out-of-the-box support for various container technologies lets administrators control which containers can be used in the environment and hides the complex container startup commands from users. Support for dynamic hybrid cloud enables LSF to burst out to any of the supported cloud providers when needed and scale back the resources when they are no longer required. And intelligent data staging takes care of moving data to and from the cloud without blocking or making resources wait for transfers.

    What does this all add up to? Well, you can think of it as an HPC easy button. Your users simply fill in a form and submit their job. LSF worries about the underlying complexities: where to place the job, moving data, CPU and GPU allocation. The user waits to get the job results back and is oblivious to everything that is going on behind the curtain.

    Learn more about easy HPC with IBM Spectrum LSF in this session: Simplifying HPC - Just push the button.

    This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

    To Compete, Your Team Needs a Specialty


    And ‘HPC’ or ‘Research Software Development’ isn’t a specialty

    (Note: This post is adapted from #90 of the Research Computing Teams Newsletter)


    Quick: what’s your team’s specialty?


    Your team’s specialty is its reputation for what it’s good at. Not what you think your team is good at; what matters is what specific thing your stakeholders (funders, clients, institutional decision makers) think your specialty is. What they recommend you for to peers, what they recommend funding you for to decision makers.


    In the post-pandemic world, researchers are used to getting their support remotely from anywhere. To compete, your team will need well-defined specialties; and “HPC” or “research software development” isn’t a specialty.

    Stand out from the crowd by having your team choose a specific path and owning it.

    The pandemic isn’t over, but the end of this phase has begun, and with September (“academic new year”) here, it’s a good time to think about the future. Last October I wrote about what post-pandemic research computing is going to look like, and it’s holding up pretty well. With researchers now very comfortable getting research computing and data support virtually, and with budgets under pressure, there is going to be a lot more competition for research computing and data teams. Research collaborations are going to be looking elsewhere more and more often - to academic teams at other institutions, or to commercial companies (either commercial cloud vendors for compute, or emerging collaborations between well-known names, like NAG and Azure, for services).

    This is an opportunity for well-run, focussed teams to grow and prosper. But it’s going to take more planning and forethought than in decades past, when one could count on having a near monopoly - being the only available seller of services to local researchers. It’s going to take developing and maintaining a strong reputation for a small set of specialties.

    “HPC” may sound and feel like a specialty within the community, but to researchers and decision makers it’s incredibly generic and so meaningless. It’s not a technical term, but a term of advocacy and marketing which has come to mean resources for anything from high throughput batch services to huge tightly coupled simulations to single-node multi-GPU code runs. Even advocates for the term define it as “anything bigger than what a researcher could provide on their own”, which is incredibly generic, and so necessarily meaningless. How can your team’s specialty be “anything”? A team is expecting researchers to recommend them for “anything”? There’s a reason why VPRs would be just as happy contracting it out (e.g. see table 2 here).

    “Services and expertise for quickly analyzing public-health bioinformatics data”, “a platform for firing off and monitoring aerospace CFD calculations”, “a centre of excellence for digital humanities data curation and archiving”: these are examples of specialities - products, services - that researchers and institutional decision makers can see the value of and be willing to put money into, services and products and teams that researchers can recommend to each other. They are areas where a team could build a strong reputation - they could be the group that researchers recommend to collaborators when they chat about research needs.


    “Research Software Development” at least, to its credit, doesn’t pretend to be a narrow specialty - it’s a broad area which can encompass any area of software development in support of research work. As a result, a team can’t have a specialty in “Research Software Development”; it can have a specialty in “web applications and mobile apps for data collection”, or “GIS analysis tools” or “agent-based simulations for social sciences modelling”. But almost certainly not all three at the same time.


    Even so, research software development is too specific in one unhelpful sense. It could be that researchers are just looking for your team to write some software for them, hand it over, and be done. But increasingly, researchers are looking not just to be delivered some software, but for a team to host the software, run it, operate it - and/or collect and curate data to be used with the tool, for tests or otherwise. Focusing solely on research software development, as a separate activity from systems operation or data analysis and management, can be overly limiting.


    Ok, so what does all of this have to do with competition?


    One of my venial weaknesses is spending too much time on twitter. I’m seeing increasing concern there from research computing teams that cloud vendors or teams using cloud vendors are coming into their institutions and winning or trying to win contracts for projects that “should” have gone to the in-house teams. I’m hearing complaints that the external bids are for amounts of money 2x or more what the in-house team says they could do it for. Incredibly (and almost certainly incorrectly) I’ve even heard 10x.


    Reader, as hard as it is to believe, those complaining see this as an affront¹, and a threat, rather than the enormous opportunity it is.

    If a contract at your institution is won - or even in serious contention - that is 2x what you estimate you could have provided the services for, that’s not evidence that the external contractor is overcharging. It’s evidence that your team is undercharging, that you could have proposed doing more to support that project and the researchers, and that you’re leaving money on the table. It’s also evidence that you haven’t fully convinced the relevant decision makers that you can provide that service; they don’t see it as being part of your specialty.


    Clearly your institution found it worthwhile to spend or consider spending that 2x, because they understood that it was worth at least that much to them to have those services. A bid for half that amount having failed or being questioned means that they really didn’t believe the in-house team could do it as well. That’s revealed-preferences data that you can use. (And if I truly believed someone at my institution was seriously considering spending 10x (1000%!) to work with an outside company rather than work with my team, well, that would occasion some serious soul searching.)


    Cloud providers and other external contractors do have advantages. They have a library of reference architectures they can deploy, so they can pitch (say) CFD solutions to the mech eng department, and bioinformatics pipeline solutions to the biology department. They can pull from a library of testimonials to demonstrate that they can do the work.


    But so can you. You have access to all the literature to search for how others have deployed such solutions. You have (or should have) testimonials from the people that matter - researchers at that very institution. And you have a network of deep relationships in the institution, relationships based on collaboration on research problems. Those relationships and shared expertise and history of collaboration is something the external contractors have no chance of matching.


    If you’re in danger of losing out on these sorts of competitions, it’s because you’re not communicating your specialities in a way that matters, in a way that’s convincing, to the people who could pay for your services. They can’t see how your “HPC batch services” connects with “a digital twinning platform for building simulation”. They don’t see “GIS exploration for private social sciences data” as being an obvious part of your “Research Software Development” effort - where’s the data part? If there’s a miscommunication there about what your team can provide, that’s on you and your team, not on the researchers or other decision makers.

    You have specialities - if you don’t know what they are, ask the researchers who keep coming back. How do they describe what you do? What would they say your speciality is, how do they talk about you to their colleagues? What would you have to demonstrate to them to have them recommend their colleagues to you?


    Similarly, you already have a million things you don’t do. You won’t fix a researcher’s printer, you don’t help them do graphic design for their posters, my guess is you don’t help them set up spreadsheets in OneDrive or set up lab webpages. So it’s not like declaring that there’s computing stuff you do and don’t help researchers with is some completely new thing, previously utterly unknown to your organization.


    Once you make explicit your specialties, you can start playing to your strengths, and communicating them endlessly. You can make a point of reaching out, having your team talk at conferences in the specialties, and at departmental colloquia. You can be well-regarded enough in your institution for those specialties that external contractors pitching work within your speciality never get in the door. You can start more easily hiring people that are interested in that specialty. A specialty builds on itself, snowballs. You can start steering future work towards that specialty to build on it, and start directing work well outside the specialty to somewhere else - where it does fit inside their specialty.


    Yeah, that last part is scary. Sticking to this path isn’t easy. It means turning down opportunities that aren’t in or adjacent to your specialities. Especially for new teams, eager to please, this can be scary.


    But as anywhere in research, your team’s reputation is all that matters. Your team has a reputation, has stuff it does and doesn’t do. Did you choose it, did you shape it, or are you content to just let it happen?


    Your team can be extremely strong in, specialize in, develop a reputation in, any of a number of things. But not all of the things. Being a manager or leader means choosing.

    This is a crosspost from VanessaSaurus: dinosaurs, programming, and parsnips. See the original post here.

    Uptodate


    I recently had an itch to scratch - and that itch was writing a library in Go. We don’t use Go much for my work, so I figured out a compelling reason to start a new personal project - a command line tool written in Go (and matching GitHub action) to help keep things up to date in a repository. Appropriately, I called it uptodate! The project gave me an excuse to focus specifically on Docker, and to have total control so I could go wild and crazy with writing Go code without worrying about forcing it on the owner, alecbcs, to merge my wild ideas.


    Uptodate

    Uptodate is a command line tool in Go and GitHub action that makes it easy to:

    1. Update FROM statements in Dockerfile to have the latest shas
    2. Update build arguments that are for spack versions, GitHub releases and commits, and container hashes.
    3. Generate matrices of Docker builds and changed files (intended for GitHub actions), along with tags, labels and other metadata.

    You should check out the user guide for detailed usage, or read about the GitHub action.


      How does it work?

    I’ll give a brief overview of a few of the commands and then a quick example GitHub workflow. The examples below assume that you’ve installed uptodate and have the binary “uptodate” in your path.
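    If you’d like to follow along, a minimal way to build the binary yourself - a sketch assuming a working Go toolchain, not official install docs:

    $ git clone https://github.com/vsoch/uptodate
    $ cd uptodate
    $ go build -o uptodate .
    $ ./uptodate --help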


      Dockerfile

    If you have one or more Dockerfiles in your repository, you can run uptodate to update digests. For example:

    $ uptodate dockerfile .

    will find each Dockerfile in the present working directory and subfolders and update it. For digests, you might see that:

    FROM ubuntu:18.04

      is updated to

    FROM ubuntu:18.04@sha256:9bc830af2bef73276515a29aa896eedfa7bdf4bdbc5c1063b4c457a4bbb8cd79

      Note in the above we still have the digest and the tag, so subsequent updates can further update the sha by looking up the container based on the tag. And we can also update build arguments that match a particular format! This one, specifically:

    ARG uptodate_<build-arg-type>_<build-arg-value>=<default>

      The above flags the build argument for uptodate to look at using the prefix of the library name, and then the next string after the underscore is the kind of update, followed by specific metadata for that updater, and of course the value! A few examples are provided below.


      Spack Build Arguments

    Spack is a package manager intended for HPC, and it’s possible to look up the latest spack versions for some package. To create an argument that is matched to a spack package (and its version) you might see:

    ARG uptodate_spack_ace=6.5.6

      After the updater runs, if it finds a new version 6.5.12, the line will read:

    ARG uptodate_spack_ace=6.5.12

      This works by using the static API that is deployed alongside the Spack Packages repository that I designed earlier this year. So the updater will get the latest versions as known within the last 24 hours.
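    You can also poke at that static API directly from the command line. Note that the endpoint path and JSON field in this sketch are illustrative assumptions - check the spack packages repository for the exact layout:

    $ curl -s https://spack.github.io/packages/data/packages/ace.json | jq '.latest_version'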


      GitHub Release Build Argument

      If we want an updated version from a GitHub release (let’s say the spack software itself) we might see this:

    ARG uptodate_github_release_spack__spack=v0.16.1

      The above will look for new releases from spack on GitHub and update as follows:

    ARG uptodate_github_release_spack__spack=v0.16.2

      GitHub Commit Build Argument

    Similarly, if we want more “bleeding edge” changes, we can ask for a commit from a specific branch, following this pattern:

    ARG uptodate_github_commit_<org>__<name>__<branch>=<release-tag>

      Here is an example of asking for updates for the develop branch.

    ARG uptodate_github_commit_spack__spack__develop=NA

      which wouldn’t care about the first “commit” NA as it would update to:

    ARG uptodate_github_commit_spack__spack__develop=be8e52fbbec8106150680fc628dc72e69e5a20be

    And then to use it in your Dockerfile, you might pop it into an environment variable:

    ENV spack_commit=${uptodate_github_commit_spack__spack__develop}

      See the docs for more detailed usage and an example for the Dockerfile updater.


      Docker Build

    The second updater that I think is pretty useful is the Docker build updater. It reads an uptodate.yaml configuration file that allows for version regular expressions and different kinds of build args to generate a matrix of builds (intended for GitHub actions). For example, let’s say that we start with this configuration file:

    dockerbuild:
      build_args:
        # … (three build args follow here: a manually defined set of versions,
        # a version from a spack package, and a container — described below)

      You’ll see the primary section of interest is under “dockerbuild” and under this we have three build args for a manually defined set of versions, a version from a spack package, and a container. You could run this in a repository root to look for these config files (and a Dockerfile that they render with in the same directory or below it) to generate a build matrix.

    $ uptodate dockerbuild

      Or to only include changed uptodate.yaml files:

    $ uptodate dockerbuild --changes

      If you provide a registry URI that the containers build to, we can actually check these containers to look at current build args (that are saved as labels and then viewable in the image config by uptodate) to determine if an update is needed.

    $ uptodate dockerbuild --registry ghcr.io/rse-radiuss
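    Since the build args are saved as standard image labels, you can also peek at them with plain Docker tooling (the image tag here is just an example):

    $ docker pull ghcr.io/rse-radiuss/ubuntu:20.04
    $ docker inspect --format '{{ json .Config.Labels }}' ghcr.io/rse-radiuss/ubuntu:20.04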

    This registry URI will also be included in the output to make it easy to build the container. I think this is one of the neatest features - it was just added in evenings this last week! Check out an example image config that has these labels! In a GitHub action, it might be used like this:

    jobs:
      generate:
        name: Generate Build Matrix
        runs-on: ubuntu-latest
        outputs:
          dockerbuild_matrix: ${{ steps.dockerbuild.outputs.dockerbuild_matrix }}
          empty_matrix: ${{ steps.dockerbuild.outputs.dockerbuild_matrix_empty }}

        steps:
        - uses: actions/checkout@v2
          if: github.event_name == 'pull_request'
          with:
            fetch-depth: 0
            ref: ${{ github.event.pull_request.head.ref }}

        - uses: actions/checkout@v2
          if: github.event_name != 'pull_request'

        # … (the uptodate step, with id: dockerbuild, runs here)

        - name: View and Check Build Matrix Result
          env:
            result: ${{ steps.dockerbuild.outputs.dockerbuild_matrix }}
          run: |
            echo ${result}

      # (second job; its header is reconstructed from context, since the
      # matrix below consumes the outputs of the generate job)
      build:
        needs: generate
        runs-on: ubuntu-latest
        strategy:
          fail-fast: false
          matrix:
            result: ${{ fromJson(needs.generate.outputs.dockerbuild_matrix) }}
        if: ${{ needs.generate.outputs.empty_matrix == 'false' }}
        name: "Build ${{ matrix.result.container_name }}"
        steps:
        - name: Checkout Repository
          uses: actions/checkout@v2

        - name: Set up Docker Buildx
          uses: docker/setup-buildx-action@v1

        - name: Build ${{ matrix.result.container_name }}
          id: builder
          env:
            container: ${{ matrix.result.container_name }}
            prefix: ${{ matrix.result.command_prefix }}
            filename: ${{ matrix.result.filename }}
          run: |
            basedir=$(dirname $filename)
            cd $basedir
            # …

      Of course you’d want to login to a registry, and then also possibly calculate metrics for the container, so consider this a very simple example. The build matrix that is being passed between those steps has entries like this:

    [
      {
        "name": "ubuntu/clang/uptodate.yaml",
        …
      }
    ]

      Git Updater

      I also like this updater because it easily generates for you a matrix of files that are changed, according to git. Running locally it looks like this:

    $ ./uptodate git /path/to/repo

      And would generate a matrix for a GitHub action too:

    [
      {
        "name": "Modify",
        …
      }
    ]

      And of course you can change the default “main” to another branch:

    $ ./uptodate git /path/to/repo --branch master

    and that also pipes into a GitHub action. I don’t want to redundantly reproduce the docs, so if you are interested you can read more at the user guide. Mind you that the library is heavily under development, so if you have a request for a new updater or want to report a bug, please let me know!

      Overview

    I have loved working on this library. I think it’s the first library in Go where I keep looking back at my design and wanting to re-write it. But I think that means I’ll eventually get better. But it’s always good to have one or more projects you are passionate about, because I don’t personally see a point in being a software engineer if I don’t (yes, I know it makes a salary, but I require more than that).

      \ No newline at end of file +makes a salary, but I require more than that).

      + +
    +
    + +
    + + + + + + + + + + +
    + + + + + diff --git "a/2021/very-risqu\303\251-computing/index.html" "b/2021/very-risqu\303\251-computing/index.html" new file mode 100644 index 0000000..fa8dde9 --- /dev/null +++ "b/2021/very-risqu\303\251-computing/index.html" @@ -0,0 +1,239 @@ + + + + + + + Very risqué computing - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    +
    + +
    +

    hpc.social

    + + + + + + + + + + + +
    + High Performance Computing
    Practitioners
    and friends /#hpc +
    +
    + +
    +
    +
    + +
    + +
    + Share:  + +
    +
    + +
    This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    Very risqué computing


    This spring, we’ve been blessed with fantastic and almost tropical weather here in Southern Ontario, Canada. Normally at this time, after a long winter, the last thing on my mind is indoor activities. However, on June 3rd I was greeted one morning by an email about an incoming delivery. It turns out it was one of the items I’ve been waiting patiently for from Crowd Supply (https://www.crowdsupply.com/), in the hopes of keeping me busy during what I thought would be a cold spring season.

    Christmas in June


As I have multiple things from Crowd Supply on order (don’t ask!), I didn’t quite know which item was arriving. It turned out to be the long-awaited SiFive HiFive Unmatched RISC-V powered board. Those who know me (and I’ve said this many times) understand that I don’t like mainstream anything, and that also applies to computers. My interest in Arm-based systems dates from the 1990s with the venerable Acorn Archimedes computers. However, all of the news around the RISC-V community has really piqued my interest. I passed on the SiFive Unleashed primarily because it didn’t have a PCIe slot, although this was remedied with an optional, but costly, add-on board.

So when the SiFive Unmatched was announced with a competitive price and a bump to 16GB, I jumped at the opportunity to purchase one. And it turned out to be a great decision.

The HiFive Unmatched is based on the SiFive Freedom U740 SOC with four U74 cores and one S7 core, and features an all-important PCIe slot. With 16GB of onboard RAM and an M.2 Key M slot for an SSD, my goal was to get the Unmatched set up as a desktop. For those looking to learn more about RISC-V, I’d recommend starting with the RISC-V International foundation site. As per the RISC-V site, “RISC-V is a free and open ISA enabling a new era of processor innovation through open standard collaboration.” In simple terms, the ISA, or instruction set architecture, defines the set of instructions that are supported by the processor – so things like arithmetic, logic, and branch instructions, to name a few. It’s the way that programmers can issue commands to the processor to do “things”.

    First impressions


I’ve become accustomed to developer boards being packaged in rather nondescript packaging. The first impression of the Unmatched board could not be further from this. The board was shipped in a lovely box and included an SD card with a bootable Freedom U SDK image, an I/O shield, and a USB cable. So the first impression for me was quite positive.

    Bootstrapping


I mounted the Unmatched board to my Streacom BC1 benchmark table and installed an XFX Radeon 2GB Heatsink edition in the PCIe slot. It’s an old GPU, but fanless – which I always appreciate. Plus, I’m not looking to do any serious gaming on the system.

The first boot of the system from the SD card was a success (albeit a bit slow). I monitored the boot over the serial console (minicom) from another system. The Unmatched sprang to life and eventually booted up to a fully working XFCE desktop. This was actually a lot smoother than I had anticipated. Once I confirmed that everything was working as expected, I installed a Samsung 780 NVMe SSD in the M.2 Key M slot and turned my focus to Ubuntu 21.04. The SiFive Forums have proven an invaluable resource in helping me get Ubuntu up and running on the system and make sure the board was booting Ubuntu with a clock of 1.2 GHz. Of course, I followed the steps to install Ubuntu to the onboard NVMe, so naturally I/O performance is much better now.

    Burning in


Does it run Linpack? Of course it does :) As with any new board I receive, running a High Performance Linpack benchmark is often one of the first things I do. It’s a well-known benchmark which provides data for the Top500 ranking of supercomputers.

I used the current HPL v2.3 and compiled it using the Ubuntu-supplied gcc, OpenMPI, and math libraries. A few runs of HPL yielded a result of 2 GFlops (see screenshots below). Although I’ve not looked closely at what the theoretical peak of the U740 SOC is, the result is roughly what I expected given what I’ve been reading up on the board. Ultimately, I was pleased that HPL compiled and ran to completion, and it was a great way to stress the board.
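For the curious, the overall HPL build and run flow looks roughly like the following. This is a sketch, not the exact steps from this run: the Make.riscv file is a renamed template pointed at the Ubuntu OpenBLAS and Open MPI packages, and HPL.dat still needs tuning (N, NB, P, Q) for the four U74 cores.

sudo apt install build-essential gfortran libopenmpi-dev libopenblas-dev
wget https://netlib.org/benchmark/hpl/hpl-2.3.tar.gz
tar xzf hpl-2.3.tar.gz && cd hpl-2.3
cp setup/Make.Linux_PII_CBLAS Make.riscv   # bundled template; edit MPdir/LAdir to the MPI and BLAS install paths
make arch=riscv
cd bin/riscv && mpirun -np 4 ./xhpl        # edit HPL.dat for four cores first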

    Stay tuned to this channel for more risqué computing escapades…

diff --git a/_posts/gaborsamu/2021-11-2-sparc_solaris9.md b/2021/weekend-it-sparc-eology/index.html
rename from _posts/gaborsamu/2021-11-2-sparc_solaris9.md
rename to 2021/weekend-it-sparc-eology/index.html
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

    Weekend IT SPARC-eology


Well it’s that time of the year again in the northern hemisphere where the weather starts to change and the autumn colours are in full swing. And the gray, rainy weekends have left me looking for something to provide a spark. And there’s no better way than revisiting some of my old hobby IT projects which have been languishing in my basement. This time I decided to turn my attention to something SPARC powered. And before you wonder which SUN Microsystems […] hopefully be the subject of another writeup in the future.

And as for that Ultra 5 that graced my desk between 2000-2005? Well it’s sitting in my basement too, waiting for another rainy weekend day.

diff --git a/2022/a-supportive-job-interview-story/index.html b/2022/a-supportive-job-interview-story/index.html
new file mode 100644
This is a crosspost from Computing – thinking out loud. See the original post here.

    A supportive job interview story


    (adapted from an old lobste.rs comment)


    My favorite interview ever was a systems interview that didn’t go as planned. This was for an SRE position, and while I expected the interview to be a distributed systems discussion, the interviewer instead wanted to talk kernel internals.


    I was not at all prepared for this, and admitted it up front. The interviewer said something along the lines of, “well, why don’t we see how it goes anyway?”


    He then proceeded to teach me a ton about how filesystem drivers work in Linux, in the form of leading me carefully through the interview question he was “asking” me. The interviewer was incredibly encouraging throughout, and we had a good discussion about why certain design decisions worked the way they did.


    I ended the interview (a) convinced I had bombed it, but (b) having had an excellent time anyway and having learned a bunch of new things. I later learned the interviewer had recommended to hire me based on how our conversation had gone, though I didn’t end up taking the job for unrelated reasons having to do with relocation.


    I’ve given a number of similar interviews since, on system design or general sysadmin skills. I’ve always tried to go into these thinking about both where I could learn, and where I could teach, and how either outcome would give the candidate a chance to shine.

diff --git a/2022/adam-s-weekly-ish-update-2022-12-20/index.html b/2022/adam-s-weekly-ish-update-2022-12-20/index.html
new file mode 100644
This is a crosspost from Computing – thinking out loud. See the original post here.

    Adam’s weekly (-ish) update, 2022-12-20


    What’s new


    The past few weeks have been on the intense side at work, so I completely lost track of the blog and haven’t had a chance to write much in that time. However, I’m now on a holiday break, and finally have time to sit down at a keyboard to write more than code and Slack messages.


    One of the highlights of the past few weeks was a trip to San Jose, and the NVIDIA headquarters. I changed teams at work back in July, transferring from a group that was closely integrated with product management, to a more straightforward engineering team which designs and builds new high-performance computing systems.


    This was the first chance I’ve had to meet up with other members of my new team in person, and it was a really wonderful experience to be in the same physical space as folks who were previously just images on my screen. I love working remotely, but it’s also great to be able to stand in front of a white board with someone and brainstorm, or get coffee and just have a chat with a coworker outside of a video call with an agenda.


    (Plus, we were all careful and managed to avoid catching COVID from each other! Which was a win on its own.)


    Now, for the next two weeks I’m off work, and planning to take some time to relax and spend time on projects that are harder to focus on during busy work weeks. Expect (maybe) less about computers in my blog and social feeds, and more about D&D, baking, and tasty cocktails.


    What I’m reading, watching, and listening to


    I’ve been a bit too scattered to focus on actual books the past few weeks, but I did find time for a few interesting articles and podcasts. In particular,

    • “Why Roman Egypt was such a strange province”, from Bret Devereaux: As usual from Devereaux, an accessible but extremely detailed discussion of why so much of what we know about the Roman empire is from Egyptian records, but why that also might not be representative of the broader empire.
    • “Emoji as incident resolution tools”, from Will Gallego: A fun discussion of how using emoji as part of a team’s communication can add nuance and shared understanding during incident management, along with a discussion of the disadvantages and costs associated with the practice.
    • “What does modern software architecture look like in 2022?”, from Bartosz Mikulski: A nice article which discusses how service-oriented software architecture can often include an explicit expectation of change. For example, the architecture might include notes on an ongoing deprecation of a library, or might signpost the need to factor a new microservice out when overall system load gets high enough.
    • The Brady Heywood podcast: Found via the Oxide and Friends podcast, the Brady Heywood podcast is a series on engineering disasters and their consequences from a forensic engineering firm. It’s mostly not being updated any more (with the podcasters moving on to a separate series on complexity science), but it has a deep back catalog of good episodes, and includes thoughtful discussions of human factors, safety engineering, and how organizational pressures become manifest in engineering artifacts.

    Recent recipes

    • Smitten Kitchen’s Homemade Irish Cream: This is a recipe I make every year, and I often give away small bottles of it as holiday gifts. It’s really ridiculously tasty, much better than Baileys or similar, and good either on its own or in hot chocolate.
• Smitten Kitchen’s Fairytale of New York: This is a really tasty whiskey cocktail, and the star of the show is a “winter warmth syrup” that substitutes in for simple syrup. The syrup is simply very tasty, and turns what’s effectively an Old Fashioned variant into a lovely holiday cocktail.
• Sparkling gingerbread from Yossy Arefi’s Snacking Cakes: This recipe takes a little more prep than most of Arefi’s “snacking cakes”, as it includes ginger three ways (ground, fresh, and crystallized), but it’s worth the few minutes of extra work.

    Pet photos

    A white calico cat and a gray tabby cat lounging on a large brown pet bed in front of a gas fireplace.
    I’m pretty sure these two want me to turn the fireplace on.
    A gray tabby cat lounges on a dog bed, while a golden doodle lays on the floor nearby and looks forlornly at the bed.
    Just Percy bullying the dog by stealing his bed.
diff --git a/2022/adam-s-weekly-update-2022-11-27/index.html b/2022/adam-s-weekly-update-2022-11-27/index.html
new file mode 100644
This is a crosspost from Computing – thinking out loud. See the original post here.

    Adam’s weekly update, 2022-11-27


    What’s new


    The first thing that’s new is… this post! I’m going to try to do at least a weekly post on the blog now, just a general update and some links. This will hopefully help me get back into the habit of writing on the blog regularly, and maybe inspire me to write a bit more in general.


I was off work this week for the Thanksgiving holiday, and traveled to Michigan to visit my parents and my brother’s family. My mom has been struggling with some pretty major health issues this year, so it was really wonderful and reassuring to get to spend some time with her and my dad. I also finally got to meet my brother’s three-year-old son, who was born right before the pandemic started, and who I hadn’t managed to meet up until now.


    On the tech-related front, I used this week to take a break from Twitter (mostly), and to be honest… it was kinda refreshing! I had developed a pretty bad Twitter habit this year, doomscrolling for more time than I like to admit. While I really like Twitter and I’ve had some nice career boosts from it, it was also a time sink that was not entirely healthy.


    Admittedly, that time was somewhat replaced by playing around on the Fediverse / Mastodon. But with the lack of algorithmic suggestions, quote tweets, and other means of virality, that network so far feels a lot quieter and less time-consuming than Twitter. Tim Bray has a good post up which discusses some of the advantages and pitfalls of federated social media, and I can highly recommend reading that. I’m still a bit skeptical that it will be a practical “Twitter replacement” for most people, but so far I’m finding it pleasant.


    What I’m reading

    • Nonfiction book: Code, Second Edition, by Charles Petzold. This book walks through the process of building a working computer, starting with ideas like Morse code, then working up from logic gates on up. This is technically a re-read, as I read the first edition… 10+ years ago? But I’m getting a lot more out of it this time around, and really enjoying it.
    • Fiction book: The Spare Man, by Mary Robinette Kowal. A cozy murder mystery on a luxury cruise to Mars. I’m only a few chapters in, but already greatly enjoying myself.
    • “Hiding theory in practice”, by Fred Hebert. I’ve been reading a lot about safety engineering and its application to computing lately, but that community can sometimes get off into the weeds about points of theory that don’t have consensus in the broader computing community. This post has a good discussion of how to use the theory of safety engineering to guide decisions, without requiring that everyone working with you be handed a reading list.
    • “Paper: Repentance as Rebuke: Betrayal and Moral Injury in Safety Engineering”, also by Fred Hebert. A discussion of a paper by Dekker et al which looks at the aftermath of the 737 MAX air disasters, and the public repentance of some of the engineers who were involved. Go read the post, it’s great. And I’m planning to read the original paper this week.
    • “Cannon Lake: Intel’s Forgotten Generation”, from Chips and Cheese. Really I’ve been reading a bunch of the technical posts from Chips and Cheese lately, and they’re doing pretty good analyses of recent hardware. They’ve definitely earned that spot in my RSS reader.
    • Glenn K Lockwood’s “SC’22 Recap”. I was sad to miss Supercomputing this year, though enough folks have come down with COVID that I don’t really regret the decision. But Glenn wrote up a really interesting recap post, with an interesting new viewpoint now that he’s working at Microsoft Azure. Among other things, he included a whole section titled The underwhelming, with the opening line “The biggest deal appears to be that exascale is here, and it turns out that it’s not that big of a deal.”

    Recent recipes


    Because it was Thanksgiving, I did a lot of cooking this week! I’m not going to list everything I made, but a few of my favorites were:

    • Cheesy Garlic Butter Rolls from Delish: Nothing special, but really tasty.
    • Challah Stuffing from Smitten Kitchen: This recipe was a huge winner, with most of the family coming back for seconds, and then having more the next day for leftovers. It was really good, and is probably what I’ll make if I ever do stuffing again.
    • Best Challah from Smitten Kitchen: I baked the bread that went into the stuffing, and it was really tasty on its own! This recipe makes two loaves, and I only needed one for the stuffing. So I also made french toast with it, which worked really nicely.

    Pet photos


    Gotta have those pet photos.

    A blond golden doodle in a red harness and a blue bandanna lays on sandy dirt and looks into the camera
    A white calico cat sits on a blanket and washes her front paw
    A gray-brown tabby cat wearing a green collar sitting on a wall, looking vaguely toward the camera
diff --git a/2022/adam-s-weekly-update-2022-12-04/index.html b/2022/adam-s-weekly-update-2022-12-04/index.html
new file mode 100644
This is a crosspost from Computing – thinking out loud. See the original post here.

    Adam’s weekly update, 2022-12-04


    What’s new


    This week was really intense from a work perspective. Not “bad intense”, but the kind of week where every day was spent with such a level of focus, that at 5 PM or so I found myself staring off into space and forgetting words. I think I got some good things accomplished, but my brain also felt like mush by the time the weekend came.


    This week I’m traveling to San Jose for work (I just checked into my hotel a little while ago!), so I fully expect this week to also be eaten by work. So I don’t promise anything terribly interesting for next week’s post…


    However, I did take advantage of a Sunday in San Jose to visit the Computer History Museum in Mountain View! I try to visit the museum every few years, and while a lot of the exhibits are the same, enough things change that I always get something new from the visit. Also, I’ve been doing a lot of reading about hardware development and the history thereof lately, so it was interesting to examine the museum through that new lens.


    I may write more about my visit later this week — it definitely sparked some thoughts — but in the mean time, here are a few photos I took while wandering around the museum.

    A mechanical computer built mostly of brass, with various numerical dials. A small placard labels this as a replica of the Babbage Difference Engine No. 1 Demonstration Piece.
    The Babbage Difference Engine, and other mechanical computers, have always fascinated me.
    The Cray-1, a round computer with its own built-in seating attached.
    Can’t visit the museum without visiting the Cray-1.
    The Connection Machine 1, a large black cube divided in eight sections.
    I would have loved to have seen a CM-1 in operation, with its red LEDs showing the operation of its many single-bit CPUs.
    The front panel of an Altair 8800 computer, with an array of LEDs and switches controlling the state of individual bits.
    Having recently read Charles Petzold’s “Code”, I was struck by how closely the front panel of the Altair 8800 resembles the fictional front panel of the computer that Petzold constructs from logic gates up.
    A Dell PowerEdge R710 lays on a white plastic table, top cover off, surrounded by instructions on how to disassemble it.
    The CHM Learning Lab now includes a back room with a couple of Dell PowerEdge R710 servers, complete with instructions for how to disassemble and reassemble them. Anyone who wants can wander in and take them apart. It was great fun watching a 5-year-old kid pulling components out of one of these… As well as feeling a little weird, as I think I’ve run these in production!

    What I’m reading


    I don’t have a ton to share this week — honestly, the whole week feels like a blur — but here are two books that I recommend.

    • The Red Scholar’s Wake, by Aliette de Bodard: As the blurb says, “Lesbian space pirates!” Also, a really wonderful novella about building a new relationship amidst grief, power differentials, politics, and space battles. I think I basically recommend everything that de Bodard writes, but especially this. And it basically stands alone! So you can read this first, without going back to the other stories in the same world.
    • Dealers of Lightning: XEROX PARC and the Dawn of the Computer Age, by Michael Hiltzik: I’ve just started this, but it’s already a really interesting snapshot of a key period in the development of the personal computer.

    Recent recipes


    Pet photos

    Phyrne the calico cat stares down into the camera from a stairway
    Close-up on the face of Percy the gray tabby cat
    Benny the golden doodle curled up on a dog bed
diff --git a/_posts/markhpc/2022-11-28-lambda.md b/2022/an-initial-look-at-deep-learning-io-performance/index.html
rename from _posts/markhpc/2022-11-28-lambda.md
rename to 2022/an-initial-look-at-deep-learning-io-performance/index.html
This is a crosspost from Mark Nelson’s Blog. See the original post here.

    An Initial Look at Deep Learning IO Performance


    Abstract

This blog post describes an investigation of the IO behavior of TensorFlow and PyTorch during resnet50 training on Lambda Lab’s 8x V100 GPU instances. Both ephemeral local NVMe storage and network attached persistent storage were tested. The local NVMe storage was fast enough to achieve the throughput rate required to hit synthetic test targets. The network attached persistent storage may not be able to fully saturate 8 V100 GPUs during training, though it can achieve nearly the same level of performance as the local storage so long as TFRecords are utilized. Further, there are specific behaviors and bottlenecks in TensorFlow and PyTorch that can reduce training performance when using real data from ImageNet.

    Acknowledgements

    Thank you to Michael Balaban at Lambda Labs for providing access to their GPU cloud for this testing. Thank you to Chuan Li for the creation of his TensorFlow benchmarking tools. Thank you also to Andrej Karpathy, Toby Boyd, Yanan Cao, Sanjoy Das, Thomas Joerg, and Justin Lebar for their excellent blog posts on deep learning and XLA performance that helped inform this article. I hope that this post will be useful for others as your work and writing was useful for me.


    Introduction

[…]

    That was the phrase that stuck in my head when I first started this project. What project you may ask? I want to understand how deep learning experiments utilize fast storage devices. Not just any experiments either: real ones, preferably big. That’s how I happened upon Andrej Karpathy’s blog. He is the former Sr. Director of AI at Tesla and knows a thing or two about training big neural networks. I’ve spent the last decade working on Ceph and have worked on distributed systems and distributed storage for nearly 2 decades at this point. But training neural nets? The closest I’ve come was back in the early 2000s when I tried to build a tool to predict video game framerates. I scraped benchmark numbers from review websites and built M5 decision trees based on hardware and video card settings. It sort of worked, but was terribly overtrained on a small (~4000 sample) dataset. Training with petabytes of data to teach an AI how to responsibly drive a car? I can already feel a bit of imposter syndrome setting in.


    Thankfully my goal is comparatively modest. I don’t need to build a cutting edge classifier or explore the intricacies of manually implementing back-propagation. I simply want to understand the IO patterns that are involved when training big datasets with fast GPUs so I can help researchers speed up their work. Up until now, my ability to do this was fairly limited. At the day job I’ve had access to a small group of nodes with extremely modest GPUs. I set up runs with MLPerf but the datasets (WMT G-E and CoCo) easily fit into memory. Other than a short burst of read traffic at the very beginning of training there was very little IO. Recently I had the opportunity to meet Michael Balaban, Co-Founder of Lambda Labs. I told him what I wanted to do and he gave me access to Lambda’s GPU cloud and beta persistent storage to give it a try. I was able to grab one of Lambda’s 8x Tesla V100 instances (These things are incredibly popular so it’s best to grab one early in the morning!). Not all of Lambda’s instance types currently have access to the persistent storage but the V100 instances in the Texas zone do. Once secured, I got to work.


    TensorFlow - Synthetic

    Before even attempting to run tests with real data, I realized I needed a baseline to start with. Luckily, Chuan Li, Lambda’s Chief Scientific Officer, wrote a tool for running TensorFlow benchmarks and made it available on github here. One of the advantages of Lambda’s cloud is that they’ve already bundled up many popular tools for running deep-learning workloads into one package called Lambda Stack which comes pre-installed when you start an instance. This made it fast to get started, though I did run into one issue. Lambda Stack comes standard with TensorFlow 2, but Chuan Li’s tool relies on a TensorFlow benchmark submodule that is designed to work with TensorFlow 1. Luckily, the parent repository was unofficially updated to work with Tensorflow 2 (with a warning that it is no longer being maintained). A quick “git checkout master” in the “benchmarks” submodule directory got everything working. Chuan Li’s tool makes it simple to run tests with several preconfigured templates already included. I chose the fp16 resnet50 configuration as it should be fast at processing images and is fairly standard.

    TF_XLA_FLAGS=--tf_xla_auto_jit=2 ./batch_benchmark.sh X X 1 100 2 config/config_resnet50_replicated_fp16_train_syn
     

Using the invocation provided in the benchmark README.md file, I was able to quickly run benchmarks with synthetic data on up to 8 V100 GPUs in the node. At one point I got stuck, hitting what appeared at first to be an unexplainable 25% performance loss. I reran the tests multiple times and even monitored GPU clock speeds and temperatures in nvidia-smi with no luck. Ultimately I discovered my error: in the slow cases, I had inadvertently left out the “TF_XLA_FLAGS=--tf_xla_auto_jit=2” environment variable. It turns out that setting this allows Tensorflow to compile and execute functions with XLA (Accelerated Linear Algebra) support, which is a pretty big win for these tests.

    At this point I decided that I needed to understand how Chuan Li’s tool works. It turns out that he is using the same base tf_cnn_benchmarks.py benchmark code that companies like Nvidia and Dell also use for benchmarking their GPU solutions. I spent some time running it directly with Dell’s settings from their deep learning overview here. Unfortunately those tests had mixed results, even after various tweaks. While researching the XLA issues I mentioned earlier however, I made an even better discovery on the TensorFlow website. I found an excellent blog post with performance data written by some of the core Tensorflow developers. It’s now 4 years old, but still appears to be quite valid. The tuning options used were both simpler and resulted in higher performance versus other configurations that I’ve come across.


    Training with synthetic data in Lambda’s cloud resulted in similar performance to what the Tensorflow developer’s reported. In fact, using their own settings yielded slightly faster results when running on Lambda’s 8xV100 instance! It was incredibly encouraging to me that even in Lambda’s cloud environment with virtual machine instances I could achieve performance that was as fast or faster than what the Tensorflow developers were reporting.


    Choosing a Real Data Set


    Having convinced myself that I had Tensorflow operating reasonably efficiently in synthetic tests, it was time to start thinking about what dataset to use for “real” training. The largest and most obvious choice is ImageNet. ImageNet is composed of over 1.2 million categorized images that form a roughly 160GB training dataset. It is also the largest dataset I could find that was publicly accessible. Downloading it isn’t so easy however. The only version that I could access is the ImageNet Object Localization Challenge dataset hosted on kaggle.


    After finally figuring out how to download the data, it was time to follow Andrej’s advice and try to learn something about it. While ImageNet is curated and annotated, it has many images of different sizes, dimensions, and pixel counts. Images also come from many sources with different levels of quality. Through the power of stack-exchange I was able to find a bash one-liner script to generate a histogram of image sizes:

    find . -type f -print0 | xargs -0 ls -l | awk '{size[int(log($5)/log(2))]++}END{for (i in size) printf("%10d %3d\n", 2^i, size[i])}' | sort -n
     


    Roughly 80% of the images are in the 64KB or 128KB size bins. Almost all of the remaining images are smaller. That gives us a pretty good idea of what kind of IOs to expect during classification. Or at least…it does for frameworks that read those images directly. In Tensorflow’s case, there’s an alternative format called TFRecord. TFRecords are basically collections of image data sequentially laid out in much larger files. Instead of iterating over thousands or millions of individual image files, TFRecords allow Tensorflow to instead stream fewer, larger files that each house multiple images. It’s a one time cost to pre-process the data so Tensorflow has less work to do during training. After I downloaded the ImageNet data I took a shot at converting the ImageNet LOC data into TensorFlow records. Luckily, the TensorFlow tpu github repository already has a tool that can do this. I had to manipulate the dataset slightly, but ultimately this process worked (at least for the training data):

    pip install gcloud google-cloud-storage
     pip install protobuf==3.20.1
     
   ...
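To illustrate the read path that the conversion sets up, here is a minimal tf.data sketch for streaming the records back at training time. The shard glob and feature keys follow the tpu converter’s conventions as I understand them, so treat both as assumptions:

import tensorflow as tf

# Stream a few large TFRecord shards sequentially instead of opening
# ~1.2 million small JPEG files.
files = tf.data.Dataset.list_files("/data/imagenet-tf/train-*")
ds = tf.data.TFRecordDataset(files, num_parallel_reads=tf.data.AUTOTUNE)

def parse(example):
    features = {
        "image/encoded": tf.io.FixedLenFeature([], tf.string),
        "image/class/label": tf.io.FixedLenFeature([], tf.int64),
    }
    parsed = tf.io.parse_single_example(example, features)
    image = tf.io.decode_jpeg(parsed["image/encoded"], channels=3)
    return image, parsed["image/class/label"]

ds = ds.map(parse, num_parallel_calls=tf.data.AUTOTUNE).batch(256)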

    Perhaps I should say that this worked so long as the original dataset was located on the local NVMe drive. The persistent storage didn’t fare as well. Attempting to decompress ImageNet on the persistent storage resulted in blowing past the max number of open files allowed with errors like:

    OSError: [Errno 24] Too many open files.
     

    Unfortunately this couldn’t be fixed on the instance. It appeared to be passed through from the host and the persistent storage was completely unusable until the instance was rebooted. Recently I spoke to one of Lambda’s engineers and they are working on a fix. (It may already be implemented by the time you read this!) I also want to note that the persistent storage is still in beta so issues like this are not entirely unexpected. Having said that, before hitting the error it was significantly slower extracting ImageNet on the persistent storage vs on the local NVMe storage. It’s probably best to extract ImageNet locally and then write the large TFRecords to the persistent storage during the conversion process. Luckily extracting ImageNet to local storage was fine, and storing the original archive and the resulting TFRecords on the persistent storage worked perfectly fine as well.


    FIO - Baseline IO Results

    Next, I turned my attention to running baseline tests on Lambda’s local and persistent storage using fio. Fio is a highly configurable and well respected benchmark in the storage community and perfect for generating baseline results. I decided to use a dataset size that is roughly similar to ImageNet (200GB), the libaio engine in fio with direct IO, and an appropriately high IO depth to let the NVMe drives stretch their legs a bit.
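A representative invocation for the large sequential read baseline might look like the following. This is my reconstruction for illustration; only the bssplit variant quoted below is verbatim:

fio --ioengine=libaio --direct=1 --bs=4M --iodepth=128 --rw=read --size=200G --numjobs=1 --runtime=300 --time_based --name=baseline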


    Throughput with the local NVMe drive(s) is surprisingly good. The persistent storage is slower, but still might be fast enough at a little over 1GB/s for large reads. 16K IOPS was somewhat slower in both cases. I chose 16K so that I could quickly compare to tests I ran in my Ceph QEMU/KVM performance blog post here. Without getting into the details, I suspect there’s still some room for improved IOPS with Lambda’s setup. Luckily though, converting into TFRecords should make Tensorflow throughput bound instead of latency bound. What about PyTorch or other tools that want to read images directly though? Fio gives us the ability to simulate it by using its ‘bssplit’ feature. We can take the size ranges and percentiles generated when examining ImageNet and give fio a similar distribution:

    fio --ioengine=libaio --direct=1 --bssplit=2K/1:4K/2:8K/4:16K/8:32K/13:64K/38:128K/33:256K/1 --iodepth=128 --rw=randread --norandommap --size=200G --numjobs=1 --runtime=300 --time_based --name=foo
     


    This isn’t exactly right as we are not reading data spread across millions of files, but it should provide something of an upper bound on what to expect. It looks like the persistent storage can do approximately 10K reads/second at a throughput rate of around 750MB/s. The local storage is about 3-4 times faster. Local storage should be fast enough to support the kind of images/second throughput rates we want to hit in Tensorflow on 8 V100 GPUs, but the jury is still out for the persistent storage.


    Tensorflow - ImageNet

    Running benchmarks with real data rather than synthetic data is fairly straightforward in Tensorflow. You simply append data_dir and data_name flags to the CLI invocation to let it know where the TFRecords are located:

    sync; echo 3 | sudo tee /proc/sys/vm/drop_caches
     python ./tf_cnn_benchmarks.py --batch_size=256 --num_batches=100 --model=resnet50 --optimizer=momentum --variable_update=replicated --all_reduce_spec=nccl --use_fp16=True --nodistortions --gradient_repacking=2 --compute_lr_on_cpu=True --single_l2_loss_op=True --xla_compile=True --num_gpus=8 --loss_type_to_report=base_loss --data_dir=/home/ubuntu/ImageNet-TF/train --data_name=imagenet
     


    Ouch. Much lower performance with the ImageNet data vs synthetic! This is especially unfortunate given that 4 years ago the Tensorflow developers reported much better results. I spent some time reading and experimenting with different settings. Ultimately the one setting that made a substantial difference was “datasets_num_private_threads”. In the Tensorflow benchmark source code, this setting is described as: “[The] number of threads for a private threadpool created for all datasets computation.” I’ll go into more detail what these threads are doing in a bit. For now, let’s see how increasing the number of threads affects the results:


    Increasing the number of private threads has a dramatic effect on performance, though I was unable to fully match the performance achieved in the synthetic tests on either the local or persistent storage. The local storage fared better at high thread counts gradually topping out at around 8600 images/second. At high private thread counts the persistent storage topped out between 7000-8000 images/second with a higher degree of variability between runs. I suspect that in this case the persistent storage has likely hit its (per instance) limit.
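Concretely, the private pool is sized by appending one more flag to the tf_cnn_benchmarks invocation shown earlier (the value 32 here is just illustrative):

python ./tf_cnn_benchmarks.py <flags as above> --data_dir=/home/ubuntu/ImageNet-TF/train --data_name=imagenet --datasets_num_private_threads=32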


    In addition to having a dramatic effect on performance, changing the private thread count also had a large effect on the CPU consumption of the TensorFlow process. CPU usage increases almost linearly with additional private threads up to around 30 cores. What exactly are these private threads doing? To answer that question, I utilized two tools that I often deploy when diagnosing CPU usage in Ceph. When testing with a lower number of private threads, I used linux’s perf tool to look at where cycles are being consumed when the private threads are fully saturated. At higher levels of private threads, I used my wallclock profiler uwpmp to look at how private threads spend their time when increasing the thread count no longer improves performance.


    In the first case with perf, we can get a good view of the work that these private threads are doing:

    --77.31%--tensorflow::ThreadPoolDevice::Compute
               |          
               |--51.19%--0x7f511a00c7d8
   ...

    The majority of the cycles consumed is in jpeg decompression and resize operations, along with a smattering of other stuff. What happens if we look at a case with a higher private thread count but now look at wallclock time instead of cycles? I ended up having some trouble getting the profiler to work properly and consistently get clean callgraphs, but I was able to get at least one run in that revealed some interesting information. First, I saw time spent in the same functions that perf told us we were spending cycles in:

    + 100.00% Eigen::ThreadPoolTempl<tensorflow::thread::EigenEnvironment>::WorkerLoop(int)
      + 99.90% ???
      |+ 97.30% ???
   ...

    But the wallclock profile also exposed that there may be contention in multiple areas in the private threads around some of the nsync synchronization primitives being used:

     |||||||    |  + 4.50% nsync::nsync_mu_semaphore_p(nsync::nsync_semaphore_s_*)
      |||||||    |   + 4.50% syscall
     

    This almost always appeared nested deep inside:

    tensorflow::BFCAllocator::AllocateRaw(unsigned long, unsigned long, tensorflow::AllocationAttributes const&)
     

Sadly I was missing a number of debug symbols and don’t 100% trust the wallclock trace. For now I’ll just say that the private threads are doing a significant amount of work decompressing and manipulating the image data to keep the GPUs fed. I suspect that with newer and faster GPUs the image retrieval pipeline could become an even bigger issue when training with real image data. The mystery for me is how the TensorFlow developers achieved such good results 4 years ago without using dedicated private threads at all. Perhaps they had a significantly faster jpeg decompression mechanism that I am unaware of?

    PyTorch - ImageNet

    After running Tensorflow, I also ran some benchmarks in PyTorch using Nvidia’s “DeepLearningExamples” github repo. First, I installed the prereqs and setup the repository:

    pip install 'git+https://github.com/NVIDIA/dllogger'
     pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda110
     git clone https://github.com/NVIDIA/DeepLearningExamples
   ...

    There are a couple of differences here versus the TensorFlow tests. First, I’m using the raw ImageNet archive instead of a preprocessed TFRecord dataset, so the read behavior is different. Because I was unable to extract or copy the raw ImageNet archive onto the persistent storage, I’m also only testing the local NVMe drive. Finally, I didn’t see any specific examples for running with fp16 in nVidia’s documentation, so I’m using amp (automatic mixed precision) which may be slightly slower.


    Given the number of differences it’s tough to draw direct comparisons with Tensorflow. Amp is one difference, but it’s quite possible that there are tuning options that could improve performance here that I don’t know about. I did notice that PyTorch, like Tensorflow, is using quite a bit of CPU to keep the GPUs working. I suspect that there are ways to tweak the IO pipeline that could improve performance. For now though, let’s compare the IO patterns on the local NVMe drive during the Tensorflow and PyTorch runs. I was hoping to be able to use blktrace to do this, but unfortunately was unable to get any data from the virtual devices in the instance. I was able to collect more general statistics using collectl however.

[collectl tables elided: “Disk Read Statistics During PyTorch 8 GPU run” and the corresponding TensorFlow run]


    When just looking at the IO sizes, both runs appear similar, but that doesn’t tell the whole story. It is likely that Tensorflow is doing much larger reads that are broken up into contiguous 128KB chunks by the block layer based on the underlying device’s max_sectors_kb setting. The tells here are the very low queue length and wait times for the TensorFlow run versus the PyTorch run. In both case the device service times are low (0), but in the TensorFlow case IOs are still backing up in the device queue.

    -

    Interestingly, it appears that it may be possible to use nVidia’s DALI (Data Loading Library) package to read TFRecords into PyTorch. I didn’t have time to attempt it, but potentially that could have a big effect on IO behavior and performance as well.

    -

    Conclusion

    As I’ve been writing this post, I realize just how complicated it is to understand the performance characteristics of training of neural networks. Even as we talk about metrics like images/second, the options that are used (batch size for instance) can also affect convergence. It’s very difficult to come up with a common methodology that is always better than others. I wonder if another metric, like reaching a desired level of convergence, would be better in the end. Having said that, I am glad for having done this exercise as I learned some valuable things:

    -
    1. Pre-processing data into a format like TFRecords on fast local storage is a big win from an IO perspective. It lets storage systems that have slow metadata performance succeed so long as they have enough sequential read throughput to keep the machine learning framework busy. This is a big win for many distributed file systems that may have substandard metadata performance (and even the good ones may still benefit).

      @@ -417,5 +447,76 @@

      Conclusion

      This wraps up my initial work looking at Deep Learning IO behavior. I hope that next time I can come armed with a bit more knowledge about the internals of how PyTorch and Tensorflow work, focus a bit more on the quality of the training, find even larger datasets to work with, and maybe actually accomplish something useful rather than just play with ImageNet.

      +

      Thanks for reading!

      + + +
diff --git a/2022/an-unstructured-rant-on-running-long-lived-software-services/index.html b/2022/an-unstructured-rant-on-running-long-lived-software-services/index.html
new file mode 100644
This is a crosspost from Computing – thinking out loud. See the original post here.

      An unstructured rant on running long-lived software services


      – Be kind to your colleagues. Be kind to your users. Be kind to yourself. This is a long haul and you’ll all fuck up.


      ⁃ The natural environment for your code is production. It will run there longer than it does anywhere else. Design for prod first, and if possible, make your dev environment act like prod.


      ⁃ Legacy code is the only code worth caring about.


      ⁃ Users do weird stuff, but they usually have a very good reason, at least in their context. Learn from them.


      ⁃ It’s 2022, please do structured logging.


      ⁃ Contexts and tracing make everyone’s lives easier when it comes time to debug. At minimum, include a unique request id with every request and plumb it through the system.
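In the spirit of the two points above, a minimal sketch of structured logging with a plumbed-through request id (Python; all names are mine):

import json, logging, sys, uuid

class JsonFormatter(logging.Formatter):
    # one JSON object per line; downstream tools can filter on request_id
    def format(self, record):
        return json.dumps({
            "ts": self.formatTime(record),
            "level": record.levelname,
            "msg": record.getMessage(),
            "request_id": getattr(record, "request_id", None),
        })

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(JsonFormatter())
log = logging.getLogger("svc")
log.addHandler(handler)
log.setLevel(logging.INFO)

req_id = str(uuid.uuid4())  # minted at the edge, passed along every downstream call
log.info("request started", extra={"request_id": req_id})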


      ⁃ Do your logging in a separate thread. It sucks to find a daemon blocked and hanging because of a full disk or a down syslog server.


      ⁃ Don’t page for individual machines going down. Do provide an easy or automated way for bad nodes to get thrown out of the system.


      – Be prepared for your automation to be the problem, and include circuit breakers or kill switches to stop it. I’ve seen health checks that started flagging every machine in the fleet as bad, whether it was healthy or not. We didn’t bring down prod because the code assumed if it flagged more than 15% of the fleet as bad, the problem was probably with the test, not the service.
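A sketch of that kind of guard (Python; the names are mine and the 15% threshold comes from the anecdote above):

def flag_bad_nodes(fleet, is_healthy, max_bad_fraction=0.15):
    bad = [n for n in fleet if not is_healthy(n)]
    if len(bad) > max_bad_fraction * len(fleet):
        # too many "failures" at once: suspect the health check, not the fleet
        raise RuntimeError(f"refusing to flag {len(bad)}/{len(fleet)} nodes")
    return bad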


      ⁃ Make sure you have a way to know who your users are. If you allow anonymous access, you’ll discover in five years that a business-critical team you’ve never heard of is relying on you.


      ⁃ Make sure you have a way to turn off access for an individual machine, user, etc. If your system does anything more expensive than sending network requests, it will be possible for a single bad client to overwhelm a distributed system with thousands of servers. Turning off their access is easier than begging them to stop.


      ⁃ If you don’t implement QOS early on, it will be hellish to add it later, and you will certainly need it if your system lasts long enough.


      ⁃ If you provide a client library, and your system is internal only, have it send logs to the same system as your servers. This will help trace issues back to misbehaving clients so much.


      ⁃ Track the build time for every deployed server binary and monitor how old they are. If your CI process deploys daily, week-old binaries are a problem. Month-old binaries are a major incident.


      ⁃ If you can get away with it (internal services): track the age of client library builds and either refuse to support builds older than X, or just cut them off entirely. It sucks to support requests from year-old clients, force them to upgrade!


      ⁃ Despite all this, you will at some point start getting requests from an ancient software version, or otherwise malformed. Make sure these requests don’t break anything.


      ⁃ Backups are a pain, and the tooling is often bad, but I swear they will save you one day. Take the time to invest in them.


      ⁃ Your CI process should exercise your turnup process, your decommission process, and your backups workflow. Life will suck later if you discover one of these is broken.


⁃ Third party services go down. Your service goes down too, but they probably won’t happen at the same time. Be prepared to either operate without them, or mirror them yourself.


      ⁃ Your users will never, ever care if you’re down because of a dependency. Every datacenter owned by AWS could be hit by a meteor at the same time, but your user will only ever ask “why doesn’t my service work?”


      ⁃ Have good human relationships with your software dependencies. Know the people who develop them, keep in touch with them, make sure you understand each other. This is especially true internally but also important with external deps. In the end, software is made of people.


⁃ If users don’t have personal buy-in to the security policy, they will find ways to work around it and complain about you for making their lives harder. Take the time to educate them, or you’ll be fighting them continuously.

diff --git a/2022/cache-age-binning-pr-finally-merged/index.html b/2022/cache-age-binning-pr-finally-merged/index.html
new file mode 100644
This is a crosspost from Mark Nelson’s Blog. See the original post here.

      Cache Age Binning PR Finally Merged!


I’ve had this PR hanging around in various forms for years. It’s basically the last piece of the OSD memory target code. We can now get a “binned” view of the relative ages of items in different LRU caches and dynamically adjust target sizes for different caches. The PR is here and memory usage behavior charts are here.

diff --git a/2022/ceph-osd-cpu-scaling-part-1/index.html b/2022/ceph-osd-cpu-scaling-part-1/index.html
new file mode 100644
This is a crosspost from Mark Nelson’s Blog. See the original post here.

      Ceph OSD CPU Scaling - Part 1


      Last summer we had a user that hit some performance issues based on a recommendation to use 2 cores per OSD in their systems. I wanted to provide some data for the community and wrote up a blog post on the ceph.io website. Please take a look!

diff --git a/2022/ceph-rocksdb-tuning-deep-dive/index.html b/2022/ceph-rocksdb-tuning-deep-dive/index.html
new file mode 100644
This is a crosspost from Mark Nelson’s Blog. See the original post here.

      Ceph RocksDB Tuning Deep-Dive


      See my post on the Ceph.io blog about tuning RocksDB in Ceph!

diff --git a/2022/containerize-it-baby/index.html b/2022/containerize-it-baby/index.html
new file mode 100644
      + This is a crosspost from   VanessaSaurus dinosaurs, programming, and parsnips. See the original post here.
      + +
      +

      Containerize It, Baby!

      +

I’ve just submitted my entry to the HPC Guru Elevator Pitch Contest for the Supercomputing 2022 conference!

      + +

I’m fairly sure (like many of these contests) it will be a politically correct winner - someone whose entry best appeals to the conference - but I’ll take a stand right now that I think my submission is tops in terms of creativity and excited energy! I mean, there is just no alternative when it comes to technologies I’m excited about.

      + +
      +

      Containerize it, baby!

      + +
      + +

      Mic Drop! 🎙️

      + +

      Regardless of the outcome of this contest, I feel like I’ve already won - I’ve had so much fun making this and sharing with the community! 🎉️

      + +
      +
      + +
      + + + + + + + + + + +
      + + + + + diff --git a/2022/converged-computing/index.html b/2022/converged-computing/index.html new file mode 100644 index 0000000..c82faec --- /dev/null +++ b/2022/converged-computing/index.html @@ -0,0 +1,205 @@ + + + + + + + Converged Computing - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
      + +
      +

      hpc.social

      + + + + + + + + + + + +
      + High Performance Computing
      Practitioners
      and friends /#hpc +
      +
      + +
      +
      +
      + +
      + +
      + Share:  + +
      +
      + +
      + This is a crosspost from   VanessaSaurus dinosaurs, programming, and parsnips. See the original post here.
      + +
      +

      Converged Computing

      +

For many years, there has been a battle between cloud and HPC. The cloud side of the equation says “micro services, cloud native!” and the HPC side says “too expensive!” Conversations often don’t progress because both sides are up-in-arms and focused on why they cannot work together. At best, we might get access to cloud from an HPC center, or a company might present a product as branded for “HPC.” But it’s not truly collaborative in the way that I’d like.

      + +

I’ll also step back and comment that I do not believe folks on the HPC side (myself included) have done enough to sit at the table. For example, we haven’t been a voice in the Open Containers Initiative (although I’ve tried), nor have we been present (historically) at conferences that are more focused around cloud native technologies. There is no finger-pointing or fault here - it’s just a matter of two different cultures, and it’s been challenging figuring out how to talk to one another, and how to work together. I’ve tried my best to be involved, to the best of my ability, in small ways on both sides. But I’m only one person. This isn’t to say there haven’t been small collaborations, but I believe we can do more.

      + +

      Change is Coming

      + +

I think this is going to change. The reason is that both sides of the equation have started to realize we have similar goals, and it’s not about creating hybrid environments – having both pancakes and waffles for breakfast – but rather convergence – recognizing that pancakes and waffles are both kinds of breakfast cakes, and we can take the features we like of each to create a breakfast cake that will make everyone happy. The idea of “Converged Computing” comes from my amazing team (see Dan’s talk at KubeCon here): technologies from HPC can be integrated into more traditional cloud approaches to produce a solution that solves problems on both sides. Explicitly for these projects, it means testing the Flux Framework scheduler alongside Kubernetes. Do we still want portable workflows that can move from an HPC environment to cloud? Of course. However, the niche or gradient that I’m interested in is the space that lives between these two worlds.

      + +

While I won’t go into huge detail (this would be more appropriate for a talk), the lab openly works on Flux Framework, a resource manager that (in my opinion) is one of the coolest projects coming out of our space. I started working with these teams a few months ago, and am bringing my excitement and vision for (what I hope to be) a future where we are actively developing alongside other Kubernetes projects, and our work is well-known and established in this space. What does that mean? Let me share some cool work under development. This is all being done publicly on GitHub, so there is no issue talking about it! My first year or so at the lab I was hired under a research project, and although I learned a lot, I hadn’t felt inspired and driven until starting this work. Let’s talk about some of it! 🎉️

      + +

      The Flux Operator

      + +
      + +
      + +

If you aren’t familiar with Kubernetes Operators, let’s step back and talk about a human operator. If you are a sysadmin managing apps with associated services and databases on a cluster, you often have to do maintenance or update tasks like increasing a storage volume, or modifying a service for a new user need. As this pattern emerged as a common thing, the community came up with the concept of a Kubernetes Operator - an actual controller you install to your cluster that can automate this. In simple terms, after you install an operator to your cluster, you can hand it a desired state (represented in a yaml configuration file) and the operator will do whatever it takes to reach that state. What does that mean in the context of Flux? The Flux Operator is interested in creating what we are calling a “Mini Cluster,” illustrated below.

      + +
      + +
      + +

In Kubernetes object terms this is an Indexed Job, a few config maps, secrets, and a RESTful API and user interface (which I designed) exposed as a service. You can read more about our current design here.

      + +

This Mini Cluster is generated from a “custom resource definition” or CRD (the yaml you provide), and it can take these parameters. Conceptually, you as the user own the Mini Cluster and can submit jobs to it (either via the web interface or the API) until you are done. When you are done, you can bring down the cluster.
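To make the “hand the operator a desired state” idea concrete, here is a minimal sketch using the official Kubernetes Python client. The group, version, plural, and spec fields below are illustrative guesses, not the Flux Operator’s actual CRD schema - check the linked design docs for the real parameters.

# Sketch: submitting a hypothetical MiniCluster custom resource to an operator.
# The group/version/plural and spec fields are assumptions for illustration.
from kubernetes import client, config

config.load_kube_config()  # use your local kubeconfig
api = client.CustomObjectsApi()

mini_cluster = {
    "apiVersion": "flux-framework.org/v1alpha1",  # assumed group/version
    "kind": "MiniCluster",
    "metadata": {"name": "demo", "namespace": "default"},
    "spec": {"size": 4, "image": "ghcr.io/rse-ops/flux-example"},  # hypothetical fields
}

api.create_namespaced_custom_object(
    group="flux-framework.org",  # assumed
    version="v1alpha1",          # assumed
    namespace="default",
    plural="miniclusters",       # assumed
    body=mini_cluster,
)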

      + +

      We are excited for this work because in the next months (to a bit longer) we are going to be testing different kinds of workloads +running using Flux alongside this Mini Cluster, but on Kubernetes! I’ve started a small repository of dummy examples that I’m extending quickly at +rse-ops/flux-hpc and please open an issue there if you have a suggestion.

      + +

      Stay Tuned!

      + +

Stay tuned for more work in this space! I’ve been doing a ton of programming in Go and Python, working on a wide range of technologies fairly quickly, and I am very much in my happy place. Please come and join us! ❤️

      + +
      +
      + +
      + + + + + + + + + + +
      + + + + + diff --git a/_posts/gaborsamu/2022-5-12-lsf_output.md b/2022/customizing-command-output-in-ibm-spectrum-lsf/index.html similarity index 71% rename from _posts/gaborsamu/2022-5-12-lsf_output.md rename to 2022/customizing-command-output-in-ibm-spectrum-lsf/index.html index 34ab028..78716a7 100644 --- a/_posts/gaborsamu/2022-5-12-lsf_output.md +++ b/2022/customizing-command-output-in-ibm-spectrum-lsf/index.html @@ -1,18 +1,87 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2022-05-12 13:16:02' -layout: post -original_url: https://www.gaborsamu.com/blog/lsf_output/ -slug: customizing-command-output-in-ibm-spectrum-lsf -title: Customizing command output in IBM Spectrum LSF ---- - -

IBM Spectrum LSF provides many ways to query the LSF cluster for information about workloads. As a user, once you’ve submitted a job to LSF, it’s logical to want to understand what has happened to your job. Has the job started yet? Is the job pending? If so, why is it pending? And the all-important, “Is my job done yet?”. Of course, LSF provides a very rich CLI which has been developed and refined over the past three decades. It’s also possible to get JSON-formatted output from various LSF query commands. This is useful for users and administrators alike as JSON-formatted output is easy to parse, and scripting can be used to extract values from the JSON output.

      + + + + + + + Customizing command output in IBM Spectrum LSF - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
      + +
      +

      hpc.social

      + + + + + + + + + + + +
      + High Performance Computing
      Practitioners
      and friends /#hpc +
      +
      + +
      +
      +
      + +
      + +
      + Share:  + +
      +
      + +
      + This is a crosspost from   Blogs on Technical Computing Goulash Recent content in Blogs on Technical Computing Goulash. See the original post here.
      + +
      +

      Customizing command output in IBM Spectrum LSF

      +

IBM Spectrum LSF provides many ways to query the LSF cluster for information about workloads. As a user, once you’ve submitted a job to LSF, it’s logical to want to understand what has happened to your job. Has the job started yet? Is the job pending? If so, why is it pending? And the all-important, “Is my job done yet?”. Of course, LSF provides a very rich CLI which has been developed and refined over the past three decades. It’s also possible to get JSON-formatted output from various LSF query commands. This is useful for users and administrators alike as JSON-formatted output is easy to parse, and scripting can be used to extract values from the JSON output.

This is not meant to be a definitive guide on how to query information in LSF, but rather provides some examples of the various ways that users can query job-related information using the LSF CLI. This will include a look at the -json and -o options which have been introduced during the lifecycle of the LSF v10.1.0 family. The -json option can be used to provide JSON-formatted output from various LSF query commands, and the -o option can be used to customize the fields in the output to only those desired.
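As a taste of why the JSON output is nice to script against, here is a small sketch that runs bjobs with a custom field list and parses the result in Python. The “RECORDS” key and upper-cased field names follow the LSF 10.1 documentation, but verify them against your version’s output.

# Sketch: parsing JSON-formatted bjobs output (key/field names assumed from LSF 10.1 docs).
import json
import subprocess

out = subprocess.run(
    ["bjobs", "-o", "jobid stat user queue", "-json"],
    capture_output=True, text=True, check=True,
).stdout

data = json.loads(out)
for record in data.get("RECORDS", []):
    print(record["JOBID"], record["STAT"], record["USER"], record["QUEUE"])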

      @@ -406,4 +475,76 @@ ] }
      -

      That concludes our brief look at LSF query commands. We’ve only scratched the surface here in terms of capabilities and query commands for LSF. The LSF command line interface is powerful and flexible including ways to customize the command outputs and to output in JSON-format. For more details, the complete set of IBM Spectrum LSF documentation can be found online at IBM Documentation here.

      \ No newline at end of file +

      That concludes our brief look at LSF query commands. We’ve only scratched the surface here in terms of capabilities and query commands for LSF. The LSF command line interface is powerful and flexible including ways to customize the command outputs and to output in JSON-format. For more details, the complete set of IBM Spectrum LSF documentation can be found online at IBM Documentation here.

      + +
      +
      + +
      + + + + + + + + + + + + + + + + diff --git a/2022/dashboards-for-learning-data-visualizations/index.html b/2022/dashboards-for-learning-data-visualizations/index.html new file mode 100644 index 0000000..422af83 --- /dev/null +++ b/2022/dashboards-for-learning-data-visualizations/index.html @@ -0,0 +1,227 @@ + + + + + + + Dashboards for Learning Data Visualizations - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
      + +
      +

      hpc.social

      + + + + + + + + + + + +
      + High Performance Computing
      Practitioners
      and friends /#hpc +
      +
      + +
      +
      +
      + +
      + +
      + Share:  + +
      +
      + +
      + This is a crosspost from   Dereks Web Thoughts from Derek. See the original post here.
      + +
      +

      Dashboards for Learning Data Visualizations

      +

Creating dashboards and data visualizations is a favorite pastime of mine. Also, I jump at any chance to learn a new technology. That is why I have spent the last couple of months building dashboards and data visualizations for various projects while learning several web technologies.

      + +

      Through these dashboards, I have learned many new technologies:

      + + + +

      GP-ARGO Dashboard

      + +

The Great Plains Augmented Regional Gateway to the Open Science Grid (GP-ARGO) is a regional collaboration of 16 campuses hosting computing that is made available to the OSG. My goal with the GP-ARGO dashboard was to show who is using the resources, as well as give a high-level overview of the region and sites hosting GP-ARGO resources.

      + +

The metrics are gathered from OSG’s GRACC Elasticsearch. The list of projects is also from GRACC, and the bar graph in the bottom right is simply an iframe to a Grafana panel from GRACC.

      + +

      Technologies used: React, NextJS, Leaflet, Elasticsearch

      + +

      Repo: GP-ARGO Map

      + +

      GP-ARGO

      + +

      OSDF Website

      + +

My next website was the Open Science Data Federation landing page. I was bolder in the design of the OSDF page, taking heavy inspiration from other technology websites such as the Mapbox website and the Lens website. The theme is darker, and it was also my first experience with the TailwindCSS library. Additionally, I learned the CSS flexbox layout techniques.

      + +

The spinning globe uses the Globe.gl library. The library is great for creating visualizations that show distribution throughout the world. On the globe I added “transfers” between the OSDF origins and caches. Each origin sends transfers to every cache in the visualization, though it’s all just animation. There is no data behind the transfers; it’s only for visual effect. Also, on the globe, each cache location is labeled. The globe can be rotated and zoomed with your mouse.

      + +

The number of bytes read and files read is gathered using the Elasticsearch client querying GRACC, the OSG’s accounting service. The OSG gathers statistics on every transfer a cache or origin performs. Additionally, we calculate the rate of data transfers and the rate of files being read using GRACC.
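As a rough illustration of that kind of query (the real dashboards do this in JavaScript, and the endpoint, index, and field names below are assumptions rather than the exact GRACC schema), a summed aggregation via the Python Elasticsearch client might look like:

# Sketch: summing transfer bytes with an Elasticsearch aggregation
# (elasticsearch-py 7.x-style call; names are placeholders, not the real GRACC schema).
from elasticsearch import Elasticsearch

es = Elasticsearch("https://gracc.example.org")  # hypothetical endpoint

resp = es.search(
    index="gracc-transfer-summary",  # assumed index name
    body={
        "size": 0,
        "aggs": {"bytes_read": {"sum": {"field": "Network"}}},  # assumed field
    },
)
print(resp["aggregations"]["bytes_read"]["value"])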

      + +

One unique feature of the OSDF website is the resiliency of the bytes read and files read metrics. We wanted to make sure that the metrics would be shown even if a data component had failed. The metrics are gathered in 3 different ways for resiliency (a sketch of the fallback chain follows the list):

      + +
        +
1. If all components are working correctly, the metrics are downloaded from the OSG’s Elasticsearch instance.
2. If OSG Elasticsearch has failed, the dashboard pulls saved metrics from NRP’s S3 storage. The metrics are saved every time they are successfully gathered from Elasticsearch, so they should be fairly recent.
3. The metrics are gathered and saved on each website build. The metrics are static and immediately available upon website load. If all else fails, these saved static metrics are always available, even if they may be old.
      + +
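In Python pseudo-form (the site itself implements this in JavaScript, and the helper functions here are hypothetical stand-ins), the fallback chain looks like:

# Sketch of the three-tier metrics fallback; the fetch helpers are stand-ins.
import json

def fetch_from_elasticsearch():
    raise ConnectionError("pretend OSG Elasticsearch is down")  # stand-in

def fetch_from_s3():
    raise ConnectionError("pretend the S3 copy is unreachable too")  # stand-in

def load_build_time_static():
    # Snapshot saved when the site was built (hypothetical file name).
    with open("static-metrics.json") as fd:
        return json.load(fd)

def get_metrics():
    for source in (fetch_from_elasticsearch, fetch_from_s3, load_build_time_static):
        try:
            return source()
        except Exception:
            continue  # fall through to the next, possibly staler, source
    raise RuntimeError("no metrics source available")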

      Technologies used: React, NextJS, Globe.gl

      + +

      Repo: OSDF Website

      + +

      OSDF

      + +

      NRP Dashboard

      + +

The National Research Platform dashboard is largely similar to the GP-ARGO dashboard. It uses the same basic framework and technologies, but the data acquisition is different.

      + +

The metrics shown are the number of GPUs allocated, the number of pods running, and the number of active research groups. The metrics are gathered from the NRP’s Prometheus server on-demand. The graph in the background of each metric is generated with D3.js.
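For a sense of what an on-demand pull looks like (the dashboard itself does this from JavaScript; the server URL and PromQL expression below are made up for illustration), Prometheus exposes a simple HTTP query API:

# Sketch: querying a Prometheus HTTP API for a single metric value.
# The URL and PromQL expression are illustrative assumptions.
import requests

resp = requests.get(
    "https://prometheus.example.org/api/v1/query",  # hypothetical server
    params={"query": 'count(kube_pod_status_phase{phase="Running"})'},
    timeout=10,
)
resp.raise_for_status()
result = resp.json()["data"]["result"]
print(result[0]["value"][1] if result else "no data")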

      + +

      Technologies used: React, NextJS, D3.js, Prometheus, TailwindCSS

      + +

      Repo: NRP Map App

      + +

      NRP Dashboard

      + +

      PNRP Website

      + +

The Prototype National Research Platform is an NSF research platform. The dashboard is also in the prototype stage, as the PNRP hardware is not yet fully delivered and operational.

      + +

      The dashboard is my first experience with a large map from Mapbox. I used a React binding to interface with the Mapbox service. Also, when you click on a site, it zooms into the building where the PNRP hardware will be hosted.

      + +

The transfer metrics come from the NRP’s Prometheus, which shows the bytes moving into and out of each node. The transfer metrics are for cache nodes near the sites, but once the PNRP hardware becomes operational the transfer metrics will show each site’s cache.

      + +

      Technologies Used: React, NextJS, Mapbox, TailwindCSS, Prometheus

      + +

      Repo: NRP Website

      + +

      PNRP Website

      + +
      +
      + +
      + + + + + + + + + + +
      + + + + + diff --git a/2022/developing-managed-vs-self-hosted-software/index.html b/2022/developing-managed-vs-self-hosted-software/index.html new file mode 100644 index 0000000..fdf899c --- /dev/null +++ b/2022/developing-managed-vs-self-hosted-software/index.html @@ -0,0 +1,178 @@ + + + + + + + Developing managed vs self-hosted software - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
      + +
      +

      hpc.social

      + + + + + + + + + + + +
      + High Performance Computing
      Practitioners
      and friends /#hpc +
      +
      + +
      +
      +
      + +
      + +
      + Share:  + +
      +
      + +
      + This is a crosspost from   Computing – thinking out loud works in progress and scattered thoughts, often about computers. See the original post here.
      + +
      +

      Developing managed vs self-hosted software

      +

I’ve done some work lately with teams that deliver their products in very different ways, and it has me thinking about how much our “best practices” depend on a product’s delivery and operations model. I’ve had a bunch of conversations about this tension.

      + +

      On the one hand, some of the teams I’ve worked with build software services that are developed and operated by the same team, and where the customers (internal or external) directly make use of the operated service. These teams try to follow what I think of as “conventional” SaaS best practices:

      + +
      • Their development workflow prioritizes iteration speed above all else
      • They tend to deploy from HEAD, or close to it, in their source repository
        • In almost all cases, branches are short-lived for feature development
      • They’ve built good automated test suites and well-tuned CI/CD pipelines
      • Releases are very frequent
      • They make extensive use of observability tooling, often using third-party SaaS for this
      • Fast roll-back is prioritized over perfect testing ahead of time
      • While their user documentation is mostly good, their operations documentation tends to be “just good enough” to onboard new team members, and a lot of it lives in Slack
      + +

      However, we also have plenty of customers who deploy our software to their own systems, whether in the cloud or on-premise. (Some of them don’t even connect to the Internet on a regular basis!) The development workflow for software aimed at these customers looks rather different:

      + +
      • Deploys are managed by the customer, and release cycles are longer
      • These teams do still have CI/CD and extensive automated tests… but they may also have explicit QA steps before releases
      • There tend to be lots of longer-lived version branches, and even “LTS” branches with their own roadmaps
      • Logging is prioritized over observability, because they can’t make assumptions about the customer tooling
      • They put a lot more effort into operational documentation, because most operators will not also be developers
      + +

      From a developer perspective, of course, this all feels much more painful! The managed service use case feels much more comfortable to develop for, and most of the community tooling and best practices for web development seems to optimize for that model.

      + +

      But from a sysadmin perspective, used to mostly operating third-party software, the constraints of self-hosted development are all very familiar. And even managed service teams often rely on third-party software developed using this kind of model, relying on LTS releases of Linux distributions and pinning major versions of dependencies.

      + +

      The biggest challenge I’ve seen, however, is when a development team tries to target the same software at both use cases. As far as I can tell, it’s very difficult to simultaneously operate a reliable service that is being continuously developed and deployed, and to provide predictable and high-quality releases to self-hosted customers.

      + +

      So far, I’ve seen this tension resolved in three different ways:

      + +
      • The internal service becomes “just another customer”, operating something close to the latest external release, resulting in a slower release cycle for the internal service
      • Fast development for the internal service gets prioritized, with external releases becoming less frequent and including bigger and bigger changes
      • Internal and external diverge completely, with separate development teams taking over (and often a name change for one of them)
      + +

      I don’t really have a conclusion here, except that I don’t really love any of these results. /sigh

      + +

      If you’re reading this and have run into similar tensions, how have you seen this resolved? Have you seen any success stories in deploying the same code internally and externally? Or alternatively — any interesting stories of failure to share? 😉 Feel free to send me an email, I’d be interested to hear from you.

      + +
      +
      + +
      + + + + + + + + + + +
      + + + + + diff --git a/2022/experimenting-with-igor-s-bluestore-wal/index.html b/2022/experimenting-with-igor-s-bluestore-wal/index.html new file mode 100644 index 0000000..1caa200 --- /dev/null +++ b/2022/experimenting-with-igor-s-bluestore-wal/index.html @@ -0,0 +1,156 @@ + + + + + + + Experimenting with Igor’s Bluestore WAL - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
      + +
      +

      hpc.social

      + + + + + + + + + + + +
      + High Performance Computing
      Practitioners
      and friends /#hpc +
      +
      + +
      +
      +
      + +
      + +
      + Share:  + +
      +
      + +
      + This is a crosspost from   Mark Nelson’s Blog I like to make distributed systems go fast.. See the original post here.
      + +
      +

      Experimenting with Igor’s Bluestore WAL

      +

Igor Fedotov is one of the most knowledgeable developers working on Ceph. He’s started working on replacing our use of RocksDB’s write-ahead log with a BlueStore-native implementation. After tuning, we can achieve up to 122K random write IOPS on a single OSD! That’s nearly a 50% improvement over the current main branch and over twice as fast as Pacific!

      + +
      +
      + +
      + + + + + + + + + + +
      + + + + + diff --git a/2022/happy-living-close-ish-to-the-metal/index.html b/2022/happy-living-close-ish-to-the-metal/index.html new file mode 100644 index 0000000..5a287af --- /dev/null +++ b/2022/happy-living-close-ish-to-the-metal/index.html @@ -0,0 +1,172 @@ + + + + + + + happy living close (-ish) to the metal - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
      + +
      +

      hpc.social

      + + + + + + + + + + + +
      + High Performance Computing
      Practitioners
      and friends /#hpc +
      +
      + +
      +
      +
      + +
      + +
      + Share:  + +
      +
      + +
      + This is a crosspost from   Computing – thinking out loud works in progress and scattered thoughts, often about computers. See the original post here.
      + +
      +

      happy living close (-ish) to the metal

      +

      For various reasons, I’ve been doing a little bit of career introspection lately. One of the interesting realizations to come out of this is that, despite in practice doing mostly software work, I’ve been happiest when my work involved a strong awareness of the hardware I was running on.

      + +

      + +

I suppose it shouldn’t be a surprise, exactly, but I hadn’t quite thought about it in those terms before! Before I got into computing, I got a bachelor’s degree in physics, and got through much of a PhD in materials science. While I wasn’t building computers directly, I was definitely working regularly on hardware, building experimental apparatus involving various combinations of vacuum chambers, lasers, exotic microscopes, custom electronics, and microfluidics.

      + +

      In terms of my computing career, I’ve generally worked in the area of “high-performance computing”, a buzzword that means I’ve focused on building fast parallel systems aimed at researchers.

      + +

      It’s a sub-field that lends itself to awareness of hardware: even as a new baby sysadmin, I was staring at motherboard block diagrams and thinking about the performance differences between different PCIe topologies.

      + +

      And because HPC is one of the areas that took the longest to embrace cloud computing, I spent a lot of years doing work in datacenters. Most of my work would usually involve writing code, doing configuration management, and managing Linux systems… but on a regular basis I’d head into a big loud room full of air conditioners and server racks, carrying a screwdriver.

      + +

      Amusingly, my relatively recent stint at a hyperscaler was the first time I had worked on computers, but didn’t have my office in the same building as the computers I was running! Even there I was at least somewhat cognizant of hardware specifics, and one of my early projects was performance testing on the Bryce Canyon storage node, to see if it was ready for use in a large-scale distributed filesystem.

      + +

      And these days, at NVIDIA, I’m enjoying being even closer to the metal. (At least conceptually; I still work remote…) I spend my days thinking about datacenter requirements, cable lengths, firmware upgrades, hardware health checks, and application performance tests on large clusters. And I love getting to play with these shiny toys.

      + +

      Anyway, this is just a ramble. But a useful one. While I’d be the first to admit that cloud has its place, and I use it for some personal projects, I really enjoy understanding the hardware I run on. I have trouble thinking of computers as remote abstractions with no underlying detail. They are pleasingly physical in my mind, even if they’re thousands of miles away.

      + +
      +
      + +
      + + + + + + + + + + +
      + + + + + diff --git a/2022/improving-the-open-science-data-federation-s-cache-selection/index.html b/2022/improving-the-open-science-data-federation-s-cache-selection/index.html new file mode 100644 index 0000000..e56d2c7 --- /dev/null +++ b/2022/improving-the-open-science-data-federation-s-cache-selection/index.html @@ -0,0 +1,220 @@ + + + + + + + Improving the Open Science Data Federation’s Cache Selection - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
      + +
      +

      hpc.social

      + + + + + + + + + + + +
      + High Performance Computing
      Practitioners
      and friends /#hpc +
      +
      + +
      +
      +
      + +
      + +
      + Share:  + +
      +
      + +
      + This is a crosspost from   Dereks Web Thoughts from Derek. See the original post here.
      + +
      +

      Improving the Open Science Data Federation’s Cache Selection

      +

Optimizing data transfers requires tuning many parameters. High latency between the client and a server can decrease data transfer throughput. The Open Science Data Federation (OSDF) attempts to minimize the latency between a client and cache by using GeoIP to locate the nearest cache to the client. But using GeoIP alone has many flaws. In this post, we utilize Cloudflare Workers to provide GeoIP information during cache selection. During the evaluation, we found that location accuracy grew from 86% with the original GeoIP service to 95% with Cloudflare Workers.

      + +
      + Map of U.S. OSDF
      + Map of OSDF locations + +
      + +

GeoIP has many flaws. First, the nearest physical cache may not be the nearest in the network topology. Determining the nearest cache in the network would require probing the network topology between the client and every cache, an intensive task to perform at each client startup, and one that may be impossible with some network configurations, such as blocked network protocols.

      + +

Second, the GeoIP database is not perfect. It does not have every IP address, and the addresses it does have may not carry accurate location information. When GeoIP is unable to determine a location, it will default to “guessing” that the location is a lake in Kansas (a well-known issue).

      + +

Following a review of the Open Science Data Federation (OSDF), we found that we could improve efficiency by improving the geolocation of clients. In the review, several sites were found not to be using the nearest cache.

      + +

      Implementation

      + +

StashCP queries the CVMFS geolocation service, which relies on the MaxMind GeoIP database.

      + +

Cloudflare Workers are designed to run at Cloudflare’s many colocation facilities near the client. Cloudflare directs a client’s request to a nearby data center using DNS. Each request is annotated with an approximate location of the client, as well as the colocation center that received the request. Cloudflare uses a GeoIP database much like MaxMind’s, but it also falls back to the colocation site that serviced the request.

      + +

I wrote a Cloudflare worker, cache-locator, which calculates the nearest cache to the client. It uses the GeoIP location of the client to calculate the ordered list of nearest caches. If the GeoIP lookup fails for a location, the incoming request to the worker will not be annotated with the location but will include the IATA airport code of the colocation center that received the client request. We then return the ordered list of caches nearest to that airport.

      + +

We imported a publicly available database mapping airport codes to locations. The database is stored in the Cloudflare Key-Value store, keyed by the IATA code of each airport.
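To sketch the selection logic (the real worker runs as JavaScript on Cloudflare; the coordinates below are examples, not the production cache list):

# Sketch of the worker's nearest-cache logic in Python; the real worker is
# JavaScript, and these coordinates are examples, not the production list.
from math import asin, cos, radians, sin, sqrt

CACHES = {"KC": (39.10, -94.58), "Chicago": (41.88, -87.63), "UCSD": (32.88, -117.24)}
AIRPORTS = {"LAX": (33.94, -118.41), "DFW": (32.90, -97.04)}  # from the public IATA data

def haversine_km(a, b):
    # Great-circle distance between two (lat, lon) pairs, in kilometers.
    lat1, lon1, lat2, lon2 = map(radians, (*a, *b))
    h = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2
    return 2 * 6371 * asin(sqrt(h))

def nearest_caches(geoip_loc=None, iata=None):
    # Prefer the client's GeoIP location; fall back to the colo's airport code.
    loc = geoip_loc or AIRPORTS[iata]
    return sorted(CACHES, key=lambda name: haversine_km(loc, CACHES[name]))

print(nearest_caches(iata="LAX"))  # -> ['UCSD', 'KC', 'Chicago']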

      + +

      Evaluation

      + +

      To evaluate the location, I submitted test jobs to each site available in the OSG OSPool, 43 different sites at the time of evaluation. The test jobs:

      + +
        +
1. Run the existing stashcp to retrieve the closest cache.

   stashcp --closest

2. Run a custom closest script that will query the Cloudflare worker for the nearest caches and print out the cache.

After the jobs completed, I compiled the cache decisions into a spreadsheet and manually evaluated each cache selection decision. The site names in the spreadsheet are the somewhat arbitrary internal names given to sites.

      + +

In the spreadsheet, you can see that the correct cache was chosen 86% of the time with the old GeoIP service, and 95% of the time with Cloudflare Workers.

      + +

      Notes during the Evaluation

      + +

Cloudflare was determined to be incorrect at two sites, the first being UColorado_HEP (University of Colorado in Boulder). In this case, the Colorado clients failed the primary GeoIP lookup, and the Cloudflare worker fell back to using the IATA code from the request. The requests from Colorado were all received by the Cloudflare Dallas colocation site, which is nearest the Houston cache. The original GeoIP service chose the Kansas City cache, which is the correct decision. It is unknown whether the original GeoIP service chose the KC cache because it knew the GeoIP location of the clients, or whether it defaulted to the Kansas location.

      + +

The second site where the Cloudflare worker implementation was incorrect was SIUE-CC-production (Southern Illinois University Edwardsville). In this case, the original GeoIP service chose Chicago, while the new service chose Kansas City. Edwardsville is almost equidistant from the KC cache and Chicago. The difference in the distance to the caches is ~0.6 km, with Chicago being closer.

      + + + +

An example of a site that did not work with GeoIP was ASU-DELL_M420 (Arizona State University). The original service returned that the KC cache was the nearest. The Cloudflare service gave the default lat/long used when GeoIP fails (the middle of Kansas), but the data center serving the request had the airport code of LAX (Los Angeles). The nearest cache to LAX is the UCSD cache, which is the correct cache decision.

      + +

During the evaluation, I originally used the Cloudflare worker development DNS address, stash-location.djw8605.workers.dev. Purdue University and the American Museum of Natural History sites both blocked the development DNS address. The block came from an OpenDNS service which reported the domain had been linked to malware and phishing. Since the DNS hostname was only hours old, it’s likely that most *.workers.dev domains were blocked.

      + +

      Conclusion

      + +

Improving the cache selection can improve download efficiency. It is left as future work to measure whether the nearest geographical cache is the best choice. While the OSDF is using a GeoIP service for cache selection, it is important to select the correct cache. Using the new Cloudflare service results in a correct cache decision 95% of the time vs. 86% with the original service.

      + +

Cloudflare Workers is also very affordable for the scale that the OSDF would require. The first 100,000 requests are free, and it is $5/mo for the next 10 million requests. The OSPool runs between 100,000 and 230,000 jobs per day, easily fitting within the $5/mo tier.

      + +
      +
      + +
      + + + + + + + + + + +
      + + + + + diff --git a/_posts/vsoch/2022-2-15-paks.md b/2022/interactive-development-containers/index.html similarity index 89% rename from _posts/vsoch/2022-2-15-paks.md rename to 2022/interactive-development-containers/index.html index 19abb10..e530138 100644 --- a/_posts/vsoch/2022-2-15-paks.md +++ b/2022/interactive-development-containers/index.html @@ -1,22 +1,90 @@ ---- -author: Vanessasaurus -author_tag: vsoch -blog_subtitle: dinosaurs, programming, and parsnips -blog_title: VanessaSaurus -blog_url: https://vsoch.github.io/ -category: vsoch -date: '2022-02-15 12:30:00' -layout: post -original_url: https://vsoch.github.io/2022/paks/ -slug: interactive-development-containers -title: Interactive Development Containers ---- - -

      I’ve recently been interested in developer workflows. Aside from being a developer, I feel + + + + + + + Interactive Development Containers - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

      +
      + +
      +

      hpc.social

      + + + + + + + + + + + +
      + High Performance Computing
      Practitioners
      and friends /#hpc +
      +
      + +
      +
      +
      + +
      + +
      + Share:  + +
      +
      + +
      + This is a crosspost from   VanessaSaurus dinosaurs, programming, and parsnips. See the original post here.
      + +
      +

      Interactive Development Containers

      +

      I’ve recently been interested in developer workflows. Aside from being a developer, I feel like the tooling for our community, and especially for HPC or hybrid environments, is lacking. As a simple example, let’s ask a basic question:

      -

      How do I start developing here and move it over there?

      @@ -33,14 +101,12 @@ push to a registry with some kind of “work in progress” tag and then pull somewhere else. Minimally we’d need to build fresh again, and then reproduce all the steps to set up our environment.

      -

      Interactive Development Containers

Now I don’t have all the answers, but recently @alecbcs and I have been dreaming about what kinds of development environments we want - functionality such as:

      -
      1. Saving the container state without leaving it.
      2. Loading or saving or otherwise interacting with named environments.
      3. @@ -53,22 +119,18 @@

        Interactive Development ContainersPaks.

        -
        -

        Paks is a Python library that I’m calling a developer wrapper for containers. Mind you, it’s more of a playground right now to experiment with ideas. But I’ve had so much fun even this early on that I want to share what I’ve learned.

        -

        Wrapper

        Because Paks is a wrapper, you will run containers using the paks command. Here are a few quick examples.

        -
        
         $ paks run ubuntu
         $ paks run --shell /bin/sh busybox
        @@ -77,14 +139,12 @@ 

        Wrapper

        -

What is happening on the backend (which took me a bit to figure out) is that we need to run a subprocess, but create a pseudo terminal to better watch and interact with it. This is going to happen in the “interactive_terminal” command below. But unless you want your terminal to get wonky, we need to use termios to grab the current tty and make sure it gets restored no matter what at the end. That looks like this:

        -
        
             def interactive_command(self, cmd):
                 """
        @@ -105,11 +165,9 @@ 

        Wrapper

        -

        What happens if you don’t do that? Your terminal gets weird and wonky. And then in the interactive command function, this is where we launch a subprocess with a new pseudo terminal:

        -
        
                 tty.setraw(sys.stdin.fileno())
         
        @@ -132,7 +190,6 @@ 

        Wrapper

        -

        The setsid as a pre-exec function is ensuring the child process is a new session and won’t exit, sort of akin to a daemon. So at face value, yes it is doing exactly what you think - we are shelling into the container @@ -141,18 +198,15 @@

        Wrapper

        and pynput is really scary because it doesn’t just get keys from the terminal - it’s watching anything you type anywhere! That gave me the heebie jeebies. I hope there is some scanner for pypi that is looking for that package and checking it’s not being malicious.

        -

        All of the above said, and all the time spent, I’m not convinced that this exact method is the best way to be running commands from inside the container. There are other ideas that need to be tested!
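If you want to play with the tty mechanics outside of Paks, here is a stripped-down, standalone sketch of the same save-the-terminal / pty / setsid pattern, watching a simple command instead of a container shell:

# Standalone sketch of the termios + pty + setsid pattern described above.
import os
import pty
import subprocess
import sys
import termios

def interactive(cmd):
    # Save the current terminal attributes so we can restore them no matter what.
    old_attrs = termios.tcgetattr(sys.stdin.fileno())
    master, slave = pty.openpty()
    try:
        # setsid puts the child in its own session, sort of akin to a daemon.
        proc = subprocess.Popen(
            cmd, stdin=slave, stdout=slave, stderr=slave,
            preexec_fn=os.setsid,
        )
        os.close(slave)
        while proc.poll() is None:
            try:
                data = os.read(master, 1024)  # watch the session a chunk at a time
            except OSError:
                break
            if data:
                os.write(sys.stdout.fileno(), data)
        # Drain anything still buffered after the child exits.
        try:
            while True:
                data = os.read(master, 1024)
                if not data:
                    break
                os.write(sys.stdout.fileno(), data)
        except OSError:
            pass
    finally:
        # Restore the terminal so it doesn't end up wonky.
        termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_attrs)

interactive(["echo", "hello from a pty"])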

        -

        Structure

        We could have talked about this first, but let me show you the basic structure of paks so you get an understanding of the components.

        -
        paks
         
         # Backends are different wrappers, so logically we start with podman and docker
        @@ -196,11 +250,9 @@ 

        Structure

        -

        So that should give you the gist - we have container wrappers (backends) and then commands that we can issue while we are inside the container. Let’s talk about them next.

        -

        Saving State

        The first thing I wanted to try with Paks was to save a container state, but not needing @@ -210,10 +262,8 @@

        Saving State

Reproducibility is important, but mostly for the final production thing, and only up to the level of not giving us pain. So how might I do this?

        -

        For paks, while you are inside the container (let’s say ubuntu) you simply ask to #save:

        -
        
         $ paks run ubuntu
         # touch PANCAKES
        @@ -231,10 +281,8 @@ 

        Saving State

        -

        And then you can see that there is an ubuntu-saved container!

        -
        
         $ docker images | grep ubuntu
         ubuntu-saved                                      latest    93e336d994de   2 minutes ago   72.8MB
        @@ -243,14 +291,12 @@ 

        Saving State

        -

        So this has saved me some tiny bit of energy to open up another terminal, remember how to docker commit, and then also rebuild with a squash to minimize the layers (as there is a maximum number we don’t want to hit). What Paks could then eventually do is make it easy to move this entire container between places, e.g., from your local machine to HPC without a hitch. I haven’t started to work on that yet because this is a fun side project.

        -

        Environments

        One thing I do a lot is use GitHub tokens to do fun stuff with the API. I usually need to @@ -258,7 +304,6 @@

        Environments

        And then I do that a million times when I have to run a new container. But with Paks, we can create a named environment on the host (a file to source with exports):

        -
        
         $ paks env edit github
         You can also quickly show an environment:
        @@ -269,10 +314,8 @@ 

        Environments

        -

        And then in our container, as many times as we need, load it seamlessly!

        -
        
         root@9ec6c3d43591:/# #envload github
         Loading environment...
        @@ -284,10 +327,8 @@ 

        Environments

        -

        If only my GitHub username was dinosaur! 😁️ Is it loaded?

        -
        
         root@9ec6c3d43591:/# env | grep GITHUB
         GITHUB_USER=dinosaur
        @@ -296,13 +337,11 @@ 

        Environments

        -

        Okay, so to be fair, there are a bunch of other commands for inspection and size, and I’m not going to go through them all! You can see them in the Paks user guide. And I don’t mean to say you should use this - you probably shouldn’t. But you might be interested to try it out.

        -

        Parsing Keystrokes

        So the most interesting part of this project has been learning about input from the terminal, @@ -310,7 +349,6 @@

        Parsing Keystrokes

        function where we ran subprocess and created a pseudo terminal. There actually is a pretty simple way to watch what is being typed:

        -
        # This is the subprocess return code, keep going until we are done (e.g. have a return code)
         while p.poll() is None:
         
        @@ -332,10 +370,8 @@ 

        Parsing Keystrokes

        -

        I learned a lot from this! Let’s talk about it.

        -

        Debugging

        So the first thing I learned is that my typical “import IPython” and “IPython.embed()” @@ -346,20 +382,17 @@

        Debugging

        a little bit. So what I wound up doing so I could easily see every keypress was to write to file in append mode:

        -
        with open('/tmp/file.txt', 'a') as fd:
             fd.write(new_char)
         
        -

        This was kind of neat because I could be typing in one terminal, and then have a file open (watching it) that updates with changes, and I’d get a sense of what is going on. I could append anything to this file to debug. And this is also really different from how we normally use subprocess, where maybe we will parse entire lines at once:

        -
        
         p = subprocess.Popen(['python','thing.py'], stdout=subprocess.PIPE)
         while True:
        @@ -369,12 +402,10 @@ 

        Debugging

        -

because we are reading one character at a time! So what we essentially need to do is keep a string that we continue appending to unless there is a newline, up or down, or left or right to indicate moving the cursor.

        -

        Ascii Characters

        I started to quickly see characters that my editor didn’t know - e.g., likely @@ -384,7 +415,6 @@

        Ascii Characters

        of the character and compare to a number. For example, for a backspace the number is 127. So to act on it I might do:

        -
        
         # if we have a backspace (ord 127)
         if len(new_char) == 1 and ord(new_char) == 127:
        @@ -405,12 +435,10 @@ 

        Ascii Characters

        -

        The above is basically looking for a backspace, and if we find one, we remove one character from the line we are assembling. Otherwise we just add the new character to the line.

        -

        xterm sequences

        And a similar thing happens for pressing up/down and right/left, except the @@ -423,7 +451,6 @@

        xterm sequences

        So for the purposes of my library, for now I decided I’m not going to handle moving left and right, nor do I want to deal with weird extra ascii characters that are added, so I just clean them up.

        -
        
         # Get rid of left/right
         string_input = string_input.replace("[D", "").replace("[C", "")
        @@ -433,11 +460,9 @@ 

        xterm sequences

        -

        Yes, that probably means some of your ninja shortcuts won’t work perfectly when running paks, and if you absolutely want one to be parsed please let me know and we can add it.

        -

        Newlines

        So the gold nugget of content that Paks is interested in is when you press enter. @@ -445,7 +470,6 @@

        Newlines

        or carriage return. This is also a pretty variable thing depending on the platform you are on - newlines can come in very different forms! I tried to honor the two that I see most often:

        -
        1. \r\n: Windows
        2. \n: UNIX (e.g., Mac OSX)
        3. @@ -456,11 +480,9 @@

          Newlines

      -

      At this point, we can start acting on what we see. E.g., if the user has asked for any kind of exit, I honor it.

      -
      # Universal exit command
       if "exit" in string_input and has_newline:
           print("\n\rContainer exited.\n\r")
      @@ -468,11 +490,9 @@ 

      Newlines

      -

      The return of the name at the end is to handle cleaning up the image, which was allocated a temporary name.

      -

      History

      One of the more interesting parts of this project was realizing that people use history, a lot. @@ -480,7 +500,6 @@

      History

is some item in history re-executed. So first let’s look at exploring history with up/down. There are two cases - pressing up/down without a newline:

      -
      # Pressing up or down, but not enter
       if ("[A" in string_input or "[B" in string_input) and not has_newline:
           string_input = self.get_history(string_input, openpty)
      @@ -489,10 +508,8 @@ 

      History

      -

      And with one:

      -
      # Pressing up or down with enter
       if ("[A" in string_input or "[B" in string_input) and has_newline:
           string_input = self.get_history(string_input, openpty)
      @@ -500,7 +517,6 @@ 

      History

      -

      If we don’t have a newline, we add a continue to keep parsing characters the user is typing. If we do have a newline, we let the loop keep running to keep parsing the line of history we retrieved. But let’s step back and talk about that history. We basically want to retrieve whatever line of history that @@ -513,7 +529,6 @@

      History

      listened to every person that has ever told me to stop working on something because “REASONS!” I wouldn’t ultimately work on much at all.

      -

The short answer was that I needed a function to get a line of history based on the number of times up or down was pressed. For my first attempt I said “never mind this, I’ll just save my own history!” but that got hugely complicated very fast because it turns out, we don’t just stupidly type commands over and over,

      History

      sense in case there is sensitive information) but it was problematic for me because I couldn’t parse it. For example, when someone presses up and down a bunch of times, I might see:

      -
      [A[A[A[A[A[B[A
       
      -

This is a reference to some previous command that I can only find in history, given I’m parsing the input/output as I am. So in my second attempt (well, maybe second through tenth) I tried different variations of parsing the history. If you looked at the tweet you’ll see we need to run:

      -
      $ history -a
       
      -

      to start writing what’s in memory to file. I didn’t want to do this on every command, because along with the user seeing it and the UI being awful, it was just too much. Instead, I realized that I had a small opportunity when the user first shells into the container (and is expecting a jump in their UI) to run whatever I need and then clear the terminal. So I ran it there, right before a clear and welcome message.

      -
      
           def welcome(self, openpty):
               """
      @@ -562,12 +572,10 @@ 

      History

      -

      And with this method you aren’t aware of the extra commands at all! And did you notice the spaces above? That’s also another trick! Any command that you type with a leading space won’t be saved to history, and this is thanks to HISTCONTROL that has an ignorespace option. I think most people / containers set it to ignore space and to ignore duplicates:

      -
      
       root@1c268386714a:/# echo $HISTCONTROL
       ignoredups:ignorespace
      @@ -575,12 +583,10 @@ 

      History

      -

      That said, I don’t explicitly try to reset this in the container, so that could be a bug if there is a container base that doesn’t do that. And I’m pretty sure centos doesn’t come with clear! I’ll likely need to work on this a bit more.

      -

      For now, please consider this only working for debian/ubuntu bases and we can inspect the other ones later!

      @@ -590,7 +596,6 @@

      History

      get the history, that’s actually done via a Paks command that we will talk about after. Here is what is going on:

      -
      def get_history(self, line, openpty):
           """
           Given an input with some number of up/down and newline, derive command.
      @@ -630,12 +635,10 @@ 

      History

      -

      The above might not be perfect, but it worked the best for everything that I tried! This allows us to issue a command that paks knows, press up to get it again, and then edit it and have the command work correctly. Speaking of commands…
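Boiled down, the counting trick at the heart of get_history is tiny - each “[A” steps one entry back and each “[B” steps one forward (a simplified sketch, ignoring the real function’s edge cases):

# Simplified sketch of deriving a history offset from up/down presses.
def history_offset(string_input):
    ups = string_input.count("[A")    # up arrow: one step back in history
    downs = string_input.count("[B")  # down arrow: one step forward again
    return max(ups - downs, 0)        # 0 means "back to an empty prompt"

history = ["ls", "whoami", "echo pancakes"]
offset = history_offset("[A[A[A[A[A[B[A")  # six ups, one down -> offset 5
line = history[-offset] if 0 < offset <= len(history) else ""
print(offset, repr(line))  # offset 5 is past our 3-entry history, so ""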

      -

      Commands

      The core meat of paks is the commands that it recognizes. Every command has a base class @@ -646,7 +649,6 @@

      Commands

      and we’ve parsed it per the above (looking up history and such) we can sniff it to see if it matches a known command pattern:

      -
      # If we have a newline (and possibly a command)
       if has_newline:
           self.run_executor(string_input, openpty)
      @@ -657,13 +659,11 @@ 

      Commands

      -

      The function “run_executor” is going to make this call if there is a Paks command and handle it. And no matter what, we reset our string input to be empty given that the user pressed enter, because they are going to start typing fresh. But before that, this function “run_executor” is going to see if there are any known commands, and if so, to run them! That function looks like this:

      -
      
       def run_executor(self, string_input, openpty):
           """
      @@ -695,7 +695,6 @@ 

      Commands

      -

      The result object holds what you would expect - a return code, some message, and the basic outputs of the call. It’s up to the executor (command) to decide what to show the user. Some might not show anything beyond commands that are run @@ -703,7 +702,6 @@

      Commands

This is where we delve into the commands module, where there is a simple lookup of the starting prefixes of commands matched to Command classes:

      -
      
       # lookup of named commands and settings
       docker_commands = {
      @@ -718,14 +716,12 @@ 

      Commands

      -

      When I add a load functionality, all it will need to do is update this dictionary. And the reason those are “docker commands” is that you can imagine we eventually support other container technologies, and the commands you run are going to vary. Each Command actually has a class attribute for the container types that are supported. Here is a snippet of the DockerCommands class attached to the client that we are calling “get_executor” on:

      -
      
       class DockerCommands:
       
      @@ -758,18 +754,15 @@ 

      Commands

      -

      To focus on the last function, you basically see that we parse the line (name), and then see if it’s in our lookup. If so, we return the initialized executor, and we need to add the output source in case it needs to interact with the current terminal. The self.command refers to the container technology (e.g., docker or podman in this case).

      -

      Then we can look at a particular command (e.g., inspect) and see it’s pretty simple! We have defined the supported container technologies along with optional messages, and a main run function. Here is the command to inspect, which will dump out the json manifest and optionally take a section:

      -
      
       class InspectContainer(Command):
       
      @@ -794,7 +787,7 @@ 

      Commands

      self.tech, "inspect", "--format", - "{{json .%s }}" % section.capitalize(), + "" % section.capitalize(), container_name, ] ) @@ -808,23 +801,19 @@

      Commands

      -

      You’ll now know the main Paks trick - because we are still running on the host, we can issue commands to the host while we are in the container! In the above, we can just type:

      -
      
       #inspect
       #inspect config
       
      -

      And see the output in the terminal! This is how a lot of the interactions with the host work. It’s kind of simple and silly, but also really cool when you see it work on the container! So the run function above, just as a reminder, is called by this part:

      -
      
       result = executor.run(
           name=self.image,
      @@ -834,10 +823,8 @@ 

      Commands

      -

      And honestly, that’s the majority of Paks! 🎉️

      -

      Discussion

      Paks has honestly been so fun to work on, despite long hours of trying to figure things out during evenings and weekends. I’m so excited @@ -846,7 +833,6 @@

      Discussion

      I had some things on my mind, but it was an excellent use of the time, despite the fact that I woke up 4 hours later and I’m going to crash tonight (err tomorrow night… err now that I’m tweaking up the finishing touches to this post)!

      -

      Next Steps

      I’m working on a “paks load” command that will let someone develop a Python module @@ -861,14 +847,12 @@

      Next Steps

that removes the file. The reason I haven’t added this yet is because if I’m developing in the container and want to, say, move it from my local machine to HPC, I kind of want to have my history so I can lazily use it.

      -

      But Really…

We have some magic up our sleeves for what we are actually working on to inspire these ideas! I guess you’ll just have to wait for the future, because @alecbcs and I both have vision and are a great tag team! 🎉️

      -

      Security

      So there are obviously security issues around a library like this - and I added notes @@ -882,7 +866,6 @@

      Security

      container? Possibly, but it’s not Paks in Python in its current state. I think that’s okay - we have to start small with ideas and go from there.

      -

      Didn’t I see paks before?

      Yes, you did! A previous version was intended for making spack build caches on GitHub, but that @@ -892,6 +875,77 @@

      Didn’t I see paks before?

      So for now it’s on a separate branch but largely I am not working on it. If you want to see this branch, it’s still here!

Thanks for reading, friends! I hope this has been interesting, and that you might be inspired to also work on better tooling for developers, even if that just means exploring the ideas.

This is a crosspost from Computing – thinking out loud (works in progress and scattered thoughts, often about computers). See the original post here.

      Interesting links I clicked this week


      I watched several really interesting talks from SRECon22 Americas this week, and in particular I’d like to highlight:

      • Principled Performance Analytics, Narayan Desai and Brent Bryan from Google. Some interesting thoughts on quantitative analysis of live performance data for monitoring and observability purposes, moving past simple percentile analysis.
      • The ‘Success’ in SRE is Silent, Casey Rosenthal from Verica.io. Interesting thoughts here on the visibility of reliability, qualitative analysis of systems, and why regulation and certification might not be the right thing for web systems.
• Building and Running a Diversity-focused Pre-internship program for SRE, from Andrew Ryan at Meta. Some good lessons learned here from an early-career, internship-like program in its first year.
      • Taking the 737 to the Max, Nickolas Means from Sym. A really interesting analysis of the Boeing 737 Max failures from both a technical and cultural perspective, complete with some graph tracing to understand failure modes.

      I also ran across some other articles that I’ve been actively recommending and sharing with friends and colleagues, including:

• Plato’s Dashboards, Fred Hebert at Honeycomb. This article has some great analysis of how easily-measurable metrics are often poor proxies for the information we’re actually interested in, and discusses qualitative research methods as a way to gain more insight.
• The End of Roe Will Bring About A Sea Change In The Encryption Debate, Riana Pfefferkorn from the Stanford Internet Observatory. You should absolutely go read this article, but to sum up: law enforcement in states that ban abortion is now absolutely part of the threat model that encrypted messaging defends against. No one claiming to be a progressive should be arguing in favor of “exceptional access” or other law enforcement access to encryption.

This is a crosspost from Glenn K. Lockwood (personal thoughts and opinions of a supercomputing enthusiast). See the original post here.

      Life and leaving NERSC


      When word started to spread that I was leaving my job at NERSC for Microsoft, a lot of people either directly or indirectly attributed my decision to being one motivated by money.  Rationalizing my decision to leave is certainly a lot easier with this "Glenn was lured away with bags of cash" narrative, but that wasn't really a factor when I chose to move on.  Rather, my decision is a reflection of where I see the world of HPC going in the coming decade and where I personally wanted to position myself.  For my own therapeutic reasons (and perhaps the benefit of anyone interested in what it's like to work within, and subsequently leave, the DOE HPC complex), I'll try to write it all out here.


      Working at NERSC


      First things first: NERSC has been a wonderful place to work.


[Photo: A typical view from outside NERSC’s facility in Berkeley after work during the winter months.  Yes, it really does look like this.]

When I started in mid-2015, I came in with about three years of prior work experience (two at SDSC doing user support and one at a biotech startup) and knew a little bit about a lot of things in HPC.  But I didn’t really know the basics of I/O or storage–I couldn’t tell you what “POSIX I/O” really meant or how GPFS worked.  The fact that I got to help author NERSC’s ten-year strategy around storage in just two years, was invited to present my view on how to bridge the gap between HPC and enterprise storage at Samsung’s North American headquarters a year later, and was trusted to oversee the design and execution of the world’s first 35 petabyte all-flash Lustre file system through my first four years is a testament to how much opportunity is available to learn and grow at NERSC.


      There are a couple of reasons for this.


      Stable funding


Perhaps foremost, NERSC (and DOE's Leadership Computing Facilities, ALCF and OLCF) enjoy healthy budgets and financial stability since worldwide leadership in scientific advancement is generally a national priority for both major political parties in the US.  This means that, regardless of who is president and which party holds majorities in Congress, the DOE HPC facilities can pay their employees and deploy new supercomputers.  This solid funding makes it much easier to invest in staff development and long-term planning; I was able to become a resident I/O expert at NERSC because I was never forced to chase after the funding du jour to make ends meet.  Congress trusts NERSC to allocate its funding responsibly, and NERSC prioritized letting me learn as much as I could without distraction.


      Instant credibility and access


Second, having a NERSC affiliation gives you instant credibility and access in many cases.  It's not necessarily fair, but it's definitely true.  Within my first year at NERSC, I was invited to give a presentation about I/O performance monitoring in Paris because the organizer wanted a lineup of speakers from all the big players in HPC.  I had never been to Europe at that point in my life, but being the I/O guy from NERSC (and being able to present well!) was enough to get me there.  And it was during that trip to Paris that I got to meet--and literally have conversations over dinner with--more industry bigshots than I can remember.  And that trip to Paris was not an outlier; pandemic aside, NERSC let me go to Europe at least once or twice every year I've worked there.


[Photo: The first photo I ever took of Notre Dame on the first day I’d ever set foot in Europe.  NERSC sent me there less than a year after I started.]

Of course, this is not to say that every employee at a DOE HPC facility is wining and dining in Paris every summer.  Many of these opportunities are earned by showing the value of the work you’re doing, just like at any job.  But owing to healthy budgets, travel expenses are rarely the limiting factor in chasing after these opportunities.  In addition, going out into the world and talking about what you do is part of the job at a DOE facility; being a leader in the field of HPC is part of the mission of NERSC, ALCF, and OLCF, so doing high-risk, first-of-a-kind work and telling the world about it is uniquely valued within DOE in a way that it is not in industry.


      Smart people


These two factors (stable budget and instant credibility) result in coworkers and colleagues who are generally very experienced and capable.  There's an interesting mix of laissez-faire management and rigorous process-driven management as a result.


      Staff are generally given the freedom to choose their own destiny and focus on work that they enjoy much like in any academic environment; it's not hard to pick up passion projects or even move between groups if things get stale on a day-to-day basis.  Since everyone is working on their own slices of HPC, there's also easy access to world experts in different areas of technology if you need one.  For example, I recall once reviewing a storage system that appeared to rely on multiplexing two 12G SAS links over a single 24G SAS.  After one email and a few hours, a coworker confirmed, complete with a citation to the SCSI standards, that this was totally possible.  Even if someone in-house didn't know the answer, I had direct access to an engineering manager at a leading storage vendor who owed me a favor and definitely would've known the answer.  It's really, really hard to find as many smart people in arm's reach in most other HPC centers. 


      At the same time, there is rigorous federal oversight on major projects and procurements to ensure that taxpayer dollars are responsibly spent.  This is a double-edged sword because all of the reporting and reviews that go into massive capital projects make forward progress very slow at times.  All DOE HPC facilities review and re-review everything about these giant supercomputers before making a decision, so by the time the public sees a press release about a new supercomputer, lab staff have spent literal years going over every detail and risk.  It sometimes may not seem that way (how many problems has Aurora had?), but rest assured that every schedule slip or technology change the public hears was preceded by countless hours of meetings about risk and cost minimization.  On the flip-side though, you have the opportunity to learn every gory detail about the system directly from the people who designed it.


      Pay


In true millennial fashion, I think it's important to have an open discussion about the pay.  DOE labs pay more than any other HPC facility in the world as far as I am aware, and even in the San Francisco Bay Area, salary at NERSC is comparable to the base salaries offered by all the big tech companies.  You can get an idea of entry-level salaries (think: first job after a postdoc or a few years out of undergrad) by searching H1B visa postings, and anecdotally, I'd wager that a typical HPC job at NERSC pays about 2x that of the same job at a typical US university and 3x-4x that of the same job at a British or European university.  All the labs pay about the same to boot, so an HPC job at somewhere like Oak Ridge can afford you a relatively luxurious lifestyle.


      Don't get me wrong though; affording to buy a Bay Area house on a single NERSC salary alone would be tough in the same way that buying a Bay Area house on any single salary would be.  And while NERSC's compensation is comparable to the base salary of the big tech companies, that base is about all you can get since DOE labs cannot offer equity or substantial bonuses.  This is less of a gap if you're just starting out, but anyone who's looked at compensation structures in tech knows that stock-based compensation, not base salary, dominates total compensation as you move up.


      So, if money wasn't an issue for me and NERSC is such a great place to work, why would I ever leave?


      The road ahead for HPC


      On one hand, HPC's future has never been brighter thanks to how much life (and money!) the AI industry is bringing to the development of HPC technologies.  We have new all-flash file systems, gigantic GPUs, awesome CPU memory technologies, and mixed-precision techniques in the HPC space that were all directly driven by developments primarily intended for AI workloads.  On the other hand, leadership HPC appears to be engaging in unsustainable brinkmanship while midrange HPC is having its value completely undercut by cloud vendors.  I've not been shy about my overall anxiety about where HPC is going because of this, but I'll elaborate now that the exascale race has been won.


      The future of leadership HPC


      Without some monumental breakthrough in transistor technology, there is only one path forward in continuing to build faster and faster supercomputers in the next decade: pour more and more energy (and dissipate more and more heat) into larger and larger (and more and more) GPUs.


      The goal post for exascale power keeps moving because that's been the easiest way to hit the mythical exaflop milestone; while the original goal was 20 MW, Frontier is coming in at 29 MW and Aurora at "under 60 MW."  Not only is this just a lot of power to feed into a single room, but the cost and effort of actually building this infrastructure is newsworthy in and of itself these days.  At the current trajectory, the cost of building a new data center and extensive power and cooling infrastructure for every new leadership supercomputer is going to become prohibitive very soon.


HPC data centers situated in places where the cost of electricity and real estate (stacked atop the risk of earthquake or wildfire) further skews the economics of just adding more power are going to run up against this first.  It used to be easy to dismiss these practicality concerns by arguing that colocating scientists with supercomputers created immeasurable synergy and exchange of ideas, but the fact that science never stopped during the work-from-home days of the pandemic has taken a lot of air out of that argument.


      My guess is that all the 50-60 MW data centers being built for the exascale supercomputers will be the last of their kind, and that there will be no public appetite to keep doubling down.


      Given this, DOE's leadership computing facilities are facing an existential threat: how do you define leadership computing after exascale if you can't just add another 50% more power into your facility?  How do you justify spending another $600 million for a supercomputer that uses the same power but only delivers 15% more performance?  You can pour similarly huge amounts of money into application modernization to accelerate science, but at the end of the day, you'd still be buying a lot of hardware that's not a lot faster.


      The future of places like NERSC


NERSC is probably a little better off since its lack of an exascale machine today gives it at least one more turn of the crank before it hits a hard power limit in its data center.  That gives it the ability to deploy at least one more system after Perlmutter that is significantly (at least 2x) more capable but draws significantly more power.  However, compared to Frontier and Aurora, such a system may still look rather silly when it lands, in the same way that Perlmutter looks a bit silly compared to Summit, which was funded by the same agency but deployed years earlier.


      And therein lies the dilemma of centers like NERSC--how do you position yourself now so that by the time you deploy an HPC system that is close to maxing out on power, it is sufficiently different from a pure-FLOPS leadership system that it can solve problems that the leadership systems cannot?


The easy go-to solution is to craft a story around "data-centric" supercomputing.  We did this when I was at the San Diego Supercomputer Center when we were budget-limited and had to differentiate our $12 million Comet supercomputer from TACC's $30 million Stampede.  You invest more in the file system than you would for a pure-FLOPS play, you provide low-cost but high-value onramps like Jupyter and science gateways to enable new science communities that have modest computing needs, and you fiddle with policies like allocations and queue priority to better suit interactive and urgent computing workloads.  From a productivity standpoint, this can be a great story since users will always respond well to lower queue wait times and fewer frustrations with the file system.  From a system architect's standpoint, though, this is really boring.  The innovation happens in policies and software, not clever hardware or design, so there's very little that's new for a system designer to think about in this case.


      A more innovative approach is to start thinking about how to build a system that does more than just run batch jobs.  Perhaps it gives you a private, fast file system where you can store all your data in a way indistinguishable from your personal laptop.  Perhaps it gives you a convenient place to run a Jupyter notebook that has immediate access to a powerful GPU.  Or perhaps it gives you all the tools to set up an automated process where all you have to do is upload a file to trigger an automatic data analysis and reduction pipeline that returns its output to a shiny HTTP interface.  Such a system may not be able to crank out an exaflop using HPL, but does that matter if it's the only system in the country that supports such automation?


      There are interesting system architecture questions in the latter case, so as a system designer, I much prefer it over the "data-centric" angle to non-exaflop supercomputing strategies.  But there remains a problem.


      The problem: cloud


      Such a "more than just batch jobs" supercomputer actually already exists.  It's called the cloud, and it's far, far ahead of where state-of-the-art large-scale HPC is today--it pioneered the idea of providing an integrated platform where you can twist the infrastructure and its services to exactly fit what you want to get done.  Triggering data analysis based on the arrival of new data has been around for the better part of a decade in the form of serverless computing frameworks like Azure Functions.  If you need to run a Jupyter notebook on a server that has a beefy GPU on it, just pop a few quarters into your favorite cloud provider.  And if you don't even want to worry about what infrastructure you need to make your Jupyter-based machine learning workload go fast, the cloud providers all have integrated machine learning development environments that hide all of the underlying infrastructure.


      And therein lies the problem: the definition of "innovation" as non-exaflop HPC runs up against this power wall might actually mean "catching up to the cloud."


      This is not to say that NERSC-like HPC centers are entirely behind the cloud; all the DOE HPC facilities have bigger, faster, and more convenient parallel file systems that are generally always on and where data is always somewhere "fast."  They also provide familiar, managed software environments and more egalitarian support to small- to mid-scale science projects.  DOE HPC also takes the most risk in deploying unproven technologies to shake them out before they become available to the wide market.


      However, those gaps are beginning to close.  You can stick a full Cray EX system, identical to what you might find at NERSC or OLCF, inside Azure nowadays and avoid that whole burdensome mess of building out a 50 MW data center.  You can also integrate such a system with all the rich infrastructure features the cloud has to offer like triggered functions.  And when it comes to being first to market for risky HPC hardware, the cloud has already caught up in many ways--Microsoft deployed AMD Milan-X CPUs in their data centers before any HPC shop did, and more recently, Microsoft invested in AMD MI-200 GPUs before Frontier had a chance to shake them out.


      Given this steep trajectory, I see only two scenarios for large-scale, non-exaflop HPC facilities in the 10+ year horizon:

      1. They develop, adopt, steal, or squish cloud technologies into their supercomputers to make them functionally equivalent to cloud HPC deployments.  They may be a little friendlier to scientific users since cloud functionality wasn't designed for scientific computing alone, but they also may not be as stable, mature, or feature-rich as their cloud cousins.
      2. They find better overall economics in eventually moving to massive, long-term, billion-dollar deals where flagship HPC systems and their "more than just batch jobs" features are colocated inside cloud datacenters sited at economically advantageous (that is, cheap power, cooling, and labor) locations in the country.

There's also a grey area in between, where national HPC facilities consolidate their physical infrastructure in cheap areas to manage costs but still self-manage their infrastructure rather than fully outsourcing to a commercial cloud.  CSCS has hinted at this model as its future plan since it cannot build 100 MW datacenters in Switzerland, and this is proof that leading HPC facilities around the world see the writing on the wall and need to maneuver now to ensure they remain relevant beyond the next decade.  Unfortunately, consolidating the physical infrastructure across the DOE HPC sites would likely be mired in Congressional politics and take at least a decade to work out.  Since serious work towards this hasn't started yet, I don't envision such a grey-area solution emerging before all the DOE facilities hit their power limit.


      Hopefully I've painted a picture of how I perceive the road ahead for large-scale HPC facilities and you can guess which one I think will win out.


      Final thoughts


      I have every confidence that there will still be DOE HPC facilities in ten years and that they will still be staffed by some of the brightest minds in HPC.  And even if a cloud-based HPC facility ultimately consumes centers like NERSC, I don't think many people would be out of work.  The vast majority of what DOE's HPC people do is think carefully about technology trends, maintain a deep understanding of user requirements, provide excellent support to its thousands of users, and keep complex supercomputers running well.  Those jobs don't go away if the supercomputer is in the cloud; it's just the physical location, the hands doing physical hardware swaps, and the breadth of vendor interactions that may change.


      For me as a system architect though, it's become too hard for me to catch up to all the new technologies and techniques HPC needs for the future while also building up other staff to be masters of today's I/O challenges.  I found myself at a fork in the road.  One path would mean catching up on a technical level and then getting in front of where the future of HPC lies before it gets there.  The other path would mean trying to steer the entire DOE HPC ship in the right direction, as long as that may take, and have faith that the people I bring along can race far enough ahead to tell me if we're still going where we need to go.  Perhaps a bit selfishly, I chose the former.  I'm just not ready to give up on racing ahead myself yet, and the only way I could hope to catch up was to make it a full-time job.


      I don't claim to know the future, and a lot of what I've laid out is all speculative at best.  NERSC, ALCF, or OLCF very well may build another round of data centers to keep the DOE HPC party going for another decade.  However, there's no denying that the stakes keep getting higher with every passing year.


      That all said, DOE has pulled off stranger things in the past, and it still has a bunch of talented people to make the best of whatever the future holds.

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      LSF hookin' up with the CRIU


With the unpredictable spring weather here in Southern Ontario, weekend projects are the order of the day. Whether it’s fixing my bike for spring, repairing things in the home which I’ve neglected for far too long, or topics relating to IT which have been percolating in my head, I am a textbook busybody.

A few decades back, when I was a support engineer at Platform Computing, I had…

…with IBM Spectrum LSF using the echkpnt and erestart interfaces. As highlighted earlier, LSF provides a number of plugin interfaces which provide flexibility to organizations looking to do site-specific customizations.
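To give a flavor of how such a hook can delegate to CRIU, here is a minimal Python sketch of an echkpnt-style wrapper. The argument handling is simplified and assumed (check LSF’s documented echkpnt calling convention), but the criu flags are real:

    #!/usr/bin/env python3
    # Illustrative echkpnt-style wrapper that delegates to CRIU.
    # Assumes the checkpoint directory arrives via -d and the job's
    # process ID is the final argument (simplified handling).
    import subprocess
    import sys

    def main():
        args = sys.argv[1:]
        chkpnt_dir = args[args.index("-d") + 1] if "-d" in args else "/tmp/chkpnt"
        pid = args[-1]

        # dump the process tree into the checkpoint directory,
        # leaving the job running after the checkpoint is taken
        rc = subprocess.call([
            "criu", "dump",
            "-t", pid,            # root of the process tree
            "-D", chkpnt_dir,     # where image files are written
            "--shell-job",        # the job has a controlling terminal
            "--leave-running",    # don't kill the job after the dump
        ])
        sys.exit(rc)

    if __name__ == "__main__":
        main()

An erestart counterpart would similarly shell out to criu restore with the same image directory.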


This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      MNT Reform 2 - part deux


A few days back I posted some of my initial thoughts on the MNT Reform 2 laptop, which just recently arrived. I ran the usual battery of tests on the laptop including, of course, the High Performance Linpack (HPL), just for kicks.

    --------------------------------------------------------------------------------
    End of Tests.
    ================================================================================
This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      Neunundneunzig MNT Reform(s)


I’ll admit it. I sat on the fence for a long time before placing an order for the MNT Reform 2 laptop. At the time, I was in the market for a laptop, as my 2 MacBook Pro Retina laptops were repurposed for online schooling for my children during the pandemic (and, as it turns out, were never…)

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

New Year's Resolution for HPC - Using Resources More Efficiently


A hearty happy new year to everyone. It’s that time of year when we hear from folks about their New Year’s resolutions. But rather than talk about me purchasing a gym membership, I’d like to share my thoughts on a New Year’s resolution for HPC.


With the topsy-turvy weather that we’re seeing all over the planet, we’re all acutely aware of the changes that are happening to our climate and what it represents for humankind. HPC is a key engine for science, including efforts that are crucial to help with our climate change battle. Climate and ocean modelling are some examples of the use of HPC that immediately come to mind in this respect. Modelling the environment is important for us to understand what is occurring around us and what is projected to occur. Additionally, materials science is also important in order to help develop the necessary technologies to more effectively store, transmit, and generate energy from renewable sources. HPC is a consumer of energy, which brings me to the HPC resolution for this year – using computing resources more efficiently.


We’ve seen great strides in the efficiency of processors and systems. But at scale, large HPC centers consume large amounts of energy, both for powering the servers and storage systems and for cooling. And if you’re using cloud for HPC, then of course you’re not concerned with the energy and cooling, but rather the cost to you. In either case, making the most efficient use of your infrastructure should be a key consideration. Workload schedulers are the interface between users and jobs in any HPC environment. Users submit work, and it’s the task of the workload scheduler to find suitable compute resources to dispatch the work to. On the surface, this may seem like a trivial task. But with potentially large numbers of jobs, users, servers and priorities, workload and resource management is anything but trivial. The good news is that there are workload management solutions which bring decades of experience to the table.


IBM Spectrum LSF Suites provide a fully integrated workload management solution for HPC environments. LSF builds on almost 30 years of experience in workload and resource management and is used on some of the world’s largest supercomputers, including Summit at the Oak Ridge Leadership Computing Facility. On a high level, here are some critical areas where LSF can help to drive better efficiency in your HPC infrastructure:

• Dynamic hybrid cloud – automatically flex cloud resources up and down according to policies, with support for all major cloud providers. Learn more here.
• Dynamic multi-instance GPU support – right-size NVIDIA A100 multi-instance GPU slices according to incoming workload demands. Learn more here.
• User productivity – a single unified UI for job submission and management which captures repeatable best practices. Learn more here.

Start the year off right, with a focus on efficiency in your HPC environment with IBM Spectrum LSF. Learn more here.

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      NUMA on POWER9


Non-uniform memory access (NUMA) systems are servers made up of a single planar board (motherboard) with more than one CPU socket. On such servers, each CPU socket is directly connected to part of the system main memory, but can also use the parts of main memory to which it is not directly connected via a crossbar or interconnect. Memory access times on NUMA systems are thus not uniform, and depend upon the location of the memory relative to the CPU socket from which it is accessed. In other words, there is a performance penalty for accessing memory which is not local to a given CPU socket via the interconnect. Much has been written about NUMA from both a hardware and OS perspective.

      From a performance perspective therefore, strategies such as memory affinity and CPU pinning are important considerations when running on NUMA systems. As this is an HPC focused blog, we’ll look at this through the lens of the well-known IBM Spectrum LSF workload scheduler. LSF supports Linux on ppc64le, aarch64 and x86-64. The test system in this case is a dual-socket IBM POWER9 based server configured as follows:

…
    bsub -n 10 -R "affinity[core(same=numa):distribute=pack]" -gpu "num=1:mode=exclusive" ./train.py
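The example above asks for 10 slots packed onto cores in the same NUMA node, plus one exclusive GPU. As a further sketch of the memory-binding side, something along these lines should keep memory local as well (the membind option is per my reading of the LSF affinity syntax - verify against your LSF version):

    bsub -n 10 -R "affinity[core(same=numa):membind=localonly]" ./train.py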

      We’ve discussed NUMA concepts in brief and the ability to easily control CPU pinning and memory binding for jobs submitted to LSF. In HPC environments where performance is crucial, using these concepts can help to drive performance by preventing workloads from using the NUMA interconnect where feasible. That concludes this quick recap of affinity jobs in LSF. You can find out more about the capabilities of LSF in the online documentation.

This is a crosspost from VanessaSaurus (dinosaurs, programming, and parsnips). See the original post here.

Pipelib - Simple Library to Parse, Filter, and Sort Things


In early April I added an “update” command to Singularity Registry HPC (see the pull request here) and needed to start with a list of docker tags and parse them into version strings to sort, and still return the original tag for later use. I wound up creating a custom class and set of functions that use distutils.LooseVersion to support that, but in creating this “hard coded thing” I stepped back and had a question.


Can we more intelligently compose custom parsing pipelines?


      Specifically I wanted to:

      1. Start with a list of container tags for an image from a registry
      2. Filter out anything that looks like a commit, but isn't a string (e.g., latest)
3. Derive clean versions to sort, while still preserving the original tags

Ultimately, of course, I dove right in, and this led to the creation of Pipelib, which was an itch I terribly wanted to scratch! In this quick post, I want to share the overall design, because it was really fun to make.


        Design

        Before we talk about the design, let me show it to you.

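The sample here was garbled in transit, so this is an illustrative reconstruction using only step names that appear later in this post (the original example may have differed):

    import pipelib.steps as step
    import pipelib.pipeline as pipeline

    # lowercase everything, keep all-letter entries, then require a minimum length
    p = pipeline.Pipeline((
        step.transform.ToLowercase(),
        step.filters.HasAllLetters(),
        step.filters.HasMinLength(length=4),
    ))

    updated = p.run(["Avocado", "1.2.3", "Fig"])
    # ['avocado']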

        In the above, we take a pipeline object and add steps to it. That design is fairly simple, as the Pipeline class takes an optional iterable of things to process. I say “things” because we can give it steps, composed steps, or even entire other pipelines. Here is an example of adding an entire other Pipeline!

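A minimal sketch of the idea, using the prebuilt RemoveCommits pipeline introduced just below (the import path for prebuilt pipelines is an assumption):

    import pipelib.steps as step
    import pipelib.pipeline as pipeline
    import pipelib.pipelines as pipelines  # assumed import path

    # an entire pipeline can be provided as a "step" alongside regular steps
    p = pipeline.Pipeline((
        pipelines.git.RemoveCommits,
        step.transform.ToLowercase(),
    ))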

Implementation-wise, this is also fairly simple. We can check the underlying class of the provided object and either add a single step, or insert a set of steps given another pipeline. In fact, pipelib comes with a small set of “pipelines” that are ready for you to use. For example, here is one to filter out “things that look like complete or partial git commits”:

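Presumably the usage looked something along these lines (a sketch; the inputs and import path are my own):

    import pipelib.pipeline as pipeline
    import pipelib.pipelines as pipelines  # assumed import path

    # drop tags that look like complete or partial git commits
    tags = ["0.9.10", "5890b9aaa4b16d9f75cfc0b5e25ca6bbea7cefbe", "latest"]
    pipeline.Pipeline(pipelines.git.RemoveCommits).run(tags)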

        This is something I found useful because people sometimes use commits as Docker tags, and I don’t find this incredibly meaningful as a version to compare to (and want to remove them). Under the hood, it looks like this:

    RemoveCommits = pipeline.Pipeline(
        steps=(
            # composed step: minimum length AND not all lowercase letters/numbers
            # (exact parameters assumed; see the repr printed below)
            step.filters.HasMinLength(length=8) & ~step.filters.HasAllLowerLettersNumbers(),
        )
    )

        Do you also notice something interesting in the above? We are actually combining steps akin to logical operations. The above “pipeline” is actually just one step that combined other steps!

    pipelines.git.RemoveCommits.steps
    [HasMinLength_AND_NotHasAllLowerLettersNumbers]

        Let’s step back and talk about some concepts that allow this.


        Concepts

        Pipeline

As we’ve seen above, a pipeline is a collection of steps that takes, as input, a listing of items and returns a parsed and filtered list.


        Step

        A step is some action in a pipeline. The way this works is that we have different kinds of steps, and this makes them easy to implement and even test. A boolean step is akin to a filter, and is expected to return True or False to indicate if the item passes, e.g., False means it’s filtered out. Boolean steps are neat because they afford different kinds of logic and combination.


        Logical Operations

        Let’s say that we have a step that checks that an input is all letters:

    step.filters.HasAllLetters()

For the above, anything that had a number (e.g., orange123) would be filtered out. But what if we wanted to invert that, and allow passing of inputs that don’t have all letters (meaning we want numbers or special characters)? We can simply do that:

    ~step.filters.HasAllLetters()

Implementation-wise, this was really fun to do! For Python to respect the logical operator ~, I simply define the “invert” function for the BooleanStep class.

    def __invert__(self):
        """
        We can say "~step" and reverse the logic.
        """
        # set the reverse flag and hand back the same step
        self.reverse = True
        return self

It sets an attribute “reverse” to True, and returns itself; that way we use the same step, but with this variable set to True. What does that do? In the “run” function of the BooleanStep we basically retrieve an outcome from the underlying step (True or False) and simply reverse it given that the reverse boolean is True! Again, it’s very simple, and allows for doing things like this:

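A sketch consistent with the orange123 example above (the exact sample was lost, so this is illustrative):

    from pipelib.pipeline import Pipeline
    import pipelib.steps as steps

    # keep only entries that do NOT consist entirely of letters
    Pipeline(~steps.filters.HasAllLetters()).run(["orange", "orange123"])
    # ['orange123']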

        What if we wanted to combine steps? E.g., what if I want to say “has all letters” OR “has minimum length 10?” If we put the steps side by side we would only be able to support an AND - allowing passing through of entries that have all letters and the minimum length of 10. Pipelib supports both those operators - AND and OR as follows:

    > step = steps.filters.HasAllLetters() & steps.filters.HasMinLength(length=10)
    > step
    HasAllLetters_AND_HasMinLength

For both cases above, we are using the “and” and “or” functions, respectively, and:

1. Checking for class compatibility (both must be BooleanStep)
2. Creating a list of composed steps to be added to a class attribute "composed"
3. Returning a new step class that runs the composed set

You can keep combining steps with other steps as many times as you like - a new check is simply added to the front or back of the list. The result (returned) is the new class that is ready to run. Here is what an OR looks like:

          -
          
           > step = steps.filters.HasAllLetters() | steps.filters.HasMinLength(length=10)
           > step
          @@ -273,10 +313,8 @@ 

          Logical Operations

          -

          If you are interested in this function, you can see the entire thing here.


          Transformation Operations

A base step can be thought of as a transformation. Instead of expecting a boolean to be returned, we are expecting the item back, possibly changed. A transform can also return None, and any item of “None” will be removed from the list; however, in most cases a transform is intended to perform an operation on the item passed. Here is an example of a transformation operation:

    Pipeline(steps.transform.ToLowercase()).run(["AHHHH"])
    ['ahhhh']

          Sort Operations

A sort operation is a step that is one level up. Instead of operating on individual items, the step operates on the listing of items as a whole. A good example from Pipelib is the use case that originally inspired me - to start with a messy list of Docker tags, do some parsing to derive versions, and return back a sorted list.

    pipeline.Pipeline(steps.container.ContainerTagSort(ascending=False)).run(["1.2.3", "0.1.0", "8.3.2"])
    ['8.3.2', '1.2.3', '0.1.0']

          In the above we also demonstrate that steps can take parameters, such as the order of a sort! This particular sorting step also allows you to say you want to return unique major, minor, or patch versions.

    pipeline.Pipeline(steps.container.ContainerTagSort(unique_major=True)).run(["1.2.3", "1.1.0", "8.3.2"])
    ['8.3.2', '1.2.3']

          And if you wanted to do a more comprehensive clean up and sort, you could do something like this.

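For instance, a sketch composed purely of pieces shown in this post (the combination is mine):

    # drop commit-like tags, normalize case, then sort newest first
    pipeline.Pipeline((
        pipelines.git.RemoveCommits,
        steps.transform.ToLowercase(),
        steps.container.ContainerTagSort(ascending=False),
    )).run(tags)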

          Wrapper

Pipelib needed a way to be able to pass around some parsed version of an item, but still maintain a reference to the original. For example, in deriving a semantic version, I might have filtered 1.2.3-boop to be just 1.2.3, but at the end of the day I need the original tag to pull. Pipelib accomplishes this via wrappers.

          -

A wrapper is conceptually that - an internal wrapper class for an item that allows for storing an original value while still doing operations to change a current state. Wrappers are used inside steps and allow for things like sorting and comparison. You probably don’t need to worry about wrappers, because by default results are unwrapped back to basic types. However, you can ask Pipelib to not do this unwrapping, and then you can get back the derived and original values:

          -
          
           tags  = ["1.2.3", "1.1.0", "8.3.2"]
           updated = pipeline.Pipeline(steps.container.ContainerTagSort()).run(tags, unwrap=False)
          @@ -357,7 +386,6 @@ 

          Wrapper

          -

          Conclusion

I’ve had so much fun making this library! Like many of my projects it’s probably not super useful, but perhaps you can imagine adapting it for a use case that you might be interested in. Please don’t hesitate to ask me for help, I’m always running out of fun things to do :)


          Why should I care?

…in a library it might make it slightly easier, or your work more reproducible, because someone else can use the steps. And if you don’t care? That’s okay too. I recognize this was mostly a fun project, and yet-another-itch I really wanted to scratch, because I’ve never made a design like this before, either in terms of the idea or underlying testing and automation.
This is a crosspost from Mark Nelson’s Blog (I like to make distributed systems go fast). See the original post here.

      QEMU/KVM + Ceph Librbd Performance


Check out my blog post at the ceph.io website about tuning QEMU/KVM for high performance with librbd. We got over 123K random read IOPS with 16K IOs from a single VM!

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      Relivin' the 90's - Amiga style


Although I very much started my experience with home computers with IBM compatibles running MS-DOS in the late 1980’s, I’m a lifelong, self-professed Commodore-Amiga addict. I distinctly recall the launch of the Amiga A1000 and being dazzled by its multimedia capabilities around the same time that…

This is a crosspost from VanessaSaurus (dinosaurs, programming, and parsnips). See the original post here.

      Research Software Registries


This post spurred from some original thinking about research software registries, and my recent discovery of the SciCodes Consortium, which I’m excited to find (and a bit surprised I didn’t find earlier, given my experience with research software and registries)! Since I’ve developed registries and been involved extensively in communities that develop standards and tooling for them, I’ve naturally been ruminating over ideas for several months, and hoping to find others who are motivated to think about similar things. This is the motivation of this post - to ruminate, share my thinking, and think together about ideas. You can read the content, or listen to the ideas below.


      Why do we want research software registries?

Research software registries have value when they are deployed for a specific context. However, I’m not convinced that a research software registry, in its most basic form providing archives with DOIs and metadata, is a useful thing in and of itself. It’s adding complexity and redundancy to an already cluttered ecosystem. The reason is that the source of truth of software is usually the source code in version control, e.g., the GitHub repository, which often already has support for features we need to enable easy citation (CITATION.cff), tagged releases, and programmatically accessible metadata. In this context, any kind of registry that provides another identifier and points to the first is providing redundant information. The only potential benefit is grouping and curation, which I would then argue should still point to the version control and/or a specific release as a source of truth.
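For instance, a minimal CITATION.cff is all it takes for GitHub to offer a "Cite this repository" prompt - every value below is a placeholder:

```yaml
# Minimal CITATION.cff; all values here are placeholders.
cff-version: 1.2.0
message: "If you use this software, please cite it as below."
title: "my-research-tool"
version: 1.2.0
date-released: "2022-06-19"
authors:
  - family-names: "Researcher"
    given-names: "Ada"
```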


I’m also not convinced that we have established an actual use case of “searching a registry for software.” What happens in labs and communities is that you establish communities around the software, and then there are established workflows or Slack communities or GitHub organizations to join around that. Most labs already have chosen languages, and even software pipelines that new members extend or work on. I would even go as far as to say that for some (myself included) I don’t find research software, but it finds me. It appears as a link in some social media or chat channel, and I click the link and then there are about 15 seconds during which I make a determination: can the software help me solve a problem that I have, or does it look easy, professional, and/or fun enough that I simply want to try it out? If the answer is “yes” then I add it to a list in a Google Document with other things to try out when I have time. If not, I close the tab and life moves on. But I want to point out that nowhere in this workflow do I explicitly go looking for software. The software often finds me, and then I keep a mental cache of “tools that I’ve seen” and go back to it when the use case arises.


So being able to answer this question about wanting research software registries is especially challenging because I’m not sure I’ve ever wanted one. Unless there is a specific kind of context around a registry (e.g., search for a specific name in a package manager to use, or look for an already assembled workflow), I haven’t been able to convince myself (yet) that I would find a use for one. I could be wrong about this, however, because as we know, people (myself included) are fairly bad at predicting the future, and perhaps there could be some future where “checking a research software registry” is a part of a daily workflow. I am skeptical because I think that a context is needed. Even if some central source of truth about software were established, would it not be the case that a graduate student or researcher needs to go there with a use case or context in mind? I can’t imagine just mindlessly browsing for the sake of browsing. It’s akin to search engines - we are usually looking for something very specific. We don’t search without a purpose. The question here then is, what is the purpose?


      Research Software Registries with a Purpose

A very good example of purpose comes down to workflows. This is the “I need to perform this specific function and I want to use what many others have done before me and not re-invent the wheel.” The minimum example of a workflow registry would be a search interface that indexes pipelines that are perhaps stored in version control. An extended version of that includes being able to provide structured inputs, outputs, and arguments, so the registry can programmatically provide this information to tools. You can then also quickly see how changing this to be general inputs/outputs of software (and functions within) and entrypoints of containers can quickly become a more generalized registry for software that could be used by any workflow manager that knows how to consume its information. However, there is a fine line here, because when we talk about I/O we are going squarely into workflow management territory, and again in my opinion, we have to be careful about that scope. The closest thing that comes to mind for providing workflows as a service is something like openneuro, which has a beautiful idea of “Get your data into this standard format and we will serve it and provide other easy ways to analyze it.” This kind of success story tells me that perhaps there is something to be said for developing anything related to processing or pipelines in the context of a community. You can’t create the perfect registry for every scientific discipline - or perhaps you can do a mediocre job at trying - but if you scope to a specific one you can do a very good job. I’ve found the same to be true with software: it’s often better to do one or a few things very well than more things kind of mediocre.
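To make “structured inputs, outputs, and arguments” concrete, here is a sketch of what a workflow-registry entry might carry - the field names are entirely hypothetical, not from any real registry:

```python
from dataclasses import dataclass, field

@dataclass
class RegistryEntry:
    """A hypothetical workflow-registry record; all field names are illustrative."""
    name: str        # e.g., "variant-calling"
    source: str      # the source of truth: a version-control URL
    version: str     # a tagged release, not a frozen archive
    inputs: dict = field(default_factory=dict)   # name -> type, consumable by tools
    outputs: dict = field(default_factory=dict)  # name -> type, consumable by tools

entry = RegistryEntry(
    name="variant-calling",
    source="https://github.com/example/variant-calling",  # placeholder URL
    version="1.2.0",
    inputs={"reads": "fastq", "reference": "fasta"},
    outputs={"variants": "vcf"},
)
```

A workflow manager that knows this schema could wire one entry’s outputs into another entry’s inputs without a human reading the docs.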


      A Provider of Identifiers?

I’m skeptical when I hear that people want to apply our traditional model of publication (e.g., having a DOI) to software. The reason isn’t that I don’t value means to support reproducibility (and knowing the exact version of something that was used) but rather that we already have means to tag specific versions of software, and means that fit into a well-established ecosystem: package managers, versions, and releases. I also disagree that a single frozen version of software is “the correct unit to provide.” Software is a living and changing entity, and when it truly does “freeze” and stops being worked on, unlike a DOI in the academic space, this is sort of its death. The correct entrypoint for a piece of software, in my opinion, is the current version in version control, from which you could decide to pin a particular release or install a particular version from a package manager. But to provide a single frozen DOI that is wrapping some other version/release of the software? It doesn’t make sense to me. It’s adding additional complexity that’s not needed. So my opinion (as I’ve shared before) is that we should be thinking more about preserving specific timepoints in package managers, and not adding on an artificially created layer of “DOI” that seems (in my opinion) to be more a reflection of our need to shove things into an academic framework we are comfortable with than anything else.


So I hope that the purpose of a research software registry would not just be to provide DOIs. That doesn’t help me get my work done at the end of the day. All that said, I don’t think there can be a singular answer for purpose. I think the purpose ultimately comes down to the institution (or community) and the specific goals of the registry. For this reason there is no one answer for what a registry should look like or provide, and it is (or will be) challenging to define attributes that “any registry should have.”


      What is my purpose?


      You cut butter!


Just kidding :_) I’ve been ruminating on this idea for quite some time, namely because I’m motivated to build a new kind of research software registry, but first I need to convince myself of a meaningful purpose. While I don’t have my convincing answer yet (but I do have a sense of direction), the way I’ve been thinking about this is to provide a set of questions or use cases that seem plausible. It seems like most people are asking “What kind of information should we have in a registry?” but that isn’t exactly the question I’m interested in - I want to know:


      What do you want to do next with the software you find?


      This is important because it’s going to drive the context and purpose of the registry. Here are a few examples:

      1. I want to quickly try this out → a registry that can deploy a developer environment
      2. I want to find if this is in a package manager → a reproducible install
3. […]

Indeed many of the above contexts require additional information. For example, if we want to be able to ask what software is specifically used to perform X, we need a set of functions that are common to a domain, and then to annotate specific software (or even functions) that do it. If we want to then ask “Which of these is the best?” we need to generate benchmarks to measure this functionality. E.g., how long does it take to run? What are the inputs and outputs, and are they correct? What are its resource needs? It would be an incredibly cool thing to be able to ask these questions, but an enormous amount of work for any particular scientific domain. As an example of thinking about functional needs, we might look to brain imaging, which is arguably a subfield of neuroinformatics. We might define custom processing functions like thresholding, registration, normalization, or creating regions of interest, tag specific functions that can do each, and then collect and share metrics about how successfully each does so, as sketched below. Arguably, if I wanted to do this I would create wrappers to workflow managers (akin to Snakemake Wrappers) that not only measure metrics, but make it easy for people to quickly use it in their work.
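As a sketch of that tagging idea - every name here is hypothetical - a decorator could register functions by the domain operation they perform and collect simple timing metrics as they run:

```python
import time
from collections import defaultdict

REGISTRY = defaultdict(list)  # hypothetical index: domain task -> implementations

def performs(task: str):
    """Tag a function as implementing a domain operation and time its calls."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            result = func(*args, **kwargs)
            wrapper.runtimes.append(time.perf_counter() - start)
            return result
        wrapper.runtimes = []
        wrapper.__name__ = func.__name__
        REGISTRY[task].append(wrapper)
        return wrapper
    return decorator

@performs("thresholding")
def simple_threshold(values, cutoff):
    return [v for v in values if v >= cutoff]

simple_threshold(range(100), 50)
print(REGISTRY["thresholding"][0].runtimes)  # the collected per-call metrics
```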


        It needs to be easy

        Whether I’m thinking about being a user of a research software registry or a developer, it just needs to be easy. Here are some ideas around that.


        Re-inventing the wheel?

        I come with the experience of deploying a custom container registry (Singularity Hub) years ago, and then being involved in standards committees (the Open Container Initiative) that develop generalized specifications that now drive the most common software (container) registries. I’ve also developed registry proxies that do interesting things, along with a Python OCI registry, and I’m the main developer for oras-py (ORAS == OCI Registry as Storage). So believe me when I say that in terms of storing blobs and metadata about them, I don’t think we should re-invent the wheel. Any new registry I create is going to start with these standards. You might disagree, and that’s OK. But I think people have thought long and hard about these things, and we are stronger for working together on them over always making our own new thing.
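As a sketch of what “starting with these standards” can look like, any artifact can be pushed to an OCI registry with oras-py; the hostname, tag, and file below are placeholders, and the client interface may differ slightly across oras-py versions:

```python
import oras.client

# Push an arbitrary artifact (say, a registry metadata record) to any OCI registry.
client = oras.client.OrasClient(hostname="localhost:5000", insecure=True)
client.push(
    files=["metadata.json"],                        # placeholder artifact
    target="localhost:5000/registry/entry:v1.2.0",  # placeholder reference
)

# Anyone can later retrieve it with the same standard tooling.
client.pull(target="localhost:5000/registry/entry:v1.2.0")
```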


As a supplement to that, I want to point out one of the biggest challenges in our community. The majority of research software, I would argue, doesn’t get used beyond the lab it’s created for. Said lab might submit or include it in a paper, and then they get their publication and move on. This is reflective of many things, and I’ll review them here. The first is our funding model - we can maybe fund work on a piece of software only until the funding dries up, and then it becomes an abandoned repository, if it’s made publicly available at all. The second is our incentive model - the academic community is focused on writing papers, so once you get there, you don’t have reason to consider the long-term impact of the software. The third is communication. It is actually much easier to throw together your own library than to have to search for and then try contributing to someone else’s. I say this because I don’t think the way things are is necessarily the fault of anyone - we are all agents responding to incentives and resources available.


But then on the flip side - these observations raise the question of what leads to software that is successful on a community level. I think a few things can happen. Either someone puts time and energy into establishing community, period - meaning bringing together people that are working on common goals and explicitly asking “How can we do this together?” - or, as I’ve seen with more commercial open source, having enough money or power that you can create strong branding and community just by way of having the funds for it. I’ve talked about this a few times before and it’s not necessarily bad, but it’s unfair at best. Software that maybe would not be successful by its own merit rises to the top, and really great software that doesn’t have those resources does not. That said, I’ve also seen sort of mediocre software get much better and earn its reputation, so I can’t say it’s a completely wrong dynamic.


        Is the answer Mooooar Metadata?

As we design the “perfect set of information” we want provided for any piece of software, we need to put people first. We have to ask ourselves what people are willing to do, and generally people don’t want to spend inordinate amounts of extra time defining metadata or inputs/outputs for their custom scripts. This was a point also brought up by Paula in the SciCodes meeting, and I am 100% behind it. If we require extensive metadata about software, it needs to be collected in an automated fashion. In practice, when I think of archives for software, I’m just not that motivated to provide more than the absolute minimum to click the “submit” button.
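As one sketch of what automated could mean here: harvest what version control already knows instead of asking the submitter to retype it. This hypothetical helper leans on the public GitHub REST API (the example repository is a placeholder):

```python
import json
import urllib.request

def harvest_metadata(owner: str, repo: str) -> dict:
    """Pull basic software metadata from the GitHub API instead of a web form."""
    url = f"https://api.github.com/repos/{owner}/{repo}"
    with urllib.request.urlopen(url) as response:
        data = json.load(response)
    return {
        "name": data["name"],
        "description": data["description"],
        "license": (data.get("license") or {}).get("spdx_id"),
        "url": data["html_url"],
    }

print(harvest_metadata("example-org", "example-software"))  # placeholder repository
```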


        Do people know what they want?

One of the hardest things about this kind of problem is that people don’t often know what they want. […] (on a high level) to browse and interact with research software. What are the compelling reasons for this registry, for you?


This is actually really fun to think about, because what even is a research software registry? Is it a place to find software to plug into workflows? Does it provide ABI or more general function signatures to help you plug into workflows? Does it provide a citation? A container? An interactive environment? A dependency graph? Something else? This is indeed why this problem is so hard - there are so many ways to think about this basic concept. And that’s kind of what makes it fun too? But also what makes it hard. Personally speaking, since I’m more interested in building things, I find myself ruminating about details for a specific use case. And since I’m a developer and craving better support for developer environments, this tends to be where my brain goes. And have you noticed I haven’t given a direct answer for what a research software registry is? It’s 1. because I don’t know, and 2. because we are trying to define a registry for a kind of output that we don’t even have an agreed-upon definition for yet! So perhaps the definition will happen on the level of the deployment or institution? Anyway, I hope you take the opportunity to discuss with your peers, pets, and even yourself, to try and answer this question.


        Summary

        To summarize, I’m spending a lot of time thinking about this, and less in an “I’m an academic that wants DOIs and metadata” and more in a “I am a software engineer that wants to build something that I actually find useful.” Might I scratch itches along the way? Sure. And I do have some early ideas that I plan to hack on before sharing publicly. In the meantime, I do hope you are interested in some of these ideas and take time to write or introspect yourself.


        And on a higher level, I really like this format of writing and speaking, where the speaking isn’t formal enough to be a talk that you put together and practice for weeks (I put this all together in an afternoon) but it still is a media format that literally gives a voice.


This is a crosspost from Glenn K. Lockwood (personal thoughts and opinions of a supercomputing enthusiast). See the original post here.

      SC'22 Recap


      The biggest annual conference in HPC, the SC conference, was recently held in Dallas, Texas in its second hybrid incarnation since being all-remote for the pandemic. This year attracted over 11,000 attendees which is much closer to the pre-pandemic high of 14,000 than last year's 7,000, and judging from the crushed conference rooms and busy expo floor, it looks like SC is not that much worse for wear.

This year's conference was quite different for me since I attended for the first time as a vendor, not a researcher or practitioner, and I spent most of my days behind closed doors talking to customers. As a result, I didn't get to attend any of the keynotes, BOFs, or panels to which I wasn't invited, so I'm not really qualified to give an erudite summary of the conference or expo this year.

So instead, I'm just writing down what I remember in the order that I remember it, and not necessarily in a coherent narrative form. I'm sure I missed a lot (for example, mixed precision seemed big this year, and I heard Jack Dongarra gave a fantastic Turing Award talk) so I encourage others to write their own recaps and share with the community!


      High-level themes

      I actually started writing an SC'21 recap last year which I never posted, and re-reading the intro was funny--you'd think nothing has changed in the last year.


      The underwhelming

      The biggest deal appears to be that exascale is here, and it turns out that it's not that big of a deal. China let the air out of the tires by debuting their exascale systems at SC'21, and not only did they thumb their nose at Top500 by not submitting, they debuted by winning a Gordon Bell prize instead. The first US exascale system, Frontier, was debuted at ISC this year leaving its showing at SC a bit deflated too. Frontier was featured in the Gordon Bell prize-winning paper this year, but that work required the use of four Top-10 systems, not just Frontier, painting the reality that one giant computer rarely stands on its own when it comes to advancing science.


      This isn't to say that deploying exascale systems isn't a noteworthy feat and worth commendation, but I felt like the hype over the last five years treated the achievement like an end state instead of a milestone. And now that we've passed the milestone, the community is grasping to figure out what comes next. So what is next?

      Quantum had a strong and growing presence at SC, as it has for the last few years. But the conclusion of the panel "Quantum Computing: A Future for HPC Acceleration" was that no, it's not close to being ready.

      Disaggregation and composability was another theme with growing momentum. And like quantum, there was a panel asking the same question: "Does HPC need composability now?" The answer, again, was no, not yet. More on that below.

      What about RISC-V? Surely that will revolutionize the field. As it turns out, the answer there is also that RISC-V is not ready to do anything useful for HPC yet.

      The list goes on of technologies and trends that people are trying to boost now that exascale is "solved." The reality, I think, is that "exascale" will take years to actually mature since it appears to have a ton of technical debt that accumulated during the race to be first. US Exascale rests on the shoulders of AMD and Intel, two companies whose software stacks have not caught up to the market leader, so there will be a lot of thrashing around as development practices and optimization settle out around these systems.

      Struggling with code porting is not very exciting to computer science Ph.D.s, so I expect future SCs to mirror this one and bifurcate into two distinct tracks: those struggling to identify the next big thing in the research space, and those struggling to use the systems that were rushed to deployment.


      The unexpected

      My SC experience was very biased since I didn't get out much, but two related themes kept popping up across different meetings and the sessions I did attend.


      Power efficiency is serious business now. It used to seem like people talked about the need for energy-efficient HPC in an abstract sense while continuing to jam more power into every rack without changing their approach to system design, facilities, and deployment models. That has hit a hard wall with energy prices soaring in Europe, though. The financial impacts of power-inefficient supercomputing have gone from a one-time capex cost to an ongoing opex cost that is putting many HPC facilities on an unsustainable cost trajectory. Even sites that aren't doing new deployments are facing sudden, sharp increases in their costs, and nobody has good answers about how they will keep the lights on.

      Cloud HPC is confusing. With only 15% of total HPC dollars winding up in the cloud, it's little surprise that most HPC folks are only peripherally aware of what HPC in the cloud really means. Worse yet, a subset of those folks are actively hostile towards the idea of running HPC workloads in the cloud. I spoke with my colleagues from all three major cloud service providers as well as my colleagues in DOE, NSF, and education throughout the week, and everyone painted this same general picture.

      There seems to be a mismatch between the expectations of on-prem HPC folks and cloud HPC folks. For example, I was asked why Windows doesn't support OpenMP very well, and after a bit of digging, I realized that the question really wasn't about using OpenMP on Windows as much as it was about using OpenMP in the cloud. There was a latent assumption that "HPC in Microsoft's cloud" must mean "HPC on Windows" which, for the record, is false--I don't even know how to use Windows anymore. Similarly, people decried the performance impacts of sharing HPC nodes with others in the cloud (they are not shared), overheads of virtualizing InfiniBand or GPUs (everyone uses PCIe passthrough or SR-IOV for HPC nodes), and other misconceptions.

This isn't to say that cloud people aren't confused too; I heard stories about conversations that went sideways because cloud folks (not from my employer, thankfully!) didn't realize that the requirements of a traditional gov/edu HPC facility couldn't be neatly wrapped up into a single workload with a single solution, contrary to the case across many commercial AI shops. And both sides are struggling to find models for partnership and engagement that mirror the traditional relationship between places like a DOE or NSF facility and a company like Cray. HPC departments are used to buying supercomputers and parallel file systems, while cloud providers sell computing and storage as a service. The distinction may seem trivial at the surface, but there's a large divide that becomes evident once both sides start trying to drill into the details of what a partnership would look like.


      Parallel I/O in Practice Tutorial

This was my fifth year contributing to the Parallel I/O in Practice Tutorial with my colleagues at Argonne and Google, and it was our first time doing it in-person since 2019. It felt really good to be back in front of people to opine about the perils of POSIX and the greatness of the Darshan I/O profiling tool, and this year I retired the material I used to present on burst buffers (since DataWarp and Infinite Memory Engine have lost relevance in HPC) and the TOKIO holistic I/O analysis framework (since it is no longer funded/maintained). In their stead, I presented material on benchmarking with IOR and mdtest that I debuted at LUG 2022 this year.


      I haven't gotten feedback yet on whether this change was a net positive one, but I think it went over well. Benchmarking I/O is really challenging if you don't understand how things like page cache really work in distributed systems, and walking through some benchmark examples concretizes a lot of abstract parallel file system concepts like locking and striping. And since benchmarking is a rabbit hole of arbitrary complexity, ending the tutorial with advanced benchmarking topics turned out to be a nice way to add buffer to the end of an eight-hour stretch of carefully timed presentations. It's very easy to skip over the nuances of analyzing mdtest outputs if attendees have a lot of questions about more important things at the end of the day.
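As a taste of those nuances, here is one way to script an IOR write/read pair so the read phase measures the file system rather than the local page cache - a sketch using IOR's documented flags, with a placeholder path and rank count:

```python
import subprocess

# -C shifts ranks on read-back so each rank reads data written by a different
# node (defeating the local page cache), and -e fsyncs after the write phase so
# the write numbers reflect more than dirty-page creation.
subprocess.run([
    "mpirun", "-np", "16",
    "ior",
    "-w", "-r",       # write phase, then read phase
    "-t", "1m",       # transfer size per I/O call
    "-b", "16m",      # block size written per rank
    "-F",             # file-per-process I/O
    "-C",             # reorder tasks for read-back
    "-e",             # fsync after the write phase
    "-o", "/mnt/parallelfs/ior-testfile",  # placeholder output path
], check=True)
```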

      The most surprising observation of the tutorial is how many attendees aren't using MPI anymore. We got a lot of questions last year about task-oriented I/O, and this year had some great questions about trying to understand or tune the I/O performed by Python-based analytics frameworks. We decided to add support for Darshan to profile non-MPI applications back in 2019 which is now paying dividends by ensuring it is a relevant tool for these new analytics and AI workloads, and we'll probably have to give more attention to optimizing these workloads' I/O in the future.
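Those Python integrations also make the logs themselves scriptable; a minimal PyDarshan read looks roughly like this (the log file name is a placeholder, and API details can vary across pydarshan versions):

```python
import darshan

# Open a Darshan log; this works for MPI and non-MPI applications alike.
report = darshan.DarshanReport("app.darshan", read_all=True)

# Summarize a few POSIX counters across all records in the log.
posix = report.records["POSIX"].to_df()
print(posix["counters"][["POSIX_READS", "POSIX_WRITES", "POSIX_BYTES_READ"]].sum())
```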


      DAOS User Group

      Monday morning was cold and rainy--a perfect day to attend the 2022 DAOS User Group which was held off-site at the Fairmont Hotel.


      Whether you particularly care about DAOS or not, the cross-community HPC I/O brain trust is guaranteed to be in attendance, and this year did not disappoint. In addition to the expected stakeholders from Intel and DOE, representatives from all three big CSPs were in attendance. Google Cloud, Seagate, and HPE/Cray were all on the agenda, painting a diversifying landscape of large HPC companies investing time into DAOS and the strength and willingness of the DAOS team to partner with all comers.


      Life after Optane

      The question that opened up the meeting, of course, was "what is the future of DAOS since Intel cancelled Optane?" Kelsey Prantis had the official statement (I'll replace the grainy photo once the DUG slides are online...):


      The high-level project answer is that DAOS isn't going anywhere. Aurora, by virtue of still having Optane DIMMs, will not be affected, and DAOS will maintain support for Optane until Intel drops its last Optane DIMMs (Crow Pass for Sapphire Rapids) from support life sometime towards the end of this decade.

      For new customers who aren't going to use Optane, the answer is "Metadata on NVMe," a development being codeveloped by Intel, HPE, and Google to implement a write-ahead log (WAL) and allow DAOS to use volatile DRAM instead of Optane. It will work like a file system journal in that a compact representation of writes will be committed to NVMe immediately after landing in DRAM, and then DAOS will asynchronously write back the properly serialized representation of that transaction after it is acknowledged. Johann Lombardi had a helpful cartoon that showed how this WAL will fit into DAOS:


      A key benefit of DAOS's implementation of this WAL is that it will be able to still service incoming writes while flushing old writes; although I don't fully grasp how this works, it is something enabled by the sophisticated I/O scheduler already implemented in DAOS.
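To make the mechanism concrete, here is a toy sketch of that commit-then-writeback flow - purely an illustration of the idea, not DAOS code:

```python
import threading
import queue

log = open("wal.log", "ab", buffering=0)  # stands in for the NVMe-backed write-ahead log
writeback = queue.Queue()                 # writes awaiting asynchronous serialization

def write(record: bytes) -> None:
    """Commit a compact record to the WAL, then acknowledge immediately."""
    log.write(record + b"\n")  # a real WAL would also fsync before acknowledging
    writeback.put(record)      # full serialization happens later, off the hot path

def writeback_worker() -> None:
    while True:
        record = writeback.get()
        # ... asynchronously write the properly serialized representation here,
        # after which the corresponding WAL entry can be retired ...
        writeback.task_done()

threading.Thread(target=writeback_worker, daemon=True).start()
write(b"update key=42")  # returns as soon as the journal append lands
writeback.join()
```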

      The complete implementation isn't expected to be released until Spring 2024, but it appears to touch only a few components of DAOS and doesn't affect anything above the VOS layer of the DAOS server.

There was also mention of developing operability with new CXL-attached memory-semantic SSDs to keep the persistent memory capability of DAOS alive beyond Optane. I'm not sure if this would offer a performance benefit over the metadata-on-NVMe feature; early results show that metadata-on-NVMe actually delivers higher IOPS than Optane since the synchronous write path is much simpler without having to account for memory persistence. That said, I didn't really follow the full extent of options on the table for how DAOS metadata may work across different types of memory.


      DAOS in the flesh at Argonne

      Kevin Harms presented an update on Aurora's massive 220 PB DAOS installation and laid out its configuration. There are 1,024 DAOS servers based on the Intel Coyote Pass server design, each sporting


      • 2x Intel Xeon 5320 (Ice Lake) sockets
      • 2x DAOS engines (one per socket)
      • 16x 32GB DDR4 DIMMs
      • 16x 512GB Optane DIMMs (Persistent Memory 200)
      • 16x 15.36 TB Samsung PM1733 NVMe SSDs
      • 2x 200 Gb/s Slingshot NICs

      The total configuration is quoted at 220 PB usable, but Kevin pointed out that this assumes that every object is erasure coded at 16+2. Unlike virtually every other storage system out there, though, users can choose the data protection for their individual objects when they create them, meaning this 220 PB capacity is an upper limit to what users can do. Users with very hot, read-only objects may choose to replicate instead of erasure code, while others who are capacity-constrained may choose to erasure code everything at 16+2 at the cost of latency and IOPS. This flexibility is really powerful for users since they can tailor their object layout ("object class" in DAOS parlance) to match the needs of their workload.
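The quoted capacity is consistent with that hardware list; a quick back-of-the-envelope check (my own arithmetic):

```python
servers = 1024
ssds_per_server = 16
tb_per_ssd = 15.36

raw_pb = servers * ssds_per_server * tb_per_ssd / 1000  # ~251.7 PB raw
usable_pb = raw_pb * 16 / 18                            # 16+2 erasure coding overhead
print(f"raw: {raw_pb:.1f} PB, usable at 16+2: {usable_pb:.1f} PB")
# raw: 251.7 PB, usable at 16+2: 223.7 PB -- close to the quoted 220 PB
```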


      Argonne will be slicing up this DAOS system by giving each scientific project its own DAOS pool, and each pool will be assigned to only 80% of the available DAOS servers by default. This seems like a nice way of providing most of the storage system performance to every user, but offering more freedom to work around bad hardware, bad users, and other performance problems that plague file systems like Lustre that distribute everything across every single server equally.

      Finally, I noticed that Aurora will be using Samsung SSDs, not the Intel (now Solidigm) QLC NAND that appeared in all the DAOS slides floating around two years ago. I'm not sure what happened there, but the move from Solidigm QLC to Samsung TLC couldn't have been cheap.


      New features and contributions

      DAOS is starting to pick up some truly valuable features that are being developed and contributed by third parties. Of note, croit has contributed a feature which allows DAOS to serve up NVMe over Fabrics targets, and Seagate contributed an S3 gateway for DAOS. Along with the DFS file system interface, DAOS now offers the trifecta of standard object, block, and file services just like Ceph. Unlike Ceph though, performance on DAOS is a first-class citizen. While croit made it clear that the NVMeoF support still has a ways to go to improve the way it does thread pooling and provides resilience, they showed 1.4 million IOPS from a single storage client using TCP over Ethernet with minimal client-side overhead.


      Intel is also developing multitenant support for DFUSE, allowing a single compute node to share a DAOS mount and let permissions be enforced through UID/GID just like a regular file system. Before this update, the FUSE-based nature of DAOS allowed any unprivileged user to mount their container (good), but only one FUSE agent could be alive on a single node at a time (not good) which prevented multiple users sharing a node from both mounting their own containers.

      DAOS also has some longer-term enhancements that I thought were interesting:

      • expanding the range of POSIX calls supported by DAOS's intercept library to include metadata calls and memory-mapped I/O using userfaultfd
      • implementing collaborative caching - essentially reimplementing the Linux kernel page cache in userspace so that multiple processes can share cached DAOS pages
      • supporting a computational storage paradigm by enabling offload of userspace eBPF scripts to DAOS servers

      DAOS in a larger data center ecosystem

      Dean Hildebrand from Google Cloud then gave an overview of Google's efforts in bringing DAOS into the cloud. He had some nice performance graphs and I'll link the full presentation here once it's uploaded (it's worth a watch), but the part I found the most insightful was how they are trying to decide where a technology like DAOS fits in the larger cloud storage ecosystem. He outlined two different ways DAOS could work in GCP:

      1. Caching: Google Cloud Storage (GCS) is the point of truth and DAOS is a cache
      2. Tiering: DAOS is a point of truth, and GCS is an archive


      He said they were leaning towards the caching model where data only lives ephemerally in DAOS, and personally, I think this is the right move since DAOS in the cloud is not resilient without Optane. However, this choice reflects a much larger tension in cloud storage for HPC:

      1. The centerpiece of every cloud's data story is a scalable, low-cost, low-performance object store which is analogous to what on-prem HPC would call campaign, community, or project storage.
2. HPC demands higher performance than these object stores can generally deliver.
      To bridge the gap between these two truths, auxiliary services must bolt on to the object layer and provide higher performance, at a higher cost, for the duration of I/O-intensive HPC jobs. Some choose to provide true tiering from object into a resilient layer of flash (like FSx Lustre and Weka do), while others project the contents of the object through a high-performance caching layer (like HPC Cache and File Cache) and are never meant to persistently hold data.

      This isn't rocket science, but I never thought deeply about the two models since campaign/community/project storage in on-prem HPC is usually fast enough to avoid needing caches or fine-grained tiering capabilities.

      John Bent also had a thought-provoking presentation about how Seagate's now-"deprioritized" CORTX object store, which once competed with DAOS as Mero, contains ideas that can complement DAOS:

[…]

      Of course, completely tossing Reed-Solomon for something more sophisticated (like VAST does with its locally decodable 150+4 scheme) obviates the need for multilevel erasure entirely. But DAOS has not gone down that route yet.

      And as with every talk John gives, there were lots of other interesting nuggets scattered throughout his presentation. Two of my favorites were:

      • A slide that pointed out that, when you buy something like Ceph as an appliance, you may be spending only 25% of the total cost on storage media and the rest is infrastructure, service, and support. This struck me as a bit on the low end, but some enterprisey NAS and midrange parallel file system appliances can go this low. Spending 60% to 90% on media is a lot nicer for the buyer (and companies like Seagate) if you can buy at scale or eschew the white-glove support, and John suggested that it's up to companies like Seagate to fix the software issues that require customers to pay for white-glove support in the first place.  After all, the less someone spends on support and licenses, the more they can spend on Seagate hard drives.
      • John's final slide pointed out that object stores were originally designed to get around the limitations of POSIX file systems, but as they've evolved over the last decade, they're starting to look a lot like file systems anyway since they require strong consistency, hierarchical namespaces, and familiar file semantics. Has all the work put into developing super-fast object stores like DAOS over the last ten years really just brought us back full circle to parallel file systems?  Companies like VAST and Weka have shown that maybe POSIX isn't as bad as the research community (myself included!) have claimed it to be; it was really just low-performance implementations that nobody wanted.
      Once John's talk is uploaded to the DUG 2022 website, I'll link it here.  Like Dean Hildebrand's talk, it is well worth watching (but for wildly different reasons!)


      PDSW 2022

      I had to duck out of the DAOS User Group early to run (through the rain) to the 7th International Parallel Data Systems Workshop (PDSW 2022) on Monday afternoon.


      Much to everyone's surprise, PDSW was only given a half day this year and everything felt a little compressed as a result. The organizers kept the work-in-progress (WIP) sessions which can often be an interesting peek into what students are pursuing, but little A/V problems and the unforgiving schedule probably did a disservice to the up-and-comers who use the WIP track to lay the groundwork for future full-length papers. Hopefully SC'23 restores PDSW to its original full-day status.


      Splinters keynote from Arif Merchant at Google

      The keynote presentation was given by Arif Merchant from Google about Splinters, the framework that Google Cloud uses to sample I/Os in a scalable way. The challenge they face is that it's impossible to trace and store every single I/O that hits Google's storage servers (D servers), but having an understanding of I/O patterns is essential for characterizing workload I/O behavior and planning for future infrastructure. In fact, this problem is so important that Google isn't the only cloud that's solved it!


A lot of what Arif talked about is very similar to how Azure does its I/O tracing under the hood. I suppose it should not be a surprise that there are only so many ways to solve the challenge of sampling individual IOPS in a way that fairly represents the aggregate workload of a huge distributed storage system. One really smart thing Splinters does that I liked was sample along two different dimensions: not only do they evenly sample across all IOPS at a fixed rate (the obvious thing), but they also sample across files at a fixed rate. In this latter case of per-file sampling, they take a tiny fraction of files and capture every I/O for that file to get a complete picture of how individual files are being accessed.

      This file sampling fills the huge gap that exists when randomly sampling IOPS alone. Because different I/Os have different "costs" (for example, reading a 1 MiB file using a single 1 MiB read op or 256x 4 KiB read ops are functionally equivalent to an application), randomly sampling ops introduces systematic biases that can be difficult to back out after the data has been sampled, subsampled, aggregated, and reduced. Splinters' approach lets you see the workload from two different angles (and biases) and answer a much larger range of questions about what's really happening across thousands of storage servers.
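A cartoon of those two sampling dimensions might look like the following - my own sketch of the idea, not Splinters code:

```python
import hashlib
import random

OP_RATE = 0.001      # dimension 1: sample ~0.1% of all I/O operations
FILE_BUCKETS = 1000  # dimension 2: fully trace ~1/1000 of all files

def tracked_file(filename: str) -> bool:
    """Deterministically select a small fraction of files for complete tracing."""
    digest = hashlib.md5(filename.encode()).hexdigest()
    return int(digest, 16) % FILE_BUCKETS == 0

def should_record(op: dict) -> bool:
    # Uniform sampling across ops is biased toward busy files...
    if random.random() < OP_RATE:
        return True
    # ...so per-file sampling captures every op for a few files as a complement.
    return tracked_file(op["file"])

print(should_record({"file": "/data/x.bin", "type": "read", "size": 4096}))
```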

      That said, it was interesting to hear Arif describe how Splinters evolved out of a different internal Google project but wound up outliving it. Splinters is also similar to, but slightly different from, their Dapper infrastructure which also does scalable distributed system tracing. And he made overtures to F1, a scalable SQL database that is similar to (but not the same as) the SQL-like query interface that Splinters uses. I got the impression that new technologies come and go pretty quickly at Google, and there's a large appetite for creating new software systems outright rather than shoehorning an existing system into solving a new problem. I can't say one way is better than the other; I was just surprised at the contrast with my own experiences.


      Practical papers

      PDSW had a healthy combination of both very-researchy papers and applied research papers this year. I could only stick around for the applied papers, and two left an impression.


      In the first, Jean Luca Bez presented Drishti, a tool that lives downstream of the Darshan I/O profiling library and finally does what the Darshan community has danced around for years--turning a Darshan log into an actionable set of recommendations on how to improve I/O performance. It does this by cataloguing a bunch of heuristics and using Darshan's new Python integrations to pore through a log and identify known-problematic I/O patterns. Like Jean Luca's DXT Explorer tool, Drishti has a slick user interface and greatly extends the usability and insights that can be pulled out of a Darshan log file. It probably won't win a Turing Award, but this sort of work is probably going to benefit scores of HPC end-users by making Darshan (and troubleshooting I/O problems) much more accessible to mere mortals for years to come.

      Adrian Jackson also presented a very tidy apples-to-apples comparison of DAOS and Lustre on the same hardware using both a systems-level benchmark and an application-inspired, object-oriented data model benchmark. The specific bake-off of a new curiosity (DAOS) and the decades-old incumbent (Lustre) is probably interesting to storage nerds, but I think the real novelty of the work is in its exploration of some uncomfortable realities that the HPC I/O community will have to face in the coming years:

      • Does "slow memory" (nonvolatile Optane or CXL-attached memory SSDs) give actual benefit to existing file systems (like Lustre), or is rethinking the entire storage stack (like DAOS did) really necessary to unlock the performance of new hardware?
      • Do applications need to rethink their approach to I/O to make use of post-POSIX storage systems like DAOS, or is performing I/O as you would on a file system (Lustre) on a post-POSIX storage system (DAOS) good enough?

      My take from the work is that, for simple I/O patterns like checkpoint/restart, you can get pretty far by just treating something like DAOS the same as you would a parallel file system:


      But if you want your data at rest to have the same data model as how it's handled within the application, you really ought to use a storage system that supports data models that are more expressive than a stream of bytes (which is what POSIX files are).

      The authors didn't do a perfect job of giving Lustre its fair shake since they chose to use (abuse) directories and files to represent their application's data model on-disk instead of developing an object-file model that file systems like Lustre handle a little better. But let's be real--HPC is full of applications that do the exact same thing and represent datasets on-disk using complex hierarchies of directories and files simply because that's the easiest way to map the application's representation of data into the standard file system model. In that sense, storage systems that represent rich data models in a high-performance way should be really valuable to naive applications that map in-memory data structures directly to files and directories.

      Going back to John Bent's closing slide from his DAOS User Group talk, though, does any of this even matter since all answers lead back to parallel file systems? Maybe there's something to be learned about adding better back-door APIs that support more diverse data models than what POSIX file interfaces give us.


      The SC22 Expo

      The expo is my favorite part of SC because it's when I get to talk to people one-on-one and learn about corners of the HPC industry that I would've never otherwise sought out. Much to my dismay, though, I had very little time to walk the floor this year--so little that I didn't get any swag. If you want to read up on what interesting technology was being showcased, I strongly recommend reading all the great content that Patrick Kennedy and his team at STH created covering the expo.


      That said, I did notice some curious trends about the show floor overall.

The NVIDIA booth was notably absent this year (though they shared booth space with partners), and many of the usual top vendors had a significantly smaller presence on the expo floor. Just for fun, I compiled the top ten(ish) vendors by booth size:

      1. Weka.io (3,200 sqft)
      2. VAST Data, Department of Energy, Penguin Computing, HPE, and Microsoft (2,500 sqft)
      3. AWS (2,000 sqft)
      4. Google and TACC (1,600 sqft)
      5. Supermicro, AMD, Intel, Dell, NASA, and Indiana University (1,500 sqft)

      I think it's amazing to see all-flash storage companies at the top of the list alongside all of the Big 3 cloud service providers. I may be reading too much into this, but this may mean that the money behind SC is shifting towards companies playing in the cloud-based AI space instead of traditional big iron for simulation. Or perhaps it's a sign that most of the traditional HPC players are taking a hard look at the return they get on a big booth given the current economic climate and pulled back this year.

      +
      1. Weka.io (3,200 sqft)
      2. VAST Data, Department of Energy, Penguin Computing, HPE, and Microsoft (2,500 sqft)
      3. AWS (2,000 sqft)
      4. Google and TACC (1,600 sqft)
      5. Supermicro, AMD, Intel, Dell, NASA, and Indiana University (1,500 sqft)
      +

      I think it's amazing to see all-flash storage companies at the top of the list alongside all of the Big 3 cloud service providers. I may be reading too much into this, but this may mean that the money behind SC is shifting towards companies playing in the cloud-based AI space instead of traditional big iron for simulation. Or perhaps it's a sign that most of the traditional HPC players are taking a hard look at the return they get on a big booth given the current economic climate and pulled back this year.

      I did chat with a couple colleagues who completely opted out of a booth this year (for reference, SC'21 had 10% fewer exhibitor booths than SC'19), and the reasoning was consistent: they found more value in having staff meet with customers privately or attend the technical sessions and engage with people organically. Combined with a bit of bad taste left over from SC's high cost of hosting pandemic-era "digital booths" despite low return (did anyone visit digital booths at SC'20 or SC'21?), I can see why some vendors may have chosen to skip the expo this year.

      Whatever the reasons may be, I was a bit sad to see such a small presence from some of my favorites like IBM, Fujitsu, Atos, and NEC. Hopefully the SC Exhibits Committee (and the economy!) can find ways to bring back the pre-pandemic glory of the show floor.

      The expo wasn't all doom and gloom though! Even though I couldn't make my complete rounds this year, there were a couple of highlights for me.

VAST's masterful marketing

Perhaps the splashiest vendor at SC was VAST Data who had a brilliant marketing presence. First was the giant Vastronaut mascot that was the centerpiece of their booth:

      A quick search of Twitter shows just how many people seized the opportunity to take a selfie at their booth. I would love to know how they transported that thing to and from the conference, but whatever the cost, I'll bet it was worth it.

      At the Grand Opening Gala on Monday, they also gave out delightfully tacky light-up cowboy hats that everyone seemed to be wearing:

The subtle genius of this was that not only did people wear them during the gala and the Flop Gun-themed Beowulf Bash 2022 party later that night, but they had to wear them on their plane rides home since they were so inconveniently bulky. Case in point: my wife (who doesn't work in tech) sent me this text message to confirm that she was waiting for me at the right luggage carousel at San Francisco Airport:

      I wonder how many innocent bystanders, traveling home for Thanksgiving on Thursday or Friday, saw the shiny cowboy hats at airports around the country and wondered what VAST was.

      The icing on the cake was VAST's CEO, Renen Hallak, parading around in an unmissable Chuck McGill-style space suit all week, clearly not taking himself too seriously and painting VAST as a work hard/play hard kind of company. Now, do flashy space suits and blinking cowboy hats alone mean VAST has a great product? I can't say**. But marketing is an art that I appreciate, and VAST hit some great notes this year.

      ** (Seriously, I'm not sure I wouldn't get in trouble for opining about another company here.)

The Microsoft hardware bar

The only booth where I spent any appreciable time this year was my own employer's. I personally love booth duty and accosting strangers on the show floor, especially if there's something interesting at the booth to jumpstart a conversation. When I worked at SDSC it was a Raspberry Pi cluster, and at the Microsoft booth this year it was the "hardware bar."

      In addition to the customary booth presentations with giveaways, swag desk, seating area, and a fun caricature artist, the physical servers that underpin the HPC nodes in Azure were on display. Microsoft contributes its hardware platform designs to the Open Compute Project so the physical hardware that runs in Azure data centers isn't entirely mysterious. Still, every cloud has its hardware secrets, so I was surprised to see these servers laid bare.

The newest HPC node type on display (dubbed HBv4) was powered by AMD's Genoa processors, which had just been announced a few days earlier:


      I'm not sure what more I'm allowed to say, but my colleague Karl made a nice, quick video that runs through the entire Microsoft booth that's worth a watch, and more details can be had by contacting me or your favorite Microsoft account team privately.

      Of course, the hardware bar was just a way to lure people into the booth so I could achieve my real goal: meeting new folks. As I wrote before, one of my biggest realizations at SC this year is how generally confused people are about what HPC in the cloud really means--both people who come from traditional on-prem HPC and people who come from traditional enterprisey cloud. I found myself surprising many of the people with whom I spoke on the show floor with factoids that I have taken for granted. For example,

• Linux is the most common OS on these HPC node types. While you probably(?) can run Windows if you want on this stuff, I think only a few niche markets do this.
• The usage model for an HPC cluster in the cloud can be the same as on-prem. You can have login nodes, Slurm, home directories, parallel file systems, and all that. Jobs don't have to be containerized or turned into a VM image.
• The InfiniBand coming out of these nodes is real InfiniBand with real OFED that supports real mpich/mvapich/OpenMPI. It's the same stuff as in on-prem supercomputers. And nodes are assembled into full-bisection fat tree InfiniBand clusters just like normal.
• There's no noisy neighbor problem on compute nodes because HPC node types aren't shared between users. When you run a VM on an HPC node, you get the whole thing. Just like on large supercomputers.
• There's no horrible loss of performance due to running in a VM. Virtualization extensions, PCIe passthrough, and SR-IOV bypass the hypervisor for most things. Inside your VM, you see real Zen cores and real Mellanox HCAs, not virtualized devices.

My takeaway impression is that a lot of traditional HPC folks looked at the cloud five or ten years ago, had a sour experience, and haven't paid attention since. In those last five years, though, AI has changed the game. Massive demand for the latest CPUs and accelerators, funded by live-fast-die-young venture capital, has given cloud vendors tremendous financial incentive to catch up to on-prem levels of performance efficiency for AI workloads. And it just so happens that infrastructure that's good for AI is also good for traditional modeling and simulation.

SCinet!

One of the unexpected highlights of my SC this year arose from a chance encounter with a former coworker from NERSC, Ron Kumar, who gave me a whirlwind tour of SCinet.

I have to confess great ignorance around SCinet in general; I always saw it as a weird technological proof of concept that the strange networking people at work would go off and do in the weeks leading up to the actual conference. I knew they did some impressive wide-area transfer demos (like the petabyte-in-a-day demo at SC'16), but I didn't really get the significance.

      So what is SCinet? It's this yellow bundle of cables dangling from the ceiling.

The yellow cables are 144-core fiber trunks that bring over a terabit per second of bandwidth into the convention center from the Internet via the national research backbones like ESnet and Internet2 and distribute many terabits per second of capacity throughout the SC conference venue. For comparison, most HPC centers in the US only have a tenth of SCinet's wide-area bandwidth at best since 400G infrastructure is still rolling out.

      Most attendees may be familiar with the row of expensive-looking networking racks behind a glass wall towards the back of the expo which is where those yellow cables dangling from the ceiling end. Here's a photo from inside that glass wall:

      What I didn't realize is that if you go around to the back of the giant walled area behind this glass display, there's a security checkpoint that gates entry into a massive network operations center (NOC) full of laptops, spools of fiber, meeting rooms, and busily working teams in charge of all the lower layers of the networking stack.


      One last factoid I didn't know until this year was that exhibitors can request 100 Gb/s network drops into their individual booths for demos (or downloading the latest version of a PowerPoint presentation really fast). The end result of supporting both a vast wifi network and 100G fiber across the show floor is that there was a lot of fiber going into the single row of SCinet equipment:

      Finally, when I posted some of these photos online during the conference, my colleague Bilel was kind enough to post a slide from the SC22 opening presentation that had the speeds and feeds of what I had toured:

If you know anyone involved with SCinet, I highly recommend seeing if you can get a tour at the next SC. Even as a relative networking novice, I walked away with a much greater appreciation for the annual achievement of building SCinet. And who knows? Once I get bored of this whole storage thing, maybe I'll try getting into high-performance networking.

Composability panel

This year I was invited to participate in a panel titled "Smackdown! Does HPC Need Composability Now?" moderated by Addison Snell and Dan Olds from Intersect360 Research. This panel was...different. Unlike the traditional SC panel where panelists take turns presenting slides and saying erudite things, this panel had two teams of panelists. And my team only had one slide to present:

      The ground rules included "personal attacks are allowed," and needless to say, the panel was about equal parts entertainment and technical discourse. That's not a bad thing, though.

Addison and Dan did a phenomenal job of pulling their respective teams together and leading discussion in a format that both brought forward the key pros and cons of composability in HPC while poking fun at the thinly veiled, ego-driven personalities that often make up these sorts of panels. Rather than politely dancing around issues like sacrificing memory bandwidth by putting accelerators at the far end of a PCIe bus or gaining higher utilization by allowing users to mix and match CPUs, NICs, and GPUs, we panelists were free to shoot straight (or perhaps a bit hyperbolically) and call each other out on our hidden agendas.


      On a technical level, what was the outcome?

It turns out that, after both sides argued their case, there was about a 60/40 split between people who felt composability wasn't required yet and those who felt it was. Even among panelists, many of us were a lot less convinced about our respective positions than we let on during the panel itself. I got a chuckle when I realized that I wasn't the only one who, when invited to be on the panel, asked "what side do you want me to argue?" I honestly could have gone either way because the dust has not yet settled. Dan Stanzione, director of TACC, gave the truest answer to the question of "will composability help HPC" up front--"it depends." Maybe this is a growth opportunity, or maybe it's a lukewarm reception.

      Either way, composable technologies are hitting the market regardless of whether you think they'll be useful or not.  AMD Genoa supports CXL 1.1 with extensions for memory pooling, Samsung has memory-semantic SSDs, and everyone and their mother is working on photonics to get higher bandwidths and lower latencies over longer distances. This makes it easier for people to dip their toes in the water to see if composability makes sense, and I think that's what a lot of people will wind up doing in the coming years.

Customer meetings

Unlike in years past, my SC experience this year was dominated by customer meetings. I've been on the customer side of the table plenty of times, but I was surprised to find that it was actually more fun to be on the vendor side for a change. I'm part salesman at heart, so I found it personally gratifying to end a meeting with people nodding along rather than scratching their heads. I learned as a customer that it's very easy for vendors to go way off the rails and waste everyone's time, so I was grateful to have avoided the awkward confusion that punctuates those kinds of meetings.

      I also went into the week worrying that I'd be sitting in the same room, hearing the same pitch and the same jokes, and answering the same questions all week. Thankfully, I work with some great field, business, and product teams who set up interesting conversations rather than rote recitations of boring roadmap slides. Approaching the same topics from different angles helped me figure out how all the pieces of what I'm working on fit together to make a complete picture too; there weren't nearly as many opportunities to do this in the DOE world since the end-users of the HPC systems on which I worked aren't told anything until all the design decisions have already been made.

A few personal notes

This SC was significant to me at a variety of levels; it was the first time I'd gotten on an airplane since February 2020, the first time I'd traveled since starting a new job at a new company, and the first time I'd met any of my new coworkers outside of the structure of a Teams call. During the pandemic I realized that getting out into the world and talking to people from all corners of HPC were my favorite parts of my job. Not being able to go to events like SC and maintain that sense of community involvement dramatically impacted my level of professional satisfaction for the last two years, so I'm glad I was able to finally go this year.

      Though customer meetings were a lot more fun than I expected them to be, I still felt bummed that I could spend so little time walking the expo, talking to folks, and attending all the BOFs normally on my must-attend list. Compounding this was my personal choice to not dine indoors and consequently miss out on almost all other chances to catch up with old friends and colleagues. I also decided to leave SC a day earlier than I usually do to reduce my risk of getting sick which didn't help either. There's never enough time at SC, but this year was particularly pressed.

I say all this not to complain, but to say how much I appreciated the people who went out of their way to come accost me during the precious few hours I actually had on the exhibit floor. Some I'd not seen since SC'19, and some I'd never actually met since we only started working together mid-pandemic. The conference is busy for everyone, so giving me a slice of your time was very meaningful. That sense of community membership is why I go to SC, it's why I still work in this business, and it's why I try to contribute whatever I can to whomever wants it, whether it be a student, engineer, salesperson, or marketer.

This is a crosspost from Mark Nelson's Blog, I like to make distributed systems go fast. See the original post here.
      Spooky Allocator Issues and Fixes


Recently we started noticing performance issues in the main branch of Ceph that ultimately were traced back to a commit last summer that changed parts of our AVL and hybrid disk allocator implementations in bluestore. Strangely, the issue only affected some of the NVMe drives in our test lab but not others. The quick fix was to always update and save the allocator's cursor position so that we don't search (and fail) over and over in fast-fit mode for every allocation request. Another interesting offshoot of this though is that it may be much nicer to limit fast-fit searches based on time rather than byte distance or the number of iterations.

This is a crosspost from VanessaSaurus, dinosaurs, programming, and parsnips. See the original post here.

SSH Tunnels

Today I want to talk about ssh tunnels. Very abstractly, we would want to use an ssh tunnel to securely send information. In the case of HPC, you are probably familiar with ssh (Secure Shell or Secure Socket Shell) from when you log in to your node. You might do something like this:

$ ssh dinosaur@server.address.edu

      Or if you have a proper setup in your ~/.ssh/config (with a named server) you might just do:

$ ssh dinosaur

      I like to use ssh connection multiplexing so the connection is kept alive for a bit, but I won’t go into detail because this post isn’t specifically about the details of ssh. The use case I’m interested in (and the thing that HPC is very bad at) is how to deploy something interactive on an HPC cluster.
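Since I mentioned multiplexing, here is a minimal sketch of what that can look like in ~/.ssh/config (the host alias and hostname are placeholders, not from any real cluster):

Host dinosaur
    HostName server.address.edu
    User dinosaur
    # Reuse one authenticated connection for subsequent ssh/scp sessions
    ControlMaster auto
    ControlPath ~/.ssh/control-%r@%h:%p
    # Keep the master connection alive for 10 minutes after the last session exits
    ControlPersist 10m

With this in place, repeated ssh dinosaur commands reuse the first connection instead of re-authenticating every time.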


      SSH Tunnel with Ports

Given that a cluster has exposed ports (either the login node, or both the login node and compute nodes), creating a tunnel is fairly straightforward! In the past I created a tool called forward to handle all the manual steps to get this working, meaning:

1. Show the user how to set up their ~/.ssh/config (once)
2. Define (once) parameters like a port, memory, GPUs, and if the cluster has isolated nodes
3. …

        An interaction using forward might look like any of the following:

# Run a Singularity container that already exists on your resource (recommended)
bash start-node.sh singularity-run /scratch/users/vsochat/share/pytorch-dev.simg
…

Note that the last set of commands pertains to notebooks, which is where these tunnels come into play! A notebook is going to be run on a compute node with a command that looks something like the following:

$ jupyter notebook --no-browser --port=$PORT

        And if you ran this with a Singularity container, you’d also want to bind jovyan’s home to be the user’s, along with the jupyter config directory:

$ singularity exec --home ${HOME} \
    --bind ${HOME}/.local:/home/jovyan/.local \
    --bind ${HOME}/.jupyter:/home/jovyan/.jupyter \
    …

As we described earlier here, there are subtle differences between making a tunnel (with a port) given that you have isolated nodes (or not). You can determine this based on your ability to ssh into a non-login node (meaning where your job is running) from “the outside world” that is your computer - a quick check is sketched below. If you cannot, your nodes are isolated, which we will discuss next.
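As a quick sanity check (the hostnames here are hypothetical), suppose your job is running on a compute node called node123; from your laptop - not the login node - try:

$ ssh dinosaur@node123.cluster.address.edu
# connection refused or times out -> your nodes are isolated
# you land in a shell on the node -> your nodes are reachable directly

Your site’s actual node naming scheme will differ, but the test is the same.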


        Isolated Nodes

        Let’s say that we need to create a tunnel (using ports) to an isolated node. This means that we are basically going to establish a tunnel to the login node, and then from the login node another one to the compute node. We might use a command that looks like this:

$ ssh -L $PORT:localhost:$PORT ${RESOURCE} ssh -L $PORT:localhost:$PORT -N "$MACHINE" &

In the command above, the first half (ssh -L $PORT:localhost:$PORT ${RESOURCE}) is executed on the local machine, which establishes port forwarding to the login node. The “-L” in the above (from the man pages):

Specifies that connections to the given TCP port or Unix socket on the local (client) host are to be forwarded to the given host and port, or Unix socket, on the remote side.


        Or in layman’s terms:


        Forward whatever is running on the second port on my resource to my local machine.


        The -N says “don’t execute a remote command (and just forward the port).” Finally, the last $MACHINE is the node that the jupyter notebook is running on.
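To make that concrete, here is the same two-hop command with the variables filled in with hypothetical values (port 8888, a login node alias of dinosaur, and a compute node named node123):

$ ssh -L 8888:localhost:8888 dinosaur ssh -L 8888:localhost:8888 -N node123 &

Reading left to right: the first -L listens on port 8888 on your laptop and forwards it to port 8888 on the login node, and the second ssh (running on the login node) forwards that port on to port 8888 on node123, where the notebook is actually listening.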


        Not Isolated

For HPCs where the compute node is not isolated from the outside world, the ssh command for port forwarding first establishes a connection to the login node, but then continues to pass on the login credentials to the compute node to establish a tunnel between the localhost and the port on the compute node. The ssh command in this case utilizes the flag -K that forwards the login credentials to the compute node:

$ ssh "$DOMAINNAME" -l $FORWARD_USERNAME -K -L $PORT:$MACHINE:$PORT -N &

        I’m not sure in practice how common this is anymore. At least at my current employer it’s not even the case that ports are exposed on the login node! It’s probably better that way, because in cases where you do get ports it’s sort of a “pick a port above this range and hope that no other user picks the same one!” It’s messy. So let’s talk about the case of not having ports exposed next, since this was the entire reason I wanted to write this post!


        SSH Tunnel with Socket

More than a year ago, I had this realization that a lot of people at Stanford used the “forward” tool, and just for notebooks (and this […] has easy access). It was also a stubborn “I want this to work” proof of concept. This new tool would be like forward, but a little nicer. Because I, along with every other HPC developer and user, wish we could have nice things 😭️.


        At this time I had just started a new role at a national lab, and I realized that none of my old techniques for launching the job worked because of the lack of exposed ports. Thinking this was impossible, I abandoned it for a year. But then this last week I found this! I was motivated! I was excited! The basic launch command of the notebook looks like this:

$ jupyter notebook --sock /tmp/test.sock --no-browser

        And then with a different looking tunnel, we could forward this socket to the host, and map it to a port! My excitement was then brought down by what led to two days of struggling. I first tried my entire tunel workflow, meaning launching a job on a node, and then running that command, and providing the instruction to the user to create the tunnel as follows:

$ ssh -L 8888:/tmp/test.sock -N user@this_host

That didn’t work (and remember, this socket was created on the isolated node - that’s important for later). So I started looking at the socket with “nc” - “arbitrary TCP and UDP connections and listens” - from the login node. The “-U” below is for UNIX sockets:

$ nc -U /tmp/test.sock

        And from the head node I saw:

Ncat: Connection refused.

        So then I knew I needed a simpler, dummier example. I got rid of tunel and just ran the notebook command on the head node. Dear reader, it still did not work. I opened an issue and asked Twitter for help. Someone else on Twitter reported that it worked for them, and that (in my opinion) is the challenge and story of HPC - given the huge differences in setups, it’s hard to reproduce what another person does unless you scope to a very specific environment or technology and hugely go out of your way to do it. I’m always grateful when someone tries to help, but when the ultimate answer is just “But it works on my machine!” I (and I think all of us) are like:


        (╯°□°)╯︵ ┻━┻


        🤣️


Please know that this is intended to be funny, and I really am grateful for the attempt to help! Anyway, the first night I was devastated because I was so excited about the possibility of this working! But of course (as it usually does) my quasi-sadness turned again into relentless stubbornness, and for my Saturday I embarked on trying everything. I call this the stubborn brute force approach, and it actually leads to some pretty good outcomes?


        Socket from Login Node

        First from the login node, I started reading about flags in detail, again from the man pages. It occurred to me that the suggested command included “-L” (discussed earlier) but there were a ton of other flags to try, and maybe I need them for my setup? The command that wound up working (after much trial and error) was just:

# Running on login node
$ ssh -NT -L 8888:/tmp/test.sock user@server

        And here again was the suggested command:

$ ssh -L 8888:/tmp/test.sock -N user@this_host

So they are very similar - and the main difference is that -T is to “Disable pseudo-terminal allocation.” So I suspect (also based on the version of ssh I’m using) that without the flag, you might be making a request for a pty to the server (more details here) and then it could abort. Adding the flag just skips this, because we don’t need that - we just need the simple forward. And yes, this indeed feels very specific to your ssh setup, version of ssh, and server configuration. Of course, this was only the beginning of figuring things out, because I had no idea how to get this working from one level deeper - an isolated compute node.


        Socket with Isolated Nodes

        Remember that when we created the socket on the isolated node and we tried this out from the login node:

$ nc -U /tmp/test.sock

        And the result was this:

Ncat: Connection refused.

        My spidey senses were telling me that this should work. Indeed, when I ssh into the isolated node from the login node, that same command allowed me to connect (meaning it hung / there was no error output). So my first task, I decided, was to try and “forward” this socket to the login node. Again, back to the man pages! I wound up with something like this (run from the login node):

$ ssh isolated-node -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock

        The above is again using -L but instead of a port (which aren’t exposed) we are using a socket! It’s kind of neat you can switch out those two. When I tried the same nc command from the login node, we had progress (no connection refused message!) 🎉️ And then I moved this up one level to see if I could make this same request from my local machine, sort of combining the first command that worked with the login node notebook with this one. That looked like this (and yes this took more trial and error):

$ ssh -NT user@server ssh isolated-node -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock

        And to confirm it was working, I’d ssh into the server and again run that nc command to ensure that the newly forwarded socket was readable from the login node. After this, again with more trial and error, I tried running a second command to just forward that (now working socket) to my host. That eventually looked like this:

# And another for the local socket
$ ssh -NT -L 8899:/home/dinosaur/login-node.sock user@server

        And then (all together now!) I tried putting them together.

$ ssh -NT -L 8899:/home/dinosaur/login-node.sock user@server ssh isolated-node \
       -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock

And then I spent some time integrating it into tunel, and surprise! the first implementation didn’t work. The first bug was that I needed to clean up old sockets each time the “same” app was run (determined by the job name and organizational namespace so the user can only run one of a particular interactive app at once, and not forget about previous runs). The second issue was about opening the tunnel - it didn’t seem to work if the process exited and/or it was run in a subshell (that also probably exits). I realized that (for the time being) running this connection step on behalf of the user, since it’s something the user should have more control over, probably wasn’t the right way to go. If the user hasn’t added something like an rsa key to ~/.ssh/authorized_keys on their clusters (a quick sketch of that setup is below), it would also ask for a password interactively, making it harder for me to manage. So for simplicity’s sake, and assuming that we really should put the user in control of deciding when to start/stop the tunnel, I simply print the full ssh command in the terminal and let them copy paste it. A successful connection might then prompt them for their password for that second ssh, which (by default) I don’t think is carrying forward auth from the first.
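For reference, that key setup is just standard OpenSSH, nothing specific to tunel (the hostname is a placeholder, and whether the intra-cluster hop also becomes passwordless depends on your site’s configuration):

# Generate a local key pair if you don't already have one
$ ssh-keygen -t ed25519

# Append your public key to ~/.ssh/authorized_keys on the cluster
$ ssh-copy-id dinosaur@server.address.edu

After this, at least the first hop of the printed ssh command won’t ask for a password.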


        So that was my adventure! Mind you, this entire adventure was only about two days, and that included time to write this post, so I still have lots in front of me to work on. However, with these updated commands (and some nice tweaks from Python’s rich library) I quickly had a nice set of commands to run and stop an app with an interactive jupyter notebook, and using sockets on isolated nodes!

$ tunel run-app server slurm/socket/jupyter
$ tunel stop-app server slurm/socket/jupyter

        As a sidenote, one thing I like about rich is that it puts the aesthetic as a first class citizen. So many tools just don’t consider this, and I love that with rich I can think about colors, presentation, and even animations like spinners!


Getting a socket working means I’ll be able to continue working on this library (hooray!) so if you have ideas or requests for apps you’d like to run on HPC, assuming just this basic technology, please give me a ping and I’d love to chat and support them. I’m also going to be requesting an allocation on the Open Science Grid, which hopefully will give me other kinds of clusters to test on. I hope this was interesting to read, thanks for doing that!

This is a crosspost from VanessaSaurus, dinosaurs, programming, and parsnips. See the original post here.

The Research Software Ecosystem

      We recently published the Research Software Encyclopedia and also have added several new parsers for obtaining new data, meaning the total collection of curated research software is greater than 1500 entries. In honor of this collection, and of a library I’m working on called CiteLang, I wanted to do a small study to better understand:

1. What are the most valuable dependencies in our community, across languages?
2. What are the most valuable dependencies in our community, by language?
3. …

CiteLang

…source contributions via git, but its main purpose is to be a markdown syntax for citing software, meaning that we can:

1. Generate basic software credit trees, graphs, and markdown summaries.
2. Derive a new, customizable model of credit based on published packages and dependencies.
3. …

          As a simple example, I can run CiteLang over this markdown file with CiteLang references:

# Summary

Portability and reproducibility of complex software stacks is essential for researchers to perform their work.
…

And then run citelang render paper.md to get a nice rendered table alongside your paper! What CiteLang does is find the references in the paper, which look like this:

@conda{name=singularity}
@pypi{name=singularity-hpc}
…

          Each of the references above is a package manager with a package name and (optionally) a version, and we can load in the metadata for each and then generate a table that you see here that summarizes credit across dependencies. In this model, we give some allocation of credit (default is 50%) to the main work (paper or software) citing the software, and then recursively parse dependencies up to some minimum level of credit to calculate scores. Dependencies shared across libraries are averaged together. The final table represents the credit that you give not only to the top level software, but to all nested dependencies, for the work that you did. And that’s only the basics! CiteLang takes this simple ability to parse references and extends it to automation, graphs, badges, and more! You can read more about CiteLang here.
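To make the arithmetic concrete, here is a tiny worked example of that default 50% model with made-up package names (not from any real analysis): a paper cites a single package, pkgA, which has two dependencies of its own.

paper  keeps 0.500
└── pkgA   keeps 0.250   (half of the paper's remaining 0.5)
    ├── dep1 gets 0.125  (dep1 and dep2 split pkgA's other half)
    └── dep2 gets 0.125

If dep1 had its own dependency, that package would get half of dep1's share again (0.0625), and so on down the tree until a share falls below the 0.001 cutoff.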


          Publish or perish? How about neither? I just need to keep writing software!

…there are so many cool ideas around this! But let’s start at the beginning. We first want to show how to summarize an ecosystem. That is exactly what we are going to do in this post.


The Research Software Ecosystem

          Starting with these curated repositories from a set of scrapers including the Journal of Open Source Software, the HAL Research Software Database, the Research Software NL Dictionary, ROpenSci, and The Molecular Sciences Software Institute, we can do a basic analysis to identify the most used (and thus valued) pieces of software in our ecosystem. My analysis plan was to:

1. Start with the current database.
2. For each repository, look for requirements files to parse.
3. …

            Currently we parse setup.py and requirements.txt (Python), DESCRIPTION (R), go.mod (Go), package.json (npm), and Gemfile (ruby). Based on the breakdown of the languages found in the RSEPedia, this is a reasonable start!


But it’s also kind of sad to see that my favorite languages (Go and Rust) are barely represented in our community. Also, the above should tell you that the R and Python results likely have some meaningful interpretation, but the others not so much, only because we don’t have a big enough sample. So for all of the above steps, for these 1500+ repositories and many languages, I wanted the entire process to be automated, always have potential for easy improvement, and run at some regular interval as new software comes into the Research Software Encyclopedia (also automated) so we can derive changes over time. If you don’t care to read further:

1. View the Research Software Ecosystem
2. Check out Languages here
3. …


              For this first publication of the interface we have the following metrics:


And I’m so excited because a tiny vision I had a few years ago to provide (and use) a community research software database is coming to life! So without further ado, I’m just going to jump into the cool results! It will be fun to see how these change over time.


              Python

              Ladies and gents, dinosaurs and rabbits! Your Python results:


              So here is the first awesome insight. Is anyone really surprised to see numpy as the number one library? The credit value here says that the average Python repository is attributing about 3% of credit to numpy, meaning it is a direct or indirect dependency. Let that sink in! Here is the irony - when is the last time you cited numpy? You probably haven’t, because you’ve cited something that uses it. We don’t remember numpy despite the fact that it’s so core to everything that we do.


              The fact that the most widely used library is rarely cited is huge evidence for why a manual “write papers and cite DOIs” approach just won’t work for software.

…because we include repositories that are peer reviewed (e.g., JoSS) and documentation and testing are necessary for that. Given this need for Python, sphinx and pytest come up as leaders to provide that. So here is another nugget of insight:


              Some of us are so busy focusing on domain-specific software that we forget the importance of the “less sexy” research software that helps us test, document, view things, or even create simple data structures.


              Of course I have my own strong opinions about a taxonomy for research software, but I would encourage those of you who are very dismissive to take a step back and consider what you are really saying.


The next insight is that we see a lot of libraries for data formats (e.g., pyaml, h5py, lxml, and more lower in the list), and this is a testament to how important being able to read, serialize, and save data is.


The final insight is the fact that requests is high in the list. For those of you not familiar, requests is a library for doing just that - making http requests to get content from some webby place. This is a testament to the fact that our work is increasingly relying on external APIs, automation, or other resources provided on the web.


              You can see the full Python results here.


              R

              I’m less of an R programmer these days, but I think that these results also make sense.


              We don’t see any huge leaders in the same way as we see numpy in Python, but not surprisingly the leader package for the R language is, well, R! I at first thought this was a bug, but actually R DESCRIPTION files that we parse do commonly include a pinned version of R:

Depends: R (>= 3.4.1), TailRank,
...


              And so we actually can give credit to the language proper! If you don’t feel this is appropriate, feel free to skip this line and consider the top package jsonlite. This is also why I think json would be represented in Python if it wasn’t part of the standard library. Us research folks - we need our json! Overall I think we see a similar pattern here as we saw with Python. The libraries that float to the top are those that involve data structures (jsonlite, yaml), webby requests or similar (httr, curl), documentation and testing (knitr, rmarkdown) and graphics or visualization. What does this tell us about what is undervalued in research software? Again, it’s not the domain specific libraries, but rather the core stuff that enables those libraries.


              You can see the full R results here.


              Projects

              If you are interested in a specific project in the RSEPedia, we also provide a project-specific table and badge! You can browse projects from here, and here is an example of a badge generated for a project called github.com/ORNL/tx2 (and on GitHub). Without even looking I can tell you we have some machine learning and/or visualization going on here (scikit-learn! umap! pandas! matplotlib)!


              Notice how numpy (as an example) shows up at multiple points in the tree - when we calculate an overall credit, say, for the ecosystem, we take that into account! And we can then peek at the project-specific table and sort of verify that yes, this is a Python ML/visualization project:


And we see some surprises! Like, the slack-sdk? What? Believe it or not, that is pulled in by tqdm. The project-specific tables (and the description at the top) also give you a better sense of how CiteLang allocates credit. The top level package is given 50%, and then the other 50% is given to all dependencies in the same fashion. We cut off at a value of 0.001, and we do that in case we might be parsing dependencies forever down to some infinitesimally small amount.


              Finally, every project serves its own raw data


              and the site is searchable, because sites should be. 😄️


              Discussion

I’m so happy (and a bit relieved, to be honest) to finally be able to show what I’ve been saying for years - that the most valuable software for research, and the software that is driving domain-specific research software, are the unsexy libraries that have to do with data structures, (maybe standards), documentation or testing, and data formats or retrieval. These are the packages that you aren’t going to remember to cite. Also, this set is totally leaving out the software we use on a day to day basis in our CI, which arguably isn’t research software but has done more for the research community than anything I can think of - containers, version control (git), and continuous integration. We’d be a mess without it. We need to be more thankful and aware of this, and for some of y’all that turn up your nose at anything that isn’t a domain-science library, perhaps take a pause. Next, let’s talk about limitations and hopes for the future.


              A Living Database

I wouldn’t have been happy with myself to simply publish software at one point in time and call it a day.

              This means that while we do cache a result for a newly added piece of software, we do continue to grow the analysis as new software is added. And since the tool will always use the newly updated CiteLang, any improvements to the parsers there will be reflected here! And if anyone wants to run the entire thing again (outside of the limit of GitHub Actions) they can clone the repository, nuke the _repos folder, and run the scripts again.


              Language Gaps

              The biggest gap in the RSEPedia is with respect to what we don’t see. First, despite being a prominent language, we don’t see anything for C++, because there isn’t a package manager with an API to use it. If you have a nifty (or even hacky) idea for how to parse a requirements file, I want to hear it. The RSEPedia has support for spack, but most research-oriented C++ projects are not going to go out of their way to publish their package there, and we get no signal of the package being in spack when we clone the repository. Sppaaaaaack (sorry, it’s a bit of a tic at this point!) 😁️


              We also don’t see standard modules or libraries provided within a language. E.g., I can almost guarantee you a ton of Python libraries are importing json, but since it’s not a package manager library we wouldn’t see it. I suspect citelang could come up with a way to derive credit for these libraries by way of abstract syntax trees or just parsing the source code, although I haven’t done this yet because I’m not convinced it’s something people are as interested in. If you want to say thank you for the Python standard library, there is a donate button on their contribution page (or you could contribute code). There is an even deeper level of parsing (at least for Python) that looks at function signatures, and I wrote a library called caliper in early 2021 to do that, and it’s able to generate function databases for Python software of interest. This would be cool to do for some kind of (unrelated) compatibility analysis here, but yes that’s very different.


              Parsing Limitation

              For all requirements files except for Python, we are forced to do static parsing. While not perfect because bugs can happen for niche cases of someone defining requirements in a weird way, it’s a reasonable start. There is always room for improvement, or adding more static parsers for requirements files I have not considered yet.


              However, this is not the case for the Python parsing (either requirements.txt or setup.py)! For Python these results are likely very good because we wrap the pypi package manager install command to derive a list of packages and versions from either a setup.py or requirements.txt. Don’t worry - nothing is installed, we either just parse the requirements file and return the results, or we use the solver against a setup.py to come to an equivalent list. We originally had a static parser (and still use this as a fallback) however I talked to @alecbcs and he had this fantastic idea! Will it likely need updates as time goes on, given the functions are private? Sure. But I’m happy to do that to get the much more accurate listing.


              In practice, the only setup.py files that I was not able to parse either had a bug (e.g., trying to read a file that doesn’t exist in the repository) or they were trying to use modules outside of the standard library. For all of the cases of broken-ness, I opened issues on the respective repositories so we might have a better chance at parsing in the future! One detail is that we parse the first requirements file found. For a primary requirements file in the root of the repository, this is the best outcome. However, some repos don’t have a file in the root, and perhaps we find one in a documentation folder instead. Either way, the result represents our best effort at finding and parsing requirements given a cloned repository we don’t know the structure of in advance.


              Final Thoughts

              Here are my final takeaways:


              Publication is not for Research Software

              A system of credit that relies on software engineers to do extra manual work (to write papers) is never going to fully capture the ecosystem and give proper credit. It will only capture those that have the time and possibly privilege to take the extra time to write a paper. Publication only makes sense given that a piece of software is paired alongside a robust result, in which case fine, write the paper and also champion the software.


              Publication Does not Actually Capture Credit

A system that only skims the superficial top (the name of one package) and does not dig deep into a dependency tree is also going to miss insights and deserved attributions of credit. As the numpy example shows, nobody is actually citing numpy, but a ton of projects are using it somewhere in their dependency tree, so it deserves a lot of credit.


              We Can Do Better

I have a pet peeve. I'm frankly just tired of people writing about credit and attribution but not doing anything about it. We could extend that to other things, but it's especially an issue for this topic. Ironically, they are writing papers and improving their publication record as they write about how strained the relationship between publication and research software is. I may not have solved this problem, but damn, at least I'm trying to actually do something about it instead of just spouting gas.


              I find this idea exciting because there are so many directions you can go with it. When I first designed the idea I imagined a database and online interface where you could essentially connect your GitHub repository, and akin to a builder service, parse your repository on some event and derive a new credit or citation graph. Or you could have some set akin to the RSEPedia that are also updated regularly. And then, by way of having that database, we could do these same queries (that currently I’m doing statically) to say “What are the most important libraries for this language? Across the ecosystem?” or “How has this changed over time?” It would be a true way to derive the value of a library without needing people to publish papers, and totally automated and integrated with package managers, which is where people already should be putting their software. Heck, if someone gave me a cloud and a little bit of funding I’d love to work on this. Are there good reasons or use cases? I don’t know, but maybe.


              So what do you think?

diff --git a/_posts/dursi/2022-7-3-utility-vs-professional-serviecshtml.md b/2022/the-utility-vs-the-professional-services-firm/index.html

The Utility vs the Professional Services Firm

This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.

As research computing and data becomes more complex and diverse, we need more professional services firms and fewer utilities

      (Note: This post is adapted from #127 of the Research Computing Teams Newsletter)


      I get to talk with a lot of research computing and data teams - software, data, and systems. Sometimes in these conversations it’s pretty clear that some teams, or the team and their funder, or a team and I, are talking a bit past each other. And that’s usually because they or we are (currently) operating with very different mental models of how they operate.


      Some research computing and data teams are operating as Utilities, and see the world through that lens; a growing number are operating as Professional Services Firms. Others are moving from one to the other, and are at different places along that very abrupt transition. Some kinds of groups (like bioinformatics cores) are much more likely to already be operating in service mode, while others (like research compute infrastructure teams) are more likely to still think of themselves as utilities. It varies from place to place, though, depending on local conditions. But they’re very different models!

[Image: utility vs professional services. Utility service and professional services delivery are very different, and require different funding, management, and career development models. Image credit: left, John Moore (@thejmoore) on Unsplash; right, Jason Goodman (@jasongoodman_youxventures) on Unsplash.]
      @@ -30,23 +96,88 @@

      natural monopolies; the most reasonable provision model was for the local jurisdiction/institution to own or control a single operator. Differentiation or strategy, or gaining new customers, weren’t meaningful discussion topics. The only thing that really makes a difference is scale, which leads to mergers. Innovation happens slowly, top-down, at the industry-wide scale and usually from the vendors (“hey, did you hear about those new gas compressors Dyneco announced?”), and diffuses outwards. Employees take pride in and the organization values operational skill and things ticking along smoothly. Customers value reliability. The only thing that matters for any individual operator is to operate effectively and to provide the standard service with the right amount of cost: high enough to absorb the available subsidy, low enough to not go broke. If a customer needs something other than what the utility provides, rather than that being a market opportunity, it’s either an inconvenience or an irrelevance. The power company or the water utility or the old phone monopoly just doesn’t serve that need.


Professional Service Firms — say engineering firms, or architects, or consultancies — are very different beasts. They might very well have significant capital investment in specialized equipment, but their main selling point and their biggest cost is expertise. Competing for and retaining that expertise, and developing that expertise in house and amongst their clients, are principal concerns. As part of a "full-service" offering they likely have some fairly standard services they offer at the low end, where operating cost and efficiency are vital. But what the organization values, and the employees enjoy, is at the high-touch end — getting deeply involved with the client work, and being as much a collaborator or partner or "trusted advisor" as a service provider. Different clients want very different things, and that high-touch high-expertise work is specialized and labour intensive, so the firms themselves need a clear focus; they can't meet all needs. Clients can go elsewhere, so there is redundancy and competition, but less than you'd think at a distance. In civil engineering a geotechnical firm is complementary to, not competing with, one that specializes in water resource engineering.


As in the rest of our lives, in research computing we need to have utilities. As research data management matures, institutional or regional data repositories become mature and "enterprise" enough to become utilities, likely run by IT or the Library. Teaching or CI/CD or MLOps resources for data science or software development are likely best served by this model. The closer the operations are to standard, something that can be run by IT, the more likely it is to be a utility. But one has to be careful. Utilities are commodities: they tend to get merged together wherever feasible, since scale matters and it's all undifferentiated commodity provision.


As research computing becomes broader and faster changing and more diverse, we need more and more professional services firms, too; nimble groups specialized to particular needs and ready to adapt as those needs change. As even infrastructure is becoming less one-size-fits-all, and methods for making use of computing and data for diverse fields grow more complex and expertise-intensive, the preconditions for the utility model are met in fewer situations than they used to be.


A lot of research computing teams are interested in providing something more like professional services, but were created in the Utility model, and are stuck there by their funders. The institutional or external funders still have this very specific (and to their mind time-tested and successful) operating model in their plans. Utilities are funded very differently than professional services firms. At utility scale, it doesn't make sense to outsource things, or develop non-standard services (who wants non-standard power coming into their house!) Funders' requirements on eligible expenses may focus almost entirely on the capital spend, and not on operating funding that's needed to make effective use of the capital, or to be more agile in how services are delivered.


Even those teams who aren't being held back by funders and who want to make the switch to professional services from their original utility model find it a hard transition. There's no obvious, incremental path to go from providing a standard, stable commodity to changing, specialized bundles of expertise. Utilities operate very differently from professional services firms. They value different things. The models for staff growth are different. So they have to be managed quite differently, and there's no clear path internally from A to B.


Besides funding, and internal considerations, utilities and professional services firms are also perceived and valued by their clients very differently. Utilities' existing customers don't want change, and new customers aren't yet interested in getting advanced app software development suggestions from what they perceive to still be the mobile telephony provider.


      But research computing and data is changing, increasingly quickly, and the utility approach only meets a piece of these growing needs. Navigating the transition isn’t going to be easy, for RCD teams, leaders, or funders; but expressing it clearly and talking about it more will maybe mean we’re not talking past each other so often.

diff --git a/2022/the-web-services-i-self-host/index.html b/2022/the-web-services-i-self-host/index.html
new file mode 100644

This is a crosspost from Computing – thinking out loud: works in progress and scattered thoughts, often about computers. See the original post here.
      The web services I self-host


      Why self-host anything?


      In a lot of ways, self-hosting web services is signing up for extra pain. Most useful web services are available in SaaS format these days, and most people don’t want to be a sysadmin just to use chat, email, or read the news.


      In general, I decide to self-host a service if one of two things is true:


      • Self-hosting is going to add a capability that’s difficult to find in a SaaS alternative. That might be privacy, or extra compute, or just an extra degree of customization that I want.
      • I find it interesting or amusing to self-host it! I have been a professional sysadmin, and ran production web services for over a decade. So I enjoy messing around with servers, and can have a fair amount of fun with this.

      Infrastructure and general tooling


      Right now my self-hosted services are hosted on Oracle Cloud Infrastructure, for a very simple reason: OCI includes a very generous Always Free tier, which doesn’t even ask for a credit card! So I’m confident I’m not going to accidentally spend any money. I use ARM Ampere A1 Compute instances for service hosting.


      The individual services are mostly managed using Docker Compose files, though a few are just running bare-metal. I have so far managed to resist the urge to put everything in Kubernetes.


      Everything is backed up on a regular basis using Tarsnap.


      I also use Tailscale to provide a VPN between my cloud servers and my various client devices (phone, laptop, tablet). If a service needs to be exposed to the public Internet to function, I do that… but otherwise, everything is only exposed within the Tailscale VPN, so that only my own devices can access them. This is both a lovely convenience (not having to manage as many DNS records), and provides an extra degree of security by hiding services that no one else needs to access.
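
As a tiny illustration of that pattern (the address below is made up; Tailscale assigns each node a stable 100.x.y.z IP on the private network), a service can be bound to the machine's Tailscale address rather than 0.0.0.0, so it never listens on a public interface:

```python
from http.server import HTTPServer, SimpleHTTPRequestHandler

TAILSCALE_IP = "100.101.102.103"  # hypothetical tailnet address of this server

# Binding to the Tailscale IP (rather than "" / 0.0.0.0) means only
# devices on my own tailnet can ever reach this service.
server = HTTPServer((TAILSCALE_IP, 8080), SimpleHTTPRequestHandler)
server.serve_forever()
```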


      Services that I self-host

• RSS reader: Despite the demise of Google Reader back in the mists of time, I’ve been a consistently heavy user of RSS feeds since at least 2008. At times I’ve used commercial products such as Feedly, but these days I self-host the aggregator using FreshRSS. I use FreshRSS partly because it’s pretty easy to spin up and administer, and partly because it’s compatible with Reeder, a Mac and iOS app that I generally use to actually read my feeds.
      • Fediverse instance: I run a self-hosted instance on the Fediverse ensemble of social networking sites. The best-known tool for this is Mastodon, but I currently use the Pleroma server, mostly because it seemed less painful to set up and configure. I run my own instance partly out of curiosity, and partly because I didn’t strongly resonate with any particular topic-specific server that’s already out there.
      • IRC bouncer: I’m not on IRC very much these days, but I do like to avoid losing messages, and sometimes want to be logged into the same channels on different physical clients. So I run a ZNC server to maintain persistence.
      • Matrix server: Matrix is a decentralized messaging platform that supports end-to-end encryption. Think of it as being a little like the Fediverse, but for chat rather than microblogging. This falls pretty squarely in the category of “I find this amusing to run”, because I mostly chat with less-nerdy folks on other, commercial platforms.
      • Git server: I run a Gitea server which I use to mirror my own repos, as well as a variety of other open source repos. This is mostly to ensure that I have an up-to-date backup of repos I care about, independent of Github or whatever provider.
      • Jupyter notebooks: I keep a persistent Jupyter notebook instance running for random code experiments and as a tiny development playground. This runs on its own VM where I also do other random software development, and it’s separate from the other services mostly so I don’t take down all my personal infra with an accidental OOM from a big build.
      • Software package repository: I run an instance of Nexus Repository OSS, mostly to cache Docker images and other content that run the rest of the services above!

      Services where I use managed hosting but don’t own the server

      • This website! My regular website and this blog run on a shared hosting provider, mostly through inertia. (I’ve used the same hosting provider for web hosting since around 2008.)
      • Email: In theory it’s an open, federated system similar to the Fediverse. In practice, the combination of spam and the growth of large providers makes it increasingly painful to run a server yourself. This post from Carlos Fenollosa does a good job of describing the difficulties.

  I do, however, run all my email through my own domain, though it’s hosted via Google Workspace (formerly Google Apps, then G Suite). I also back up my inbox locally on a regular basis. That means that if Google ever decides to remove my account, charge obnoxious costs, or otherwise misbehave, my email address is at least portable to other providers.
diff --git a/2022/things-that-are-hard/index.html b/2022/things-that-are-hard/index.html
new file mode 100644

This is a crosspost from VanessaSaurus: dinosaurs, programming, and parsnips. See the original post here.
      Things that are Hard


I saw a funny tweet on Twitter the other night - it was someone from a large consumer company sharing their vision for “the next generation shopping experience” and it was a virtual person walking through a supermarket aisle and reaching out to pick up a bottle of wine. I can’t find the specific tweet, but it said something to the effect of:


      Nobody asked for this. Stop making stuff to solve problems that people don’t have


      My dear reader, it me! 😲️ This message hit me really hard, because I am definitely one to build niche tools for use cases that likely don’t exist but seem fun or interesting to me. I also feel pretty disconnected from communities that are innovating and testing new ideas.


      What is hard?


This is a problem that a lot of us have. We build things that nobody needs. We need to focus more on the problems that people are actually facing. I would also scope that to developer workflows, which include automation, testing, and development. Since I have a nice view into my own mental space, here is my list of things that are hard.

        +
1. When I am trying to develop software and I can't open an interface with the code and environment I need
2. That my main interaction with a resource is via SSH
3. When a workflow or even container works in one place but not another
4. When I need to develop, build in CI, push to a registry, and pull. One mistake? Start from scratch
5. When I need to run a job and I have to interact with a job manager and it's hard and annoying
6. Logging or monitoring means looking at text files with cryptic names
7. Automated testing on HPC is not a thing. Build on GitHub and pray.
8. When I can't easily navigate code, documentation, or it's completely missing
9. When I set up everything the way I like it and I have to login to a new system and do it all over again
10. When I want to develop something that uses a cluster resource but there are no exposed APIs.
11. When it's impossible to compare between systems because they are special snowflakes
12. When I can't easily test across the systems that my software is intended for.
13. To scale anything I have to use a job manager, wait hours, and then again if there is one mistake
14. When it takes 2 hours or more to get a node allocated
15. When I can't really make tools for HPC because I'm trying to find workarounds for all these problems

And I’d add a “thing that is annoying” to be this obsessive focus on power and scale, in a competitive sense, and this race to be in the Top500 and beat the other guy over all else. The constant need to rebuild clusters means we never focus on the details of how we use them. We do the same things over again. I’ve mentioned these things before, possibly many times, but I need to point it out again.


      Our current developer environments are more like handcuffs than places we are enabled to thrive.


      The reality for me is that I tend to put myself in a new role or environment, and then think of lots of cool ways to extend a particular tool, or build something before it. This is why I’ve made a ton of visualizations, associated tools, or posts for spack - it’s literally just the thing that is right in front of me. Put something else in front of me, such as an entire infrastructure with APIs, and I’d do the same. So why can’t a nice set of developer tools be available for the resources I’m supposed to be using?


      Develop based on specific problems


I think I want to develop more focusing on these problems. Don’t get me wrong - I’ll definitely keep making silly projects. But my vision for the future needs to be oriented toward these pains. These in particular are the problems that I think our community needs to look at, at least given this developer perspective. I say this because I’ve seen and used the dark side - having free rein of beautiful cloud APIs to let me automate to my heart’s content! Only now, not being part of some cloud or container cluster deployment project, am I aware that I don’t have access to these development tools. What is my solution now? I largely don’t ssh into an HPC cluster until I absolutely have to - either to scale something, or to reproduce a workflow that works on GitHub Actions (but then is really challenging to get working on the cluster resource). Indeed, this entire thread resulted after a frustrating evening of exactly that.


What isn’t helpful? What isn’t helpful is telling me “This center / place / person has this thing that has solved this problem.” Can I easily access it, and what about the entire research software engineering community? This kind of response shuts down the conversation and makes the developer (myself for example) realize that the person I’m talking to is not interested in thinking about how to inspire change. I’ve been really frustrated recently with mentioning even an abstract idea, and getting shut down that “Oh that sounds like this other tool.” For a project to reach this “mention status” it needs to be easy to install or use, and not have a barrier of privilege that you have to work at a certain place or know people. Telling me that there is a solution that requires some convoluted steps and permissions not only implies that it is only available to those in privilege, but that the solution is not well adopted enough or shared enough to be truly a solution for our community.


      Inspiring Vision


If we aren’t happy with the current state of the world, what are our options? Well, we could leave our current roles to find another state that is more similar to what we want. Personally speaking, I haven’t hit that point quite yet. I want to try my hardest to formulate a vision for how I want the world to be, and then find opportunity to work on it from where I am. The wisdom here is that no specific role is perfect, and optimally we should place ourselves somewhere where there are resources and open-mindedness for change. It’s up to us to extend our influence as best we can to help drive some possible future. If you try that and fail? At least you tried.


These are the things that are hard. I am going to try harder to have them be the focus of my thinking about the future. I want to make them easier. I’m starting to realize that possibly the reality is that I should think beyond the constraints of HPC, and more toward the kind of infrastructure that I want, and then figure out how to slowly integrate it as a part of our culture too. We can start with a core vision for a future that we want, and then slowly build up tooling and community around that.


      Happy Friday, friends!

diff --git a/2022/toy-programs-for-learning-a-new-language/index.html b/2022/toy-programs-for-learning-a-new-language/index.html
new file mode 100644

This is a crosspost from Computing – thinking out loud: works in progress and scattered thoughts, often about computers. See the original post here.
      Toy programs for learning a new language


      It used to be that I’d get interested in a new programming language, but I wouldn’t have a new project to use it for and had trouble knowing how to start. I have trouble really grasping a new language without building something in it, and “X by example” or working through a book don’t really do the job.


      What’s helped me lately is to build an array of “standard” toy programs that I understand reasonably well, and that I can use to explore the new language and figure out how to do something real in it.


      Right now, my toy program collection consists of:

• A link shortening service, like bit.ly or tinyurl, along with an HTTP API for adding and removing links
• A 2D diffusion simulation
• A “system package inventory” program that builds a list of all the RPMs/DEBs installed on a Linux machine and shoves them into a SQLite database

      This is almost never what I’d call production-quality code. For example, when I’m writing these toy programs, I rarely write unit tests (until I start exploring the test libraries for the language!). But they’re still very valuable learning tools, and give me space to explore some very different use-cases.


      I almost always write all three in a given language, but the order depends a lot on what I think the new language will be good for. For example, I’ll write the “system package inventory” program first if I think the new language might be handy for system administration tools. It’s a great way to see how well the language plays with a common Linux environment, how painful it is to use SQLite, and to get practice writing CLI tools in it. I’ll often augment the basic “scan and store” functionality with a CLI to do frequent queries, like “on what date was this package last upgraded”.
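
The core of the RPM flavor is only a few lines; a minimal sketch (illustrative schema and package name, not a production-quality version) might be:

```python
import sqlite3
import subprocess

# One "name|version|install time" line per installed package
out = subprocess.run(
    ["rpm", "-qa", "--queryformat", "%{NAME}|%{VERSION}|%{INSTALLTIME}\n"],
    capture_output=True, text=True, check=True,
).stdout

db = sqlite3.connect("packages.db")
db.execute("CREATE TABLE IF NOT EXISTS packages (name TEXT, version TEXT, installtime INTEGER)")
db.executemany(
    "INSERT INTO packages VALUES (?, ?, ?)",
    [line.split("|") for line in out.splitlines() if line],
)
db.commit()

# Example query: when was this package last installed or upgraded?
for row in db.execute(
    "SELECT name, version, datetime(installtime, 'unixepoch') FROM packages WHERE name = ?",
    ("openssl",),
):
    print(row)
```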


      On the other hand, if I think I’m going to use the new language for a bunch of numerical work, I’ll start with the diffusion simulation. When I write that, I often start with a naive implementation and then start playing with profilers and other performance tools, or try to parallelize the simulation. This is also a great excuse to dig into any plotting tools commonly used with the language.
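
That naive starting point is small; an explicit 5-point-stencil version in NumPy looks roughly like this (sizes and constants picked arbitrarily, with dt kept inside the stability limit):

```python
import numpy as np

n, steps = 128, 500
alpha, dx, dt = 1.0, 1.0, 0.2  # stable for dt <= dx**2 / (4 * alpha)

u = np.zeros((n, n))
u[n // 2, n // 2] = 1000.0  # hot spot in the middle

for _ in range(steps):
    # Laplacian of the interior via shifted array views
    lap = (u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:]
           - 4.0 * u[1:-1, 1:-1]) / dx**2
    u[1:-1, 1:-1] += alpha * dt * lap

print("total heat:", u.sum())
```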


These toy programs are also handy if I want to explore new ways to integrate a service into a larger production environment. For example, I might start with the link shortening service, deploying the service itself statelessly and persisting the list of links into a PostgreSQL DB (a minimal sketch of that starting point follows the list below). Then I start complicating things…

      • Let’s add logging!
      • And tracing!
      • It’s always a good idea to expose Prometheus metrics
      • And wouldn’t it be handy to support multiple database backends?
      • Now wrap it all in a Helm chart for handy deployment
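
Before any of those complications, the starting point stays tiny. Here's a hedged sketch with Flask, with an in-memory dict standing in for the PostgreSQL backend (the endpoint names are arbitrary):

```python
import secrets
from flask import Flask, jsonify, redirect, request

app = Flask(__name__)
links = {}  # in-memory stand-in for the real database backend

@app.post("/links")
def add_link():
    slug = secrets.token_urlsafe(4)
    links[slug] = request.get_json()["url"]
    return jsonify(slug=slug), 201

@app.delete("/links/<slug>")
def remove_link(slug):
    links.pop(slug, None)
    return "", 204

@app.get("/<slug>")
def follow(slug):
    if slug not in links:
        return "not found", 404
    return redirect(links[slug])

if __name__ == "__main__":
    app.run()
```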

      I imagine I’m not the only person to have a standard collection of learning projects for new languages. If you do this too, what does your project list look like?

diff --git a/_posts/vsoch/2022-8-4-hpc-apps.md b/2022/tunel-apps-for-hpc/index.html

This is a crosspost from VanessaSaurus: dinosaurs, programming, and parsnips. See the original post here.

Tunel: Apps for HPC


A few months ago I was talking about ssh tunnels. The reason was that I was looking for a solution to deploy apps (like a Jupyter notebook) onto HPC. After an adventure I got it working, and it came down to a relatively simple set of commands that I needed to just write into my app logic and forget about. The reason for this was working on my new personal project, tunel.


      Tunel is named for what it does. “Tunel” is an elegant derivation of “tunnel” and will do exactly that - create a tunnel between your local workstation and an HPC cluster.

      @@ -26,37 +94,29 @@

In short, tunel will provide a collection of “apps” that are easy to deploy to HPC. There are concepts called launchers, and examples are singularity, slurm, or htcondor. And we can add more! It’s the job of a launcher to take an app recipe (a definition in yaml plus helper scripts that can be customized on the fly by the user) and get it running, whatever that means (run a job? a container? monitor something? something else?). For the most part, most of the apps I’ve been developing have web interfaces, as they have historically been the most challenging thing to get easily working on HPC. As a quick example, to run a jupyter notebook via Singularity on my login node, after I install tunel and have my ssh connection defined as “osg” I can do:

      $ tunel run-app osg singularity/socket/jupyter --jupyterlab=true 
       

The name “singularity/socket/jupyter” is the unique identifier (and path) to the recipe and config, and I can provide custom arguments as shown above. And although this is the “singularity” launcher, we can do the same kind of interaction with a slurm launcher, going one level deeper to run the notebook on a node after we submit a job! And in my typical way of doing things, I have automation that generates a table and documentation for each of these apps. Check them out here!


I’m mostly working on singularity and HTCondor apps at the moment because I use the Open Science Grid (OSG) for development, as this is a personal project. Thanks to Matthew West for showing me OSG - I was pretty limited in where I could develop before finding it!


      Django template with a socket?

This kind of framework can be powerful if I develop a bunch of custom apps, but it’s much more powerful if I can enable YOU to easily do that too! Thus, I knew one of the first tasks I wanted to do was to create a template, likely in each of Flask, Django, and FastAPI, that would plug immediately into Tunel. And while I have much work left to do, last night and this evening I figured out a technical issue that is going to empower us to make so many cool things, and I wanted to share! Let’s talk about the problem, what I tried, and what ultimately worked.


      Traditional Setup with uwsgi and nginx

If you look at a family of Python + web interface apps, you’ll find this uwsgi guy in the middle (I don’t know the correct pronunciation but I say YOU-SKI). It’s a fairly rich tool, but in layman’s terms I think of it as a middleman between Python and a traditional web server. But actually, you don’t technically need the web server - and this is where things start to get interesting. For a traditional setup, you might find an nginx (a web server) configuration file that looks like this.

      -
      
       # the upstream component nginx needs to connect to
       upstream django {
      @@ -94,19 +154,15 @@ 



      I’ve made a lot of web apps, and whether I use docker-compose with separate containers or a single one, I usually have to write a nginx configuration. The above gets started in the container entrypoint with my app calling uwsgi, and defining that same socket:

      $ uwsgi --socket=${socket} /code/scripts/uwsgi.ini
       

      And of course things happen before that, but that’s the main last line. The uwsgi.ini is a configuration file that makes it easier to define settings.

      [uwsgi]
       master = true
       processes = 4
      @@ -128,75 +184,131 @@ 



      Without going into huge detail, the above says that the app that I wrote (in Python) is listening on that socket, so requests to the web server will either be directed to some static file, filtered out, or sent to our application. And we typically want to use nginx because it’s really good at serving static files and handling traffic.


But now let’s step back. If you look under the server in the config above, you’ll notice we are serving content on port 8000. This is why I can open the browser to localhost and that port and see my application. But as we know with headless HPC, there are no ports. I can’t use this. So this was my first predicament, last night. I had created this application and it ran locally, but I needed to somehow get the entire thing routed through a tunneled socket to take the next step.


      Uwsgi Only?

I’ll skip over the many hours of things that I tried that failed. I really liked having nginx, so I first wanted to somehow send it to the user via a socket, but that never worked. I had an idea to just map the original socket and then have a second container on the host for nginx, but I decided that was too complex. What wound up working was realizing that uwsgi can serve http directly, and that came down to a simple addition to its config:

      listen=200
       protocol=http
       

Once I did that, I tried the same technique to map the socket being written to directly to a port via the ssh tunnel, and boom - I saw a page! But it was really ugly, because it had no style. This is where I was like OHNO I need nginx for static. But then I found this page and it was a message from the heavens - I could define the same static and media URLs using uwsgi directly! That looked like this:

      $ uwsgi --socket=${socket} --static-map /static=/code/static /code/scripts/uwsgi-standalone.ini
       

      At this point I held my breath, re-ran my app, and wow!


      There it was - my entire app being served by a container running on a remote machine, only accessible to me through a physical socket. And guess what? I added a file browser, and it even worked to upload a dinosaur picture!


      Here is the entire page for the app - you can see there are many flags you can add and customize to interact.


While it’s only accessible to you and there isn’t a need for any kind of login, I did add the default username/password login to Django, and require it for logging in to the file browser. Of course I will eventually need this to be more formally security audited, but at least I don’t have anything interesting on my OSG home to be worried about. And is using just uwsgi a performance issue? I think probably not, since the expected use case is only one person.


      A Future for Apps

This is just the beginning - my plan is to put together a list of use cases for a GUI on a cluster, and then just package them into the core template apps for the developer user to easily customize. I have big plans for working on this, and honestly I’m so excited that I find I’m staying up way too late and just begging for the work day to end so I can continue. This idea is so powerful, because it’s using existing technologies to deploy containerized apps on HPC, where you don’t need any special permission. Just to show y’all, here is what it looks like to launch my app template:

      $ tunel run-app osg singularity/socket/tunel-django --tag=dev --pull
       

      I added the pull flag and a custom tag because I am actively developing, and my workflow is to quickly rebuild, push, and then run that command. That then shows me the ssh tunnel command that will immediately connect me to my app on a port in my browser.

      $ ssh -NT -L 7789:/../tunel/singularity/singularity/socket/tunel-django/singularity-socket-tunel-django.sock sochat1@osg
       

And that’s seriously it. You as the developer user are empowered to make and deploy apps, and they have interfaces, and you don’t need to do something silly like open a port or actually deploy a web server. It’s so stupidly easy - I’m looking around at all these complex web app setups that people have made for HPC over the years and I wonder why they aren’t doing something simpler. Maybe it’s just a space of development that people gave up on, or there are some security things I’m missing. Either way, I’m going to charge forward working on this! It’s too simple, and the idea is too beautiful to do anything else by this point.

      \ No newline at end of file +

      And that’s seriously it. You as the developer user are empowered to make and deploy apps, and they have interfaces, and you don’t need to do something silly like open a port or actually deploy a web server. It’s so stupidly easy - I’m looking around at all these complex web app setups that people have made for HPC over the years and I wonder why they aren’t doing something simpler. Maybe it’s just a space of development that people gave up on, or there are some security things I’m missing. Either way, I’m going to charge forward working on this! It’s too simple, and the idea is to beautiful to do anything else by this point.

      + +
      +
      + +
      + + + + + + + + + + + + + + + + diff --git a/_posts/gaborsamu/2022-12-13-lsf_grafana.md b/2022/visualizing-spectrum-lsf-data-with-grafana/index.html similarity index 58% rename from _posts/gaborsamu/2022-12-13-lsf_grafana.md rename to 2022/visualizing-spectrum-lsf-data-with-grafana/index.html index 3a6520b..590fb6a 100644 --- a/_posts/gaborsamu/2022-12-13-lsf_grafana.md +++ b/2022/visualizing-spectrum-lsf-data-with-grafana/index.html @@ -1,18 +1,87 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2022-12-13 00:06:51' -layout: post -original_url: https://www.gaborsamu.com/blog/lsf_grafana/ -slug: visualizing-spectrum-lsf-data-with-grafana -title: Visualizing Spectrum LSF data with Grafana ---- - -

      Overview

      + + + + + + + Visualizing Spectrum LSF data with Grafana - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
      +
      + +
      +

      hpc.social

      + + + + + + + + + + + +
      + High Performance Computing
      Practitioners
      and friends /#hpc +
      +
      + +
      +
      +
      + +
      + +
      + Share:  + +
      +
      + +
      + This is a crosspost from   Blogs on Technical Computing Goulash Recent content in Blogs on Technical Computing Goulash. See the original post here.
      + +
      +

      Visualizing Spectrum LSF data with Grafana


      Overview

System monitoring is a fundamental part of IT best practices. High performance computing (HPC) environments are no exception to this. At the high-end, HPC clusters can consist of thousands of servers, processing millions of jobs per day. HPC admins need ways to monitor the overall cluster to determine system status and availability through to the efficiency

@@ -147,4 +216,76 @@

      Summary


      Spectrum LSF provides many hooks and integration points enabling administrators to change things ranging from scheduling behavior and the output of query commands through to job information being logged to Elasticsearch. Spectrum LSF is highly customizable by organizations to suit specific needs and requirements. We’ve demonstrated this using Grafana to visualize data from the LSF scheduler in a simple example. Following the above example, administrators can combine existing HPC cluster system level reporting in Grafana with job information from Spectrum LSF for a better overall view and understanding of the infrastructure.

diff --git a/2022/what-i-ve-learned-from-looking-at-1-500-jobs-leading-research-computing-teams/index.html b/2022/what-i-ve-learned-from-looking-at-1-500-jobs-leading-research-computing-teams/index.html
new file mode 100644

This is a crosspost from Jonathan Dursi: R&D computing at scale. See the original post here.
      What I've Learned from Looking at 1,500 Jobs Leading Research Computing Teams


      Job numbers continue to grow; lots of data and product management jobs; IR groups at Universities becoming bigger employers


      (Note: This post is adapted from #111 of the Research Computing Teams Newsletter)


A year and a half ago I posted my observations on the first 500 jobs posted to the job board - we’re getting close to 1,500 now, and it’s worth taking a look to see what, if anything, has changed in research computing team leadership and management jobs [1].


There are some trends that have continued since the posting. The jobs in industry are growing vastly beyond what I would have imagined possible when I started in research computing in the 1990s. (The number of jobs working with biomedical data of one sort or another in particular is just astonishing.) Rather than technical computing being a niche, it’s utterly mainstream now. There are a lot of jobs out there, and I don’t even bother posting generic “data science manager” jobs unless they’re connected to some real complex research questions - which happens a lot, whether it’s fraud detection or improving financial modelling or supporting biomedical research. Some really fun-looking jobs that would probably feel a lot like working at a research computing centre keep coming up at consultancies - go visit a client and help them with their data science/data engineering/etc needs. There’s also a growing number of data science/engineering jobs at Universities that fall under the Provost/VP Operations rather than the VPR’s side of the house - Institutional Research, looking at (say) student success in support of the teaching mission.


Because of the growth in number of jobs, it is very much a candidate’s market out there. I’m seeing postings - especially for the traditional academic “director of research computing” jobs - stay open for cringe-inducing periods of time. A few in particular I’ve watched with vicarious embarrassment continue coming up in the listings for 8+ months. That’s a bad sign for us as hiring managers - the market for individual contributors is at least as tight - but it’s amazing news for us as individuals.


      When I wrote that post in late 2020 it was just regulated industries like health/biotech or financial services that were developing data governance or other data management jobs, but now data management is popping up everywhere, whether it’s retail or logistics or anywhere else. These are being joined, again first in the regulated industries, by data privacy or data risk management jobs. Privacy-preserving data analysis jobs (and teams supporting same with software development) are also starting to be more common (and there’s a lot of cool research and technology work to be done there!)


I’m also (finally!) starting to see explicit product management jobs in research computing, both academic and private-sector. You see it around data management — bundling and curating of data into real data products — but also in software development, especially around analysis pipelines for some reason.


      Probably related to the growth of product vs project thinking, I’m starting to see a lot of “delivery manager” jobs that would have been called “project managers” just a year ago. Projects are defined by having clearly defined start- and end-points up-front. “Delivery” jobs seem to focus on sustained, ongoing work, more appropriate for long-lived products.


These products that keep coming up often combine data, software, and systems one way or another. That really points to weaknesses around organizing by type of skills - the research software engineering movement, for instance - as the lines between software and systems in this DevOps, infrastructure-as-code era are very fuzzy; and as data grows more and more important, data skills are needed everywhere.


Especially for us as managers or leads, but also for individual contributors as they grow their skills, it’s important to have a pretty holistic view of research computing and data and not try to break it up into silos. The growing number of data engineering jobs is a great example. That work often involves all three of software, systems, and data expertise. Data engineering is getting so broad and important that not only are there different sub-fields, in large organizations there are likely to be completely distinct data engineering teams doing different work. Trying to decide which of those jobs are “research software engineering” jobs and which aren’t is not a productive way forward, for those candidates or for us as a community.


      Needless to say, the growth of remote jobs has been off the charts - especially in the private sector, although the academic institutions are gamely doing what they can to keep up (often hampered by institutional policies).


Late June 2022 update: At the time that I write this, there’s a slowdown in hiring in tech, especially among early-stage startups. That slowdown due to economic conditions is not, as far as I can tell, affecting these more research-oriented kinds of jobs. The job board doesn’t have a lot of jobs from startups anyway. For larger organizations, the biotech firms or the banking firms doing fraud detection research or the computing providers or academic groups or… clearly do not view these roles as “nice to haves” that can wait until there’s a bit more economic certainty.

      + +
[1] What counts as such a job? Any job that involves leading or mentoring people, or managing projects, programs, or products, in software, systems, or data curation/management/engineering/analysis to support the solution of research problems is a good fit. If you are hiring for such a job, feel free to submit it to the job board.
diff --git a/_posts/gaborsamu/2023-3-1-lsf_macos.md b/2023/lsf-client-on-macos-submitting-from-your-laptop/index.html

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.

      LSF client on macOS - submitting from your laptop


In traditional HPC environments, login nodes are typically used as an access point for users to submit and manage jobs. Although login nodes are still used today, HPC environments are increasingly being used by a broad class of users with domain expertise who are not necessarily IT experts. In other words, such users may be more comfortable using their native desktop

@@ -260,4 +329,74 @@

command is also blocked. Here, repeat steps 10 and 11 as described above, but for the bsub command. Once the steps have been completed, repeat the bsub job submission command.

    lsfuser@My-MacBook-Air LSF_Desktop_Client % bsub -q normal sleep 3600
Job <617551> is submitted to queue <normal>.
diff --git a/_posts/gaborsamu/2023-1-24-lsf_tig.md b/2023/monitoring-ibm-spectrum-lsf-with-the-tig-stack/index.html

This is a crosspost from Blogs on Technical Computing Goulash. See the original post here.
    Monitoring .-.. ... ..-. (IBM Spectrum LSF) with the TIG stack


Much like dashboards in automobiles, dashboards in the context of HPC infrastructure are crucial to get an understanding of what’s happening under the hood of your HPC cluster - at a glance. During my IT career, I’ve used a myriad of monitoring solutions ranging from SNMP and Ganglia to the ELK (Elasticsearch, Logstash, Kibana) stack. For example, I’ve recently written an overview on how it is possible to visualize IBM Spectrum LSF (LSF) data in Grafana. LSF is an HPC job scheduler which brings to the table three decades of experience in workload and resource management.

    @@ -22,6 +91,7 @@ given the many available ways to query LSF for key information using CLI commands.



    The Journey

There already exist many write-ups on how to deploy the TIG stack to monitor systems. This isn’t meant to be a guide on setting up the TIG stack. Rather, it’s assumed that the reader

@@ -35,6 +105,7 @@

services and software. A search, however, didn’t reveal the existence of any plugin to monitor LSF. So it was time to get creative.



    What to monitor?

A bit of research revealed that InfluxDB supports what is known as “line protocol”. This is a well-defined, text-based format for writing data to InfluxDB. I used the following

@@ -46,6 +117,7 @@

information for the LSF scheduler daemons. Refer to the following table for details:
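
As a quick illustration of the format, here is a sketch of building one line protocol record in Python. The measurement and field names are made up for illustration, and are not the ones used by the plugin below.

```python
import time

def to_line_protocol(measurement, tags, fields):
    """Format one InfluxDB line protocol record: measurement,tags fields timestamp."""
    tag_str = ",".join(f"{k}={v}" for k, v in tags.items())
    field_str = ",".join(
        f'{k}="{v}"' if isinstance(v, str) else f"{k}={v}i"  # "i" suffix = integer field
        for k, v in fields.items()
    )
    return f"{measurement},{tag_str} {field_str} {time.time_ns()}"  # nanosecond timestamp

print(to_line_protocol("lsf_hosts", {"host": "kilenc"}, {"status": "ok", "njobs": 12}))
# -> lsf_hosts,host=kilenc status="ok",njobs=12i 1671916800000000000
```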


    @@ -89,6 +161,7 @@

    +

    Scrapin' fun

    These above metrics would give a good idea of the state of the Spectrum LSF cluster at a glance. With the list of metrics prepared, the next step was to create a plugin script which would @@ -98,6 +171,7 @@

    A copy of the Telegraf plugin for Spectrum LSF is provided below. This is just an example and is provided “as is” for testing purposes. Your mileage may vary.


Example lsf_telegraf_agent.py script (abridged):
#!/usr/bin/python3.8
…
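Not shown in the abridged script above is how Telegraf invokes it. That is typically wired up with Telegraf's exec input plugin; a minimal sketch, assuming an install path of your choosing:

```toml
# /etc/telegraf/telegraf.conf (snippet)
[[inputs.exec]]
  commands = ["/usr/local/bin/lsf_telegraf_agent.py"]  # hypothetical install path
  timeout = "30s"
  interval = "60s"
  data_format = "influx"  # the script prints InfluxDB line protocol
```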

    Bringing it all together

For completeness, below are the details regarding the configuration of the environment. It should be noted that the simple test environment consists of a single server running IBM Spectrum LSF Suite for HPC and a separate server which runs the InfluxDB instance.



The following steps assume that IBM Spectrum LSF Suite for HPC, InfluxDB, and Telegraf have been installed.

… telegraf --config /etc/telegraf/telegraf.conf --test. Note: any errors in the configuration file /etc/telegraf/telegraf.conf will result in errors in the output.

Output of telegraf --config /etc/telegraf/telegraf.conf --test (abridged):
[root@kilenc telegraf]# pwd
…
    1. Assuming there were no errors in the previous step with telegraf, proceed to start the telegraf process via systemd.
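On a systemd-based distribution that generally amounts to something like:

```bash
systemctl enable --now telegraf
systemctl status telegraf
```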
… This is confirmed in the output below.
Output from InfluxDB queries (abridged):
[root@adatbazis fedora]# influx
…
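The queries themselves are elided in this diff, but a typical spot check from the influx shell looks something like the following; `telegraf` is the default database name used by Telegraf's InfluxDB output, and the measurement name is carried over from the earlier sketch:

```
> USE telegraf
> SHOW MEASUREMENTS
> SELECT * FROM lsf_servers ORDER BY time DESC LIMIT 5
```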
1. With telegraf successfully logging data to the InfluxDB instance, it will now be possible to create a data source in Grafana in order to create a dashboard containing LSF metrics. As noted at the outset, this article is not meant to be an extensive guide to the creation of dashboards in Grafana. In the Grafana navigation select Configuration > Data sources.
2. …
3. Select the Add data source button, followed by InfluxDB, which is listed under Time series databases. On the settings page specify the following values:

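The exact values are elided in this diff, but for a stock InfluxDB 1.x plus Telegraf pairing they would typically be along these lines (the host name is taken from the InfluxDB server shown earlier; port 8086 and the `telegraf` database are the defaults):

```
URL:      http://adatbazis:8086
Database: telegraf
```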

Next, click on Save & test. If all variables and settings were properly specified, the message “datasource is working. 17 measurements found.” will be displayed.

Example dashboard definition (JSON, abridged):
{
…

As you can see, with a short plugin script to collect information from LSF, it’s possible to monitor your LSF cluster using the TIG stack. It’s important to note that there are powerful monitoring and reporting tools available from IBM as add-ons to LSF: IBM Spectrum LSF RTM and IBM Spectrum LSF Explorer. You can find more details about the add-on capabilities for LSF here.

diff --git a/404.html b/404.html
new file mode 100644
index 0000000..7ff4678
--- /dev/null
+++ b/404.html
@@ -0,0 +1,93 @@
404: Page not found - hpc.social - Aggregated Personal Blog

    404: Page not found


    Whoops! We couldn’t find that link! Go home or just admire this cute animation.

    + + + + + diff --git a/Gemfile b/Gemfile deleted file mode 100644 index 3faaee2..0000000 --- a/Gemfile +++ /dev/null @@ -1,15 +0,0 @@ -source "https://rubygems.org" -gem 'jekyll' -gem 'jekyll-feed' -gem 'jekyll-paginate' -gem 'jekyll-sitemap' -gem 'jekyll-relative-links' -gem "tzinfo-data", "~> 1.2021" -gem 'github-pages' -gem "hpc-social-blog-theme", :git => "https://github.com/hpc-social/hpc-social-blog-theme.git", :branch => "main" - -# windows specific -gem 'wdm', '>= 0.1.0' - -# update ruby version -gem 'webrick' diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 10d0126..0000000 --- a/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2022 Vanessa Sochat, HPC Social Community - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/README.md b/README.md deleted file mode 100644 index 8e0a184..0000000 --- a/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# hpc.social Community Syndicated Blog - -![assets/images/blog.png](assets/images/blog.png) - -This is the repository for the [syndicated blog](https://hpc.social/projects/blog/) for the hpc.social community! -Note that we have two flavors of blogs - an aggregated personal blog (here) along with a collection -of community blogs served from [syndicated community blog](https://hpc.social/community-blog/). -The criteria for adding content feeds here is the following: - -> The personal blogs aggregator is the "soul" of the HPC community - HPCers who are personally invested in the minutiae of the work they are doing, the projects they are working on with some content about their culture and pet pictures :D - things that we all find in common and share and talk about. - -You can see the background for this discussion in [this thread](https://github.com/hpc-social/blog/pull/13). -Contribution steps are the equivalent across our community blogs, and you can -read about them [here](https://github.com/hpc-social/blog). - -## Development - -Note that we develop with a [shared theme](https://github.com/hpc-social/hpc-social-blog-theme) -you can generally update here via: - -```bash -$ bundle install -$ bundle update -``` - -And any changes to the theme should bed one there (and thus consistent across the sites). 
diff --git a/_config.yml b/_config.yml deleted file mode 100644 index b1e0019..0000000 --- a/_config.yml +++ /dev/null @@ -1,97 +0,0 @@ -title: hpc.social - Aggregated Personal Blog -description: Shared personal experiences and stories -author: vsoch -baseurl: "/personal-blog" -url: "https://hpc.social" -keywords: hpc, community -lang: en-US -timezone: America/Denver -repo: https://github.com/hpc-social/personal-blog - -# Social Profiles -twitter_username: hpc_social -github_username: hpc-social -logo: assets/images/hpc-social-blue.png - -# We use the default badge_color (red) for this one -badge_color: "#e50039" - -# And our theme! -theme: hpc-social-blog-theme - -author: - name: hpc.social - bio: High Performance Computing
    Practitioners
    and friends /#hpc - picture: assets/images/hpc-social-blue.png - github: hpc-social - twitter: hpc_social - email: info@hpc.social - -collections: - pages: - output: true - permalink: /:name - posts: - output: true - permalink: /:year/:title/ - - -defaults: - - - scope: - path: "" - values: - layout: "default" - - - scope: - path: "" - type: "pages" - values: - layout: "page" - - - scope: - path: "" - type: "posts" - values: - layout: "post" - comments: true - -# Navigation -navbar-links: - Home: "https://hpc.social/" - Community: - - About: "about" - Posts: - - All Posts: "posts" - - Archive: "archive" - -# Build settings -exclude: ["_site", "vendor", ".github"] - - -markdown: kramdown -kramdown: - input: GFM - syntax_highlighter: rouge - -comments: true -paginate: 20 -paginate_path: "/page/:num" - -plugins: - - jekyll-feed - - jekyll-paginate - - jekyll-sitemap - - jekyll-relative-links - -exclude: - - .jekyll-cache - - Gemfile - - Gemfile.lock - - LICENSE - - README.md - - vendor - -relative_links: - enabled: true - collections: false diff --git a/_data/authors.yml b/_data/authors.yml deleted file mode 100644 index a55f78f..0000000 --- a/_data/authors.yml +++ /dev/null @@ -1,35 +0,0 @@ -- name: "Vanessasaurus" - tag: "vsoch" - feed: https://vsoch.github.io/hpc-feed.xml - url: https://vsoch.github.io/ - image: https://vsoch.github.io/assets/images/avatar.png -- name: "Jonathan Dursi's Blog" - tag: "dursi" - feed: https://www.dursi.ca/feed.xml - url: https://www.dursi.ca - image: https://www.dursi.ca/assets/imgs/avatar-medium.jpg -- name: "Glenn K. Lockwood's Blog" - tag: "glennklockwood" - url: https://glennklockwood.blogspot.com - feed: https://glennklockwood.blogspot.com/feeds/posts/default/-/hpc?alt=rss - image: https://glennklockwood.com/me2.jpg -- name: "Mark Nelson's Blog" - tag: "markhpc" - url: https://markhpc.github.io/ - feed: https://markhpc.github.io/feed.xml -- name: "Thinking Out Loud" - tag: ajdecon - feed: https://thinking.ajdecon.org/category/computing/feed/ - url: https://thinking.ajdecon.org/ - image: https://www.ajdecon.org/pubfiles/ajdecon-covatar.jpg -- name: "Ramblings of a supercomputing enthusiast." - tag: gaborsamu - feed: https://www.gaborsamu.com/blog/index.xml - url: https://www.gaborsamu.com/blog/ - image: https://www.gaborsamu.com/images/gpsamu.png -- name: "Derek Weitzel's Blog" - tag: dweitzel - feed: https://derekweitzel.com/feed.xml - url: https://derekweitzel.com/ - image: https://derekweitzel.com/images/bio-photo.png - diff --git a/_posts/ajdecon/2021-1-2-p=147.md b/_posts/ajdecon/2021-1-2-p=147.md deleted file mode 100644 index 36bed63..0000000 --- a/_posts/ajdecon/2021-1-2-p=147.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -author: Thinking Out Loud -author_tag: ajdecon -blog_subtitle: works in progress and scattered thoughts, often about computers -blog_title: Computing – thinking out loud -blog_url: https://thinking.ajdecon.org -category: ajdecon -date: '2021-01-02 19:06:36' -layout: post -original_url: https://thinking.ajdecon.org/2021/01/02/sre-to-solutions-architect/ -slug: sre-to-solutions-architect -title: SRE to Solutions Architect ---- - -

    It’s been about two years since I joined NVIDIA as a Solutions Architect, which was a pretty big job change for me! Most of my previous work was in jobs that could fall under the heading of “site reliability engineering”, where I was actively responsible for the operations of computing systems, but my new job mostly has me helping customers design and build their own systems.


I’m finally starting to feel like I know what I’m doing at least 25% of the time, so I thought this would be a good time to reflect on the differences between these roles and what my past experience brings to the table for my (sort of) new job.


    (Just a note: I feel like job titles for ops folks are a fraught topic. My job titles have included things like “Production Engineer”, “HPC Cluster Administrator”, and “HPC/Cloud Systems Engineer”. I tend to self-identify more with the term “sysadmin”, but I’m using “SRE” as the most current term that captures the work I’ve spent a lot of my career doing, where I generally approached ops from a software engineering perspective. Feel free to substitute your job title of choice!)


    I spent most of the past 10 years building and running large computing systems. With the exception of ~18 months working on backend storage for a fairly large website, I’ve mostly worked on large high-performance-computing (HPC) clusters. These systems are generally used by researchers and engineers to run simulations and data analysis. The teams I joined were generally responsible for building these clusters, keeping them running, helping the researchers who used them, and making sure they performed well.


    In my day-to-day work in SRE (or whatever you call it), I mostly thought about problems like:

    • Are my team’s services operating reliably and predictably, according to our defined metrics?
  • Translated: What’s broken today?!
    • Are our (internal) customers having a good qualitative experience?
    • For any current or recently-past incidents, how can we understand what went wrong and incorporate that into our future development?
    • What major features or other changes are we hoping to release soon? How can we be confident they’ll work correctly and reliably?
    • Are we expecting to have to turn up more capacity or new systems soon? Are we ready to do so?
    • What projects can I pursue to automate anything boring that I have to work on?

    My role as a solutions architect is rather different, as I don’t actually have any services I’m responsible for keeping online. Instead, I’m generally working with external customers who are working with our products and using them in their own production environments. Because I’m focused on HPC and supercomputing, my customers have generally purchased NVIDIA’s hardware products, and are operating them in their own datacenters. I’m frequently talking to the SRE teams, but I’m not part of them myself.


    In my daily work as a solutions architect, I’m thinking more about questions like:

    • Do my (external) customers have a good understanding of what our products are and how to use them?
      • This may include products they already use, or new products that they may be planning to deploy
    • What are their pain points, and how can I feed that back to the product teams?
      • And also: What new product developments can I provide pro-active advice on before it makes it to the customer?
    • What new customer deployments are coming up, and how can I help them go smoothly?
    • How are our customers doing running their current clusters, and are they feeling a lot of pain?
    • What tools can I develop, or what content can I write, to help all of the above go well?

    On the one hand, I work on a lot of the same problems as a solutions architect as I did in SRE. I still spend a lot of time thinking about the scalability, performance, and reliability of HPC systems. I still care a lot about making sure the systems I help build are useful and usable for researchers.


    On the other hand, I’m not so much on the pointy end of these problems anymore. My work is mostly focused on enabling others to run reliable systems, rather than being directly on the hook for them. And while I do help directly manage some internal lab clusters, those systems have very loose SLOs. So in practice I haven’t been on call in about two years.


    I do think my experience in SRE has been really important in doing a good job in solutions architecture. I like to think I have a pretty good instinct for systems design at this point, and I can often help identify problems and bottlenecks in early stages. My troubleshooting skills from SRE work are incredibly helpful, as a lot of my work is helping customers understand what the heck is broken on their clusters. And I also find that it really helps to have someone who “speaks the same language” as the SRE teams for our customers, especially because I feel like so many vendor relationships neglect reliability concerns in favor of features.


    The transition has been really interesting, and I’m still conflicted about which kind of job I prefer. I don’t exactly miss being on call… but I do miss somewhat the more visceral feeling of understanding a running system really well through sheer continuous contact with it. However, I really love helping my customers build cool systems, and I like the satisfaction of helping many different teams do well, versus focusing tightly on a single service.


    I’m really enjoying the solutions architect gig right now, but I also wouldn’t be surprised if I ended up doing SRE work directly again at some point.

\ No newline at end of file
diff --git a/_posts/ajdecon/2022-1-15-p=176.md b/_posts/ajdecon/2022-1-15-p=176.md
deleted file mode 100644
index e743525..0000000
--- a/_posts/ajdecon/2022-1-15-p=176.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-author: Thinking Out Loud
-author_tag: ajdecon
-blog_subtitle: works in progress and scattered thoughts, often about computers
-blog_title: Computing – thinking out loud
-blog_url: https://thinking.ajdecon.org
-category: ajdecon
-date: '2022-01-15 16:00:00'
-layout: post
-original_url: https://thinking.ajdecon.org/2022/01/15/toy-programs-for-learning-a-new-language/
-slug: toy-programs-for-learning-a-new-language
-title: Toy programs for learning a new language
----

    It used to be that I’d get interested in a new programming language, but I wouldn’t have a new project to use it for and had trouble knowing how to start. I have trouble really grasping a new language without building something in it, and “X by example” or working through a book don’t really do the job.


    What’s helped me lately is to build an array of “standard” toy programs that I understand reasonably well, and that I can use to explore the new language and figure out how to do something real in it.


    Right now, my toy program collection consists of:

• A link shortening service, like bit.ly or tinyurl, along with an HTTP API for adding and removing links
    • A 2D diffusion simulation
• A “system package inventory” program that builds a list of all the RPMs/DEBs installed on a Linux machine and shoves them into a SQLite database

    This is almost never what I’d call production-quality code. For example, when I’m writing these toy programs, I rarely write unit tests (until I start exploring the test libraries for the language!). But they’re still very valuable learning tools, and give me space to explore some very different use-cases.


    I almost always write all three in a given language, but the order depends a lot on what I think the new language will be good for. For example, I’ll write the “system package inventory” program first if I think the new language might be handy for system administration tools. It’s a great way to see how well the language plays with a common Linux environment, how painful it is to use SQLite, and to get practice writing CLI tools in it. I’ll often augment the basic “scan and store” functionality with a CLI to do frequent queries, like “on what date was this package last upgraded”.
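As one concrete illustration, a first cut of that toy in Python (assuming an RPM-based system; the table layout is invented for the example) can be as small as:

```python
#!/usr/bin/env python3
"""Toy package inventory: dump installed RPMs into SQLite."""
import sqlite3
import subprocess

def installed_rpms():
    # Ask the RPM database for name, version, and install time.
    fmt = "%{NAME}\t%{VERSION}-%{RELEASE}\t%{INSTALLTIME}\n"
    out = subprocess.run(["rpm", "-qa", "--qf", fmt],
                         capture_output=True, text=True, check=True).stdout
    for line in out.splitlines():
        name, version, installtime = line.split("\t")
        yield name, version, int(installtime)

def main():
    db = sqlite3.connect("packages.db")
    db.execute("CREATE TABLE IF NOT EXISTS packages "
               "(name TEXT, version TEXT, installtime INTEGER)")
    db.execute("DELETE FROM packages")  # naive full rescan on each run
    db.executemany("INSERT INTO packages VALUES (?, ?, ?)", installed_rpms())
    db.commit()

if __name__ == "__main__":
    main()
```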


    On the other hand, if I think I’m going to use the new language for a bunch of numerical work, I’ll start with the diffusion simulation. When I write that, I often start with a naive implementation and then start playing with profilers and other performance tools, or try to parallelize the simulation. This is also a great excuse to dig into any plotting tools commonly used with the language.
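The naive starting point for that one, in Python say, is just a forward-Euler stencil over a NumPy array; the grid size, time step, and periodic boundaries below are arbitrary choices for illustration:

```python
import numpy as np

n, D, dx, steps = 128, 1.0, 1.0, 1000
dt = 0.2  # stable while dt < dx**2 / (4 * D)
u = np.zeros((n, n))
u[n // 2, n // 2] = 1.0  # point source in the middle

for _ in range(steps):
    # Five-point Laplacian; np.roll gives periodic boundaries for free.
    lap = (np.roll(u, 1, 0) + np.roll(u, -1, 0) +
           np.roll(u, 1, 1) + np.roll(u, -1, 1) - 4 * u) / dx**2
    u += D * dt * lap
```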


    These toy programs are also handy if I want to explore new ways to integrate a service into a larger production environment. For example, I might start with the link shortening service, deploying the service itself statelessly and persisting the list of links into a PostgreSQL DB. Then I start complicating things…

    • Let’s add logging!
    • And tracing!
• It’s always a good idea to expose Prometheus metrics (see the sketch just after this list)
    • And wouldn’t it be handy to support multiple database backends?
    • Now wrap it all in a Helm chart for handy deployment
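To make one of those steps concrete, the Prometheus bullet can be as small as the sketch below, using the prometheus_client library; the metric and function names are invented for the example:

```python
from prometheus_client import Counter, start_http_server

REDIRECTS = Counter("shortener_redirects_total",
                    "Redirects served by the link shortener", ["code"])

def serve_redirect(code: str) -> None:
    # Count every redirect, labelled by short code, then do the real work.
    REDIRECTS.labels(code=code).inc()
    # ... look up the target URL and return an HTTP 302 here ...

if __name__ == "__main__":
    start_http_server(9100)  # scrape endpoint at :9100/metrics
```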

    I imagine I’m not the only person to have a standard collection of learning projects for new languages. If you do this too, what does your project list look like?

\ No newline at end of file
diff --git a/_posts/ajdecon/2022-10-30-p=235.md b/_posts/ajdecon/2022-10-30-p=235.md
deleted file mode 100644
index fac1ccf..0000000
--- a/_posts/ajdecon/2022-10-30-p=235.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-author: Thinking Out Loud
-author_tag: ajdecon
-blog_subtitle: works in progress and scattered thoughts, often about computers
-blog_title: Computing – thinking out loud
-blog_url: https://thinking.ajdecon.org
-category: ajdecon
-date: '2022-10-30 21:59:55'
-layout: post
-original_url: https://thinking.ajdecon.org/2022/10/30/the-web-services-i-self-host/
-slug: the-web-services-i-self-host
-title: The web services I self-host
----

    Why self-host anything?


    In a lot of ways, self-hosting web services is signing up for extra pain. Most useful web services are available in SaaS format these days, and most people don’t want to be a sysadmin just to use chat, email, or read the news.


    In general, I decide to self-host a service if one of two things is true:

    • Self-hosting is going to add a capability that’s difficult to find in a SaaS alternative. That might be privacy, or extra compute, or just an extra degree of customization that I want.
    • I find it interesting or amusing to self-host it! I have been a professional sysadmin, and ran production web services for over a decade. So I enjoy messing around with servers, and can have a fair amount of fun with this.

    Infrastructure and general tooling


    Right now my self-hosted services are hosted on Oracle Cloud Infrastructure, for a very simple reason: OCI includes a very generous Always Free tier, which doesn’t even ask for a credit card! So I’m confident I’m not going to accidentally spend any money. I use ARM Ampere A1 Compute instances for service hosting.


    The individual services are mostly managed using Docker Compose files, though a few are just running bare-metal. I have so far managed to resist the urge to put everything in Kubernetes.
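For a sense of how little glue this takes, a single service in a setup like this is usually a handful of Compose lines; here is a hedged sketch for the FreshRSS service mentioned below (image tag, volume path, and port mapping are assumptions, not my actual config):

```yaml
# docker-compose.yml (sketch)
version: "3"
services:
  freshrss:
    image: freshrss/freshrss:latest
    restart: unless-stopped
    volumes:
      - ./freshrss-data:/var/www/FreshRSS/data
    ports:
      - "127.0.0.1:8080:80"  # bound locally; remote access goes over the VPN
```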


    Everything is backed up on a regular basis using Tarsnap.


    I also use Tailscale to provide a VPN between my cloud servers and my various client devices (phone, laptop, tablet). If a service needs to be exposed to the public Internet to function, I do that… but otherwise, everything is only exposed within the Tailscale VPN, so that only my own devices can access them. This is both a lovely convenience (not having to manage as many DNS records), and provides an extra degree of security by hiding services that no one else needs to access.


    Services that I self-host

    • RSS reader: Despite the demise of Google Reader back in the mists of time, I’ve been a consistently heavy user of RSS feed since at least 2008. At times I’ve used commercial products such as Feedly, but these days I self-host the aggregator using FreshRSS. I use FreshRSS partly because it’s pretty easy to spin up and administer, and partly because it’s compatible with Reeder, a Mac and iOS app that I generally use to actually read my feeds.
    • Fediverse instance: I run a self-hosted instance on the Fediverse ensemble of social networking sites. The best-known tool for this is Mastodon, but I currently use the Pleroma server, mostly because it seemed less painful to set up and configure. I run my own instance partly out of curiosity, and partly because I didn’t strongly resonate with any particular topic-specific server that’s already out there.
    • IRC bouncer: I’m not on IRC very much these days, but I do like to avoid losing messages, and sometimes want to be logged into the same channels on different physical clients. So I run a ZNC server to maintain persistence.
    • Matrix server: Matrix is a decentralized messaging platform that supports end-to-end encryption. Think of it as being a little like the Fediverse, but for chat rather than microblogging. This falls pretty squarely in the category of “I find this amusing to run”, because I mostly chat with less-nerdy folks on other, commercial platforms.
    • Git server: I run a Gitea server which I use to mirror my own repos, as well as a variety of other open source repos. This is mostly to ensure that I have an up-to-date backup of repos I care about, independent of Github or whatever provider.
    • Jupyter notebooks: I keep a persistent Jupyter notebook instance running for random code experiments and as a tiny development playground. This runs on its own VM where I also do other random software development, and it’s separate from the other services mostly so I don’t take down all my personal infra with an accidental OOM from a big build.
    • Software package repository: I run an instance of Nexus Repository OSS, mostly to cache Docker images and other content that run the rest of the services above!

    Services where I use managed hosting but don’t own the server

    • This website! My regular website and this blog run on a shared hosting provider, mostly through inertia. (I’ve used the same hosting provider for web hosting since around 2008.)
    • Email: In theory it’s an open, federated system similar to the Fediverse. In practice, the combination of spam and the growth of large providers makes it increasingly painful to run a server yourself. This post from Carlos Fenollosa does a good job of describing the difficulties.

I do, however, run all my email through my own domain, though it’s hosted via ~~Google Apps~~ ~~GSuite~~ Google Workspace. I also back up my inbox locally on a regular basis. That means that if Google ever decides to remove my account, charge obnoxious costs, or otherwise misbehave, my email address is at least portable to other providers.

\ No newline at end of file
diff --git a/_posts/ajdecon/2022-11-1-p=247.md b/_posts/ajdecon/2022-11-1-p=247.md
deleted file mode 100644
index 07af04d..0000000
--- a/_posts/ajdecon/2022-11-1-p=247.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-author: Thinking Out Loud
-author_tag: ajdecon
-blog_subtitle: works in progress and scattered thoughts, often about computers
-blog_title: Computing – thinking out loud
-blog_url: https://thinking.ajdecon.org
-category: ajdecon
-date: '2022-11-02 00:18:17'
-layout: post
-original_url: https://thinking.ajdecon.org/2022/11/01/happy-living-close-ish-to-the-metal/
-slug: happy-living-close-ish-to-the-metal
-title: happy living close (-ish) to the metal
----

    For various reasons, I’ve been doing a little bit of career introspection lately. One of the interesting realizations to come out of this is that, despite in practice doing mostly software work, I’ve been happiest when my work involved a strong awareness of the hardware I was running on.


    I suppose it shouldn’t be a surprise, exactly, but I hadn’t exactly thought about it in those terms before! Before I got into computing, I got a bachelors degree in physics, and got through much of a PhD in materials science. While I wasn’t building computers directly, I was definitely working regularly on hardware, building experimental apparatus involving various combinations of vacuum chambers, lasers, exotic microscopes, custom electronics, and microfluidics.


    In terms of my computing career, I’ve generally worked in the area of “high-performance computing”, a buzzword that means I’ve focused on building fast parallel systems aimed at researchers.


    It’s a sub-field that lends itself to awareness of hardware: even as a new baby sysadmin, I was staring at motherboard block diagrams and thinking about the performance differences between different PCIe topologies.


    And because HPC is one of the areas that took the longest to embrace cloud computing, I spent a lot of years doing work in datacenters. Most of my work would usually involve writing code, doing configuration management, and managing Linux systems… but on a regular basis I’d head into a big loud room full of air conditioners and server racks, carrying a screwdriver.


    Amusingly, my relatively recent stint at a hyperscaler was the first time I had worked on computers, but didn’t have my office in the same building as the computers I was running! Even there I was at least somewhat cognizant of hardware specifics, and one of my early projects was performance testing on the Bryce Canyon storage node, to see if it was ready for use in a large-scale distributed filesystem.


    And these days, at NVIDIA, I’m enjoying being even closer to the metal. (At least conceptually; I still work remote…) I spend my days thinking about datacenter requirements, cable lengths, firmware upgrades, hardware health checks, and application performance tests on large clusters. And I love getting to play with these shiny toys.


    Anyway, this is just a ramble. But a useful one. While I’d be the first to admit that cloud has its place, and I use it for some personal projects, I really enjoy understanding the hardware I run on. I have trouble thinking of computers as remote abstractions with no underlying detail. They are pleasingly physical in my mind, even if they’re thousands of miles away.

\ No newline at end of file
diff --git a/_posts/ajdecon/2022-11-27-p=264.md b/_posts/ajdecon/2022-11-27-p=264.md
deleted file mode 100644
index 86d0aa0..0000000
--- a/_posts/ajdecon/2022-11-27-p=264.md
+++ /dev/null
@@ -1,113 +0,0 @@
----
-author: Thinking Out Loud
-author_tag: ajdecon
-blog_subtitle: works in progress and scattered thoughts, often about computers
-blog_title: Computing – thinking out loud
-blog_url: https://thinking.ajdecon.org
-category: ajdecon
-date: '2022-11-27 15:28:16'
-layout: post
-original_url: https://thinking.ajdecon.org/2022/11/27/adams-weekly-update-2022-11-27/
-slug: adam-s-weekly-update-2022-11-27
-title: Adam’s weekly update, 2022-11-27
----

    What’s new


    The first thing that’s new is… this post! I’m going to try to do at least a weekly post on the blog now, just a general update and some links. This will hopefully help me get back into the habit of writing on the blog regularly, and maybe inspire me to write a bit more in general.


I was off work this week for the Thanksgiving holiday, and traveled to Michigan to visit my parents and my brother’s family. My mom has been struggling with some pretty major health issues this year, so it was really wonderful and reassuring to get to spend some time with her and my dad. I also finally got to meet my brother’s three-year-old son, who was born right before the pandemic started, and who I hadn’t managed to meet up until now.


    On the tech-related front, I used this week to take a break from Twitter (mostly), and to be honest… it was kinda refreshing! I had developed a pretty bad Twitter habit this year, doomscrolling for more time than I like to admit. While I really like Twitter and I’ve had some nice career boosts from it, it was also a time sink that was not entirely healthy.


    Admittedly, that time was somewhat replaced by playing around on the Fediverse / Mastodon. But with the lack of algorithmic suggestions, quote tweets, and other means of virality, that network so far feels a lot quieter and less time-consuming than Twitter. Tim Bray has a good post up which discusses some of the advantages and pitfalls of federated social media, and I can highly recommend reading that. I’m still a bit skeptical that it will be a practical “Twitter replacement” for most people, but so far I’m finding it pleasant.


    What I’m reading

    • Nonfiction book: Code, Second Edition, by Charles Petzold. This book walks through the process of building a working computer, starting with ideas like Morse code, then working up from logic gates on up. This is technically a re-read, as I read the first edition… 10+ years ago? But I’m getting a lot more out of it this time around, and really enjoying it.
    • Fiction book: The Spare Man, by Mary Robinette Kowal. A cozy murder mystery on a luxury cruise to Mars. I’m only a few chapters in, but already greatly enjoying myself.
    • “Hiding theory in practice”, by Fred Hebert. I’ve been reading a lot about safety engineering and its application to computing lately, but that community can sometimes get off into the weeds about points of theory that don’t have consensus in the broader computing community. This post has a good discussion of how to use the theory of safety engineering to guide decisions, without requiring that everyone working with you be handed a reading list.
    • “Paper: Repentance as Rebuke: Betrayal and Moral Injury in Safety Engineering”, also by Fred Hebert. A discussion of a paper by Dekker et al which looks at the aftermath of the 737 MAX air disasters, and the public repentance of some of the engineers who were involved. Go read the post, it’s great. And I’m planning to read the original paper this week.
    • “Cannon Lake: Intel’s Forgotten Generation”, from Chips and Cheese. Really I’ve been reading a bunch of the technical posts from Chips and Cheese lately, and they’re doing pretty good analyses of recent hardware. They’ve definitely earned that spot in my RSS reader.
    • Glenn K Lockwood’s “SC’22 Recap”. I was sad to miss Supercomputing this year, though enough folks have come down with COVID that I don’t really regret the decision. But Glenn wrote up a really interesting recap post, with an interesting new viewpoint now that he’s working at Microsoft Azure. Among other things, he included a whole section titled The underwhelming, with the opening line “The biggest deal appears to be that exascale is here, and it turns out that it’s not that big of a deal.”

    Recent recipes


    Because it was Thanksgiving, I did a lot of cooking this week! I’m not going to list everything I made, but a few of my favorites were:

    • Cheesy Garlic Butter Rolls from Delish: Nothing special, but really tasty.
    • Challah Stuffing from Smitten Kitchen: This recipe was a huge winner, with most of the family coming back for seconds, and then having more the next day for leftovers. It was really good, and is probably what I’ll make if I ever do stuffing again.
    • Best Challah from Smitten Kitchen: I baked the bread that went into the stuffing, and it was really tasty on its own! This recipe makes two loaves, and I only needed one for the stuffing. So I also made french toast with it, which worked really nicely.

    Pet photos


    Gotta have those pet photos.

    A blond golden doodle in a red harness and a blue bandanna lays on sandy dirt and looks into the camera
    A white calico cat sits on a blanket and washes her front paw
    A gray-brown tabby cat wearing a green collar sitting on a wall, looking vaguely toward the camera
\ No newline at end of file
diff --git a/_posts/ajdecon/2022-12-20-p=289.md b/_posts/ajdecon/2022-12-20-p=289.md
deleted file mode 100644
index d351933..0000000
--- a/_posts/ajdecon/2022-12-20-p=289.md
+++ /dev/null
@@ -1,101 +0,0 @@
----
-author: Thinking Out Loud
-author_tag: ajdecon
-blog_subtitle: works in progress and scattered thoughts, often about computers
-blog_title: Computing – thinking out loud
-blog_url: https://thinking.ajdecon.org
-category: ajdecon
-date: '2022-12-20 18:14:52'
-layout: post
-original_url: https://thinking.ajdecon.org/2022/12/20/adams-weekly-ish-update-2022-12-20/
-slug: adam-s-weekly-ish-update-2022-12-20
-title: Adam’s weekly (-ish) update, 2022-12-20
----

    What’s new


    The past few weeks have been on the intense side at work, so I completely lost track of the blog and haven’t had a chance to write much in that time. However, I’m now on a holiday break, and finally have time to sit down at a keyboard to write more than code and Slack messages.


    One of the highlights of the past few weeks was a trip to San Jose, and the NVIDIA headquarters. I changed teams at work back in July, transferring from a group that was closely integrated with product management, to a more straightforward engineering team which designs and builds new high-performance computing systems.


    This was the first chance I’ve had to meet up with other members of my new team in person, and it was a really wonderful experience to be in the same physical space as folks who were previously just images on my screen. I love working remotely, but it’s also great to be able to stand in front of a white board with someone and brainstorm, or get coffee and just have a chat with a coworker outside of a video call with an agenda.


    (Plus, we were all careful and managed to avoid catching COVID from each other! Which was a win on its own.)


    Now, for the next two weeks I’m off work, and planning to take some time to relax and spend time on projects that are harder to focus on during busy work weeks. Expect (maybe) less about computers in my blog and social feeds, and more about D&D, baking, and tasty cocktails.


    What I’m reading, watching, and listening to


    I’ve been a bit too scattered to focus on actual books the past few weeks, but I did find time for a few interesting articles and podcasts. In particular,

    • “Why Roman Egypt was such a strange province”, from Bret Devereaux: As usual from Devereaux, an accessible but extremely detailed discussion of why so much of what we know about the Roman empire is from Egyptian records, but why that also might not be representative of the broader empire.
    • “Emoji as incident resolution tools”, from Will Gallego: A fun discussion of how using emoji as part of a team’s communication can add nuance and shared understanding during incident management, along with a discussion of the disadvantages and costs associated with the practice.
    • “What does modern software architecture look like in 2022?”, from Bartosz Mikulski: A nice article which discusses how service-oriented software architecture can often include an explicit expectation of change. For example, the architecture might include notes on an ongoing deprecation of a library, or might signpost the need to factor a new microservice out when overall system load gets high enough.
    • The Brady Heywood podcast: Found via the Oxide and Friends podcast, the Brady Heywood podcast is a series on engineering disasters and their consequences from a forensic engineering firm. It’s mostly not being updated any more (with the podcasters moving on to a separate series on complexity science), but it has a deep back catalog of good episodes, and includes thoughtful discussions of human factors, safety engineering, and how organizational pressures become manifest in engineering artifacts.

    Recent recipes

    • Smitten Kitchen’s Homemade Irish Cream: This is a recipe I make every year, and I often give away small bottles of it as holiday gifts. It’s really ridiculously tasty, much better than Baileys or similar, and good either on its own or in hot chocolate.
• Smitten Kitchen’s Fairytale of New York: This is a really tasty whiskey cocktail, and the star of the show is a “winter warmth syrup” that substitutes in for simple syrup. The syrup is simply very tasty, and turns what’s effectively an Old Fashioned variant into a lovely holiday cocktail.
• Sparkling gingerbread from Yossy Arefi’s Snacking Cakes: This recipe takes a little more prep than most of Arefi’s “snacking cakes”, as it includes ginger three ways (ground, fresh, and crystallized), but it’s worth the few minutes of extra work.

    Pet photos

    A white calico cat and a gray tabby cat lounging on a large brown pet bed in front of a gas fireplace.
    I’m pretty sure these two want me to turn the fireplace on.
    A gray tabby cat lounges on a dog bed, while a golden doodle lays on the floor nearby and looks forlornly at the bed.
    Just Percy bullying the dog by stealing his bed.
\ No newline at end of file
diff --git a/_posts/ajdecon/2022-12-5-p=268.md b/_posts/ajdecon/2022-12-5-p=268.md
deleted file mode 100644
index a233916..0000000
--- a/_posts/ajdecon/2022-12-5-p=268.md
+++ /dev/null
@@ -1,104 +0,0 @@
----
-author: Thinking Out Loud
-author_tag: ajdecon
-blog_subtitle: works in progress and scattered thoughts, often about computers
-blog_title: Computing – thinking out loud
-blog_url: https://thinking.ajdecon.org
-category: ajdecon
-date: '2022-12-05 05:49:35'
-layout: post
-original_url: https://thinking.ajdecon.org/2022/12/05/adams-weekly-update-2022-12-04/
-slug: adam-s-weekly-update-2022-12-04
-title: Adam’s weekly update, 2022-12-04
----

    What’s new


    This week was really intense from a work perspective. Not “bad intense”, but the kind of week where every day was spent with such a level of focus, that at 5 PM or so I found myself staring off into space and forgetting words. I think I got some good things accomplished, but my brain also felt like mush by the time the weekend came.


    This week I’m traveling to San Jose for work (I just checked into my hotel a little while ago!), so I fully expect this week to also be eaten by work. So I don’t promise anything terribly interesting for next week’s post…


    However, I did take advantage of a Sunday in San Jose to visit the Computer History Museum in Mountain View! I try to visit the museum every few years, and while a lot of the exhibits are the same, enough things change that I always get something new from the visit. Also, I’ve been doing a lot of reading about hardware development and the history thereof lately, so it was interesting to examine the museum through that new lens.


I may write more about my visit later this week — it definitely sparked some thoughts — but in the meantime, here are a few photos I took while wandering around the museum.

    A mechanical computer built mostly of brass, with various numerical dials. A small placard labels this as a replica of the Babbage Difference Engine No. 1 Demonstration Piece.
    The Babbage Difference Engine, and other mechanical computers, have always fascinated me.
    The Cray-1, a round computer with its own built-in seating attached.
    Can’t visit the museum without visiting the Cray-1.
    The Connection Machine 1, a large black cube divided in eight sections.
    I would have loved to have seen a CM-1 in operation, with its red LEDs showing the operation of its many single-bit CPUs.
    The front panel of an Altair 8800 computer, with an array of LEDs and switches controlling the state of individual bits.
    Having recently read Charles Petzold’s “Code”, I was struck by how closely the front panel of the Altair 8800 resembles the fictional front panel of the computer that Petzold constructs from logic gates up.
    A Dell PowerEdge R710 lays on a white plastic table, top cover off, surrounded by instructions on how to disassemble it.
    The CHM Learning Lab now includes a back room with a couple of Dell PowerEdge R710 servers, complete with instructions for how to disassemble and reassemble them. Anyone who wants can wander in and take them apart. It was great fun watching a 5-year-old kid pulling components out of one of these… As well as feeling a little weird, as I think I’ve run these in production!

    What I’m reading


    I don’t have a ton to share this week — honestly, the whole week feels like a blur — but here are two books that I recommend.

    • The Red Scholar’s Wake, by Aliette de Bodard: As the blurb says, “Lesbian space pirates!” Also, a really wonderful novella about building a new relationship amidst grief, power differentials, politics, and space battles. I think I basically recommend everything that de Bodard writes, but especially this. And it basically stands alone! So you can read this first, without going back to the other stories in the same world.
    • Dealers of Lightning: XEROX PARC and the Dawn of the Computer Age, by Michael Hiltzik: I’ve just started this, but it’s already a really interesting snapshot of a key period in the development of the personal computer.

    Recent recipes


    Pet photos

    Phyrne the calico cat stares down into the camera from a stairway
    Close-up on the face of Percy the gray tabby cat
    Benny the golden doodle curled up on a dog bed
\ No newline at end of file
diff --git a/_posts/ajdecon/2022-2-12-p=167.md b/_posts/ajdecon/2022-2-12-p=167.md
deleted file mode 100644
index 639086c..0000000
--- a/_posts/ajdecon/2022-2-12-p=167.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-author: Thinking Out Loud
-author_tag: ajdecon
-blog_subtitle: works in progress and scattered thoughts, often about computers
-blog_title: Computing – thinking out loud
-blog_url: https://thinking.ajdecon.org
-category: ajdecon
-date: '2022-02-12 16:00:00'
-layout: post
-original_url: https://thinking.ajdecon.org/2022/02/12/development-of-managed-vs-self-hosted-services/
-slug: developing-managed-vs-self-hosted-software
-title: Developing managed vs self-hosted software
----

I’ve done some work lately with teams that deliver their products in very different ways, and it has me thinking about how much our “best practices” depend on a product’s delivery and operations model. I’ve had a bunch of conversations about this tension.


    On the one hand, some of the teams I’ve worked with build software services that are developed and operated by the same team, and where the customers (internal or external) directly make use of the operated service. These teams try to follow what I think of as “conventional” SaaS best practices:

    • Their development workflow prioritizes iteration speed above all else
    • They tend to deploy from HEAD, or close to it, in their source repository
      • In almost all cases, branches are short-lived for feature development
    • They’ve built good automated test suites and well-tuned CI/CD pipelines
    • Releases are very frequent
    • They make extensive use of observability tooling, often using third-party SaaS for this
    • Fast roll-back is prioritized over perfect testing ahead of time
    • While their user documentation is mostly good, their operations documentation tends to be “just good enough” to onboard new team members, and a lot of it lives in Slack

    However, we also have plenty of customers who deploy our software to their own systems, whether in the cloud or on-premise. (Some of them don’t even connect to the Internet on a regular basis!) The development workflow for software aimed at these customers looks rather different:

    • Deploys are managed by the customer, and release cycles are longer
    • These teams do still have CI/CD and extensive automated tests… but they may also have explicit QA steps before releases
    • There tend to be lots of longer-lived version branches, and even “LTS” branches with their own roadmaps
    • Logging is prioritized over observability, because they can’t make assumptions about the customer tooling
    • They put a lot more effort into operational documentation, because most operators will not also be developers

    From a developer perspective, of course, this all feels much more painful! The managed service use case feels much more comfortable to develop for, and most of the community tooling and best practices for web development seems to optimize for that model.


    But from a sysadmin perspective, used to mostly operating third-party software, the constraints of self-hosted development are all very familiar. And even managed service teams often rely on third-party software developed using this kind of model, relying on LTS releases of Linux distributions and pinning major versions of dependencies.


    The biggest challenge I’ve seen, however, is when a development team tries to target the same software at both use cases. As far as I can tell, it’s very difficult to simultaneously operate a reliable service that is being continuously developed and deployed, and to provide predictable and high-quality releases to self-hosted customers.


    So far, I’ve seen this tension resolved in three different ways:

    • The internal service becomes “just another customer”, operating something close to the latest external release, resulting in a slower release cycle for the internal service
    • Fast development for the internal service gets prioritized, with external releases becoming less frequent and including bigger and bigger changes
    • Internal and external diverge completely, with separate development teams taking over (and often a name change for one of them)

    I don’t really have a conclusion here, except that I don’t really love any of these results. /sigh


    If you’re reading this and have run into similar tensions, how have you seen this resolved? Have you seen any success stories in deploying the same code internally and externally? Or alternatively — any interesting stories of failure to share? 😉 Feel free to send me an email, I’d be interested to hear from you.

\ No newline at end of file
diff --git a/_posts/ajdecon/2022-2-25-p=203.md b/_posts/ajdecon/2022-2-25-p=203.md
deleted file mode 100644
index a5bcad5..0000000
--- a/_posts/ajdecon/2022-2-25-p=203.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-author: Thinking Out Loud
-author_tag: ajdecon
-blog_subtitle: works in progress and scattered thoughts, often about computers
-blog_title: Computing – thinking out loud
-blog_url: https://thinking.ajdecon.org
-category: ajdecon
-date: '2022-02-25 16:00:00'
-layout: post
-original_url: https://thinking.ajdecon.org/2022/02/25/a-supportive-job-interview-story/
-slug: a-supportive-job-interview-story
-title: A supportive job interview story
----

    (adapted from an old lobste.rs comment)


    My favorite interview ever was a systems interview that didn’t go as planned. This was for an SRE position, and while I expected the interview to be a distributed systems discussion, the interviewer instead wanted to talk kernel internals.


    I was not at all prepared for this, and admitted it up front. The interviewer said something along the lines of, “well, why don’t we see how it goes anyway?”


    He then proceeded to teach me a ton about how filesystem drivers work in Linux, in the form of leading me carefully through the interview question he was “asking” me. The interviewer was incredibly encouraging throughout, and we had a good discussion about why certain design decisions worked the way they did.


    I ended the interview (a) convinced I had bombed it, but (b) having had an excellent time anyway and having learned a bunch of new things. I later learned the interviewer had recommended to hire me based on how our conversation had gone, though I didn’t end up taking the job for unrelated reasons having to do with relocation.


    I’ve given a number of similar interviews since, on system design or general sysadmin skills. I’ve always tried to go into these thinking about both where I could learn, and where I could teach, and how either outcome would give the candidate a chance to shine.

\ No newline at end of file
diff --git a/_posts/ajdecon/2022-3-12-p=13.md b/_posts/ajdecon/2022-3-12-p=13.md
deleted file mode 100644
index 1fad332..0000000
--- a/_posts/ajdecon/2022-3-12-p=13.md
+++ /dev/null
@@ -1,120 +0,0 @@
----
-author: Thinking Out Loud
-author_tag: ajdecon
-blog_subtitle: works in progress and scattered thoughts, often about computers
-blog_title: Computing – thinking out loud
-blog_url: https://thinking.ajdecon.org
-category: ajdecon
-date: '2022-03-12 16:00:00'
-layout: post
-original_url: https://thinking.ajdecon.org/2022/03/12/an-unstructured-rant-on-running-long-lived-software-services/
-slug: an-unstructured-rant-on-running-long-lived-software-services
-title: An unstructured rant on running long-lived software services
----

    – Be kind to your colleagues. Be kind to your users. Be kind to yourself. This is a long haul and you’ll all fuck up.


    ⁃ The natural environment for your code is production. It will run there longer than it does anywhere else. Design for prod first, and if possible, make your dev environment act like prod.


    ⁃ Legacy code is the only code worth caring about.


    ⁃ Users do weird stuff, but they usually have a very good reason, at least in their context. Learn from them.


    ⁃ It’s 2022, please do structured logging.


    ⁃ Contexts and tracing make everyone’s lives easier when it comes time to debug. At minimum, include a unique request id with every request and plumb it through the system.
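A minimal sketch of the request-id half of this, in Python, with names invented for illustration:

```python
import logging
import uuid

logging.basicConfig(format="%(asctime)s req=%(request_id)s %(message)s",
                    level=logging.INFO)
log = logging.getLogger("myservice")

def handle_request(request):
    # Reuse the caller's id when present so the trace spans services;
    # otherwise mint a fresh one, and pass it on downstream.
    rid = request.headers.get("X-Request-Id") or str(uuid.uuid4())
    log.info("request started", extra={"request_id": rid})
    # ... do the work, forwarding rid on any downstream calls ...
    log.info("request finished", extra={"request_id": rid})
```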


    ⁃ Do your logging in a separate thread. It sucks to find a daemon blocked and hanging because of a full disk or a down syslog server.


    ⁃ Don’t page for individual machines going down. Do provide an easy or automated way for bad nodes to get thrown out of the system.


    – Be prepared for your automation to be the problem, and include circuit breakers or kill switches to stop it. I’ve seen health checks that started flagging every machine in the fleet as bad, whether it was healthy or not. We didn’t bring down prod because the code assumed if it flagged more than 15% of the fleet as bad, the problem was probably with the test, not the service.


    ⁃ Make sure you have a way to know who your users are. If you allow anonymous access, you’ll discover in five years that a business-critical team you’ve never heard of is relying on you.


    ⁃ Make sure you have a way to turn off access for an individual machine, user, etc. If your system does anything more expensive than sending network requests, it will be possible for a single bad client to overwhelm a distributed system with thousands of servers. Turning off their access is easier than begging them to stop.


    ⁃ If you don’t implement QOS early on, it will be hellish to add it later, and you will certainly need it if your system lasts long enough.


    ⁃ If you provide a client library, and your system is internal only, have it send logs to the same system as your servers. This will help trace issues back to misbehaving clients so much.


    ⁃ Track the build time for every deployed server binary and monitor how old they are. If your CI process deploys daily, week-old binaries are a problem. Month-old binaries are a major incident.


    ⁃ If you can get away with it (internal services): track the age of client library builds and either refuse to support builds older than X, or just cut them off entirely. It sucks to support requests from year-old clients, force them to upgrade!


    ⁃ Despite all this, you will at some point start getting requests from an ancient software version, or otherwise malformed. Make sure these requests don’t break anything.


    ⁃ Backups are a pain, and the tooling is often bad, but I swear they will save you one day. Take the time to invest in them.


    ⁃ Your CI process should exercise your turnup process, your decommission process, and your backups workflow. Life will suck later if you discover one of these is broken.


    ⁃ Third party services go down. Your service goes down too, but they probably won’t happen at the same time. Be prepared to either operate without them, or mirror them yourself


    ⁃ Your users will never, ever care if you’re down because of a dependency. Every datacenter owned by AWS could be hit by a meteor at the same time, but your user will only ever ask “why doesn’t my service work?”

    - - - - -

    ⁃ Have good human relationships with your software dependencies. Know the people who develop them, keep in touch with them, make sure you understand each other. This is especially true internally but also important with external deps. In the end, software is made of people.

    - - - - -

    ⁃ If users don’t have personal buy-in to the security policy, they will find ways to work around it and complain about you for making their lives harder. Take the time to educate them, or you’ll be fighting them continuously.

    \ No newline at end of file diff --git a/_posts/ajdecon/2022-5-14-p=227.md b/_posts/ajdecon/2022-5-14-p=227.md deleted file mode 100644 index 4c9f655..0000000 --- a/_posts/ajdecon/2022-5-14-p=227.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -author: Thinking Out Loud -author_tag: ajdecon -blog_subtitle: works in progress and scattered thoughts, often about computers -blog_title: Computing – thinking out loud -blog_url: https://thinking.ajdecon.org -category: ajdecon -date: '2022-05-14 19:35:32' -layout: post -original_url: https://thinking.ajdecon.org/2022/05/14/interesting-links-i-clicked-this-week/ -slug: interesting-links-i-clicked-this-week -title: Interesting links I clicked this week ---- - -

    I watched several really interesting talks from SRECon22 Americas this week, and in particular I’d like to highlight:

    - - - - -
    • Principled Performance Analytics, Narayan Desai and Brent Bryan from Google. Some interesting thoughts on quantitative analysis of live performance data for monitoring and observability purposes, moving past simple percentile analysis.
    • The ‘Success’ in SRE is Silent, Casey Rosenthal from Verica.io. Interesting thoughts here on the visibility of reliability, qualitative analysis of systems, and why regulation and certification might not be the right thing for web systems.
    • Building and Running a Diversity-focused Pre-internship program for SRE, from Andrew Ryan at Meta. Some good lessons-learned here from an early-career internship-like program, in its first year.
    • Taking the 737 to the Max, Nickolas Means from Sym. A really interesting analysis of the Boeing 737 Max failures from both a technical and cultural perspective, complete with some graph tracing to understand failure modes.
    - - - -

    I also ran across some other articles that I’ve been actively recommending and sharing with friends and colleagues, including:

    - - - - -
    • Plato’s Dashboards, Fred Hebert at Honeycomb. This article has some great analysis of how easily-measurable metrics are often poor proxies for the information we’re actually interested in, and discusses qualitative research methods as a way to gain more insight.
    • The End of Roe Will Bring About A Sea Change In The Encryption Debate, Rianna Pfefferkorn from the Stanford Internet Observatory. You should absolutely go read this article, but to sum up: law enforcement in states that ban abortion is now absolutely part of the threat model that encrypted messaging defends against. No one claiming to be a progressive should be arguing in favor of “exceptional access” or other law enforcement access to encryption.
    - - - -

    \ No newline at end of file diff --git a/_posts/dursi/2010-5-1-canadian-astronomical-computing-data-netowork-facilitieshtml.md b/_posts/dursi/2010-5-1-canadian-astronomical-computing-data-netowork-facilitieshtml.md deleted file mode 100644 index 40e1fa9..0000000 --- a/_posts/dursi/2010-5-1-canadian-astronomical-computing-data-netowork-facilitieshtml.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2010-05-01 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/canadian-astronomical-computing-data-netowork-facilities.html -slug: canadian-astronomical-computing-data-and-network-facilities-a-white-paper-for-the-2010-long-range-plan -title: Canadian Astronomical Computing, Data And Network Facilities- A White Paper - for the 2010 Long Range Plan ---- - -

    In this whitepaper for the CASCA 2010 Long Range Plan, I and the rest of the Computing, Data, and Network committee of CASCA lay out the state of the ecosystem for computation in support of Canadian astronomy, and suggest a path forward for the time period of the 2010-2020 long range plan.

    - - -

    Abstract

    - -

    Significant investment in new large, expensive astronomical observing facilities spanning a substantial portion of the electromagnetic spectrum was a dominant theme of LRP2000 and continues to be necessary for Canadian astronomy to maintain its world position. These developments are generating increasingly large volumes of data. Such investments only make sense if they are balanced by strong infrastructure support to ensure that data acquired with these facilities can be readily accessed and analyzed by observers, and that theoreticians have the tools available to simulate and understand their context. This will require continuing investment in computational facilities to store and analyze the data, networks to ensure useful access to the data and products by Canadian researchers, and personnel to help Canadian researchers make use of these tools.

    - - -

    In addition, large parallel simulations have become an essential tool for astrophysical theory, and Canadian Astronomy has world-leading simulators and developers who rely on world-class High Performance Computing facilities being maintained in Canada to do their research effectively.

    - - -

    We recommend that Compute Canada be funded at $72M/yr to bring HPC funding per capita in line with G8 norms; that part of every Compute Canada technology renewal include a Top-20 class computing facility; that NSERC and other funding agencies begin supporting software development as an integral component of scientific research; that the staff funding for consortia be tripled, including local access to technical analyst staff; and that the last-mile bottleneck of campus networking less than 10 Gb/s be addressed where it is impacting researchers, with particular urgency for the current 1 Gb/s connection at the CADC.

    \ No newline at end of file diff --git a/_posts/dursi/2010-6-17-codes-as-instruments-community-applications-and-simulation-software-for-the-hardware-architectures-of-the-next-decadehtml.md b/_posts/dursi/2010-6-17-codes-as-instruments-community-applications-and-simulation-software-for-the-hardware-architectures-of-the-next-decadehtml.md deleted file mode 100644 index 6ec0ec2..0000000 --- a/_posts/dursi/2010-6-17-codes-as-instruments-community-applications-and-simulation-software-for-the-hardware-architectures-of-the-next-decadehtml.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2010-06-17 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/codes-as-instruments-community-applications-and-simulation-software-for-the-hardware-architectures-of-the-next-decade.html -slug: codes-as-instruments-community-applications-and-simulation-software-for-the-hardware-architectures-of-the-next-decade -title: Codes as Instruments- Community Applications and Simulation Software for the - Hardware Architectures of the Next Decade ---- - -

    It is becoming increasingly problematic that, even as computing and data become more and more fundamental to research, and the complexity and diversity of computing technologies out there grows, getting stable funding for developing high-quality research software remains so difficult.

    - - -

    In this whitepaper for the CASCA 2010 Long Range Plan, my colleague Falk Herwig and I lay out the case for increased funding of R&D software development by professional research software developers. We make a couple of points which I genuinely believe to be strong:

    - - -

    First, increased benefits. A successful community code can support an enormous body of research. By the (admittedly somewhat crude) count we use in this paper, the top six research codes in Astronomy accounted for approximately 50% of the computational astronomy publications over the period of study, and the top three - Cloudy, Gadget, and FLASH, which I was part of - accounted for nearly 40%. That is an enormous amount of R&D effort enabled by those projects.

    - - -

    Second, reduced costs. We cite from the growing research software development literature to demonstrate the high (and growing) challenges of engineering these codes in scientists’ spare time, and the high cost of software defects. By having a small cadre of professional research software development personnel, better quality software can be developed more efficiently.

    - - -

    Finally, a word about the title - this is an analogy due to Falk, and while it’s been controversial, I think there’s a lot of truth to it. Astronomy has always relied heavily on, for instance, telescopes - but a telescope is only part of an observational facility. A big photon-gathering dish is only as useful as the scientific instrument that’s placed at its focus to make sense of those photons. Similarly, a huge computer by itself has no scientific value without software to run on it. Unless our community invests in computational instruments with the same level of seriousness as observational instruments, our ability to make use of these facilities is going to be needlessly limited.

    - - -

    Abstract

    - -

    Modern astronomical research requires increasingly sophisticated computing facilities and software tools. Computational tools have become the fundamental means of turning raw observational data into scientific insight. Complex multi-physics simulation codes have developed into tools for numerical experiments that provide scientific insight beyond classical theory. Canadian researchers need an environment for development and maintenance of these critical tools. In particular, the drastically enhanced complexity of deeply heterogeneous hardware architectures poses a real challenge to using present and future HPC facilities.

    - - -

    Without a national program in astrophysical simulation science and astronomy application code development, we are becoming vulnerable with respect to our ability to maximise the scientific return from existing and planned investments into astronomy. In addition, there are significant industrial/commercial HQP needs that a simulation and application code program could start to address, if it is properly aligned with academic training opportunities.

    - - -

    We outline the framework and requirements for developing Canadian astronomical application and simulation codes — and code builders. In the US decadal plan process, voices are calling for similar emphasis on developing infrastructure and incentives for open community codes (Weiner et al. 2009). We propose funding several small interdisciplinary teams of postdocs, graduate students, and staff, housed in departments at Universities that have or are about to make a commitment in a relevant area (e.g. applied math, computational physics, modeling science). These teams can, while training astronomical and computational HQP, focus on building tools that have been deemed to be high priorities by the astronomical and astrophysical communities in order to make the best scientific use of our new computational facilities.

    \ No newline at end of file diff --git a/_posts/dursi/2011-11-23-testing-roundoff-2html.md b/_posts/dursi/2011-11-23-testing-roundoff-2html.md deleted file mode 100644 index 66fa680..0000000 --- a/_posts/dursi/2011-11-23-testing-roundoff-2html.md +++ /dev/null @@ -1,106 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2011-11-23 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/testing-roundoff-2.html -slug: testing-roundoff -title: Testing Roundoff ---- - -

    A talk has been circulating (HT: Hacker News) from a conference celebrating 50 years of scientific computing at Stanford, in which the speaker, William Kahan, discusses an old and sadly disused trick for testing the numerical stability of the implementation of an algorithm; it should work with any C99 or Fortran 2003 compiler without changing the underlying code. It’s definitely a tool that’s worth having in your toolbox, so it’s worth mentioning here.

    - - -

    We’ll consider a simple numerical problem; imagine a projectile launched from the ground ($h = 0$) with velocity $v_0 = 5000\ \mathrm{m\,s}^{-1}$, and subject to the Earth’s gravitational acceleration, $g = 9.81\ \mathrm{m\,s}^{-2}$. We’re going to ask at what (first) time the projectile reaches a given height $h$.

    - - -

    This is going to be an application of our friend the quadratic equation:

    - - -

    \[r = \frac{-b \pm \sqrt{b^2 - 4 a c}}{2 a}\]

    Now, because of the repeated subtraction, a naive implementation of this equation is known to undergo catastrophic cancellation near $b^2 = 4 a c$, or where $|4 a c| \ll b^2$ (so that $\sqrt{b^2 - 4 a c} \approx |b|$) — in our case, near the peak and the ends of the projectile’s trajectory, respectively. We’re going to demonstrate that below.

    - - -

    Now, before we show that such sensitivity can happen, we should ask — why would we care? If we test our code and know it gives “good enough” answers under the conditions that matter to us, does it really matter what could happen in other circumstances? The answer, of course, is yes. There are a lot of things we could want to do — increase the aggressiveness of compiler optimizations when compiling our code, for instance — which will have the effect of numerically perturbing our computation; and we need to know if those small perturbations will have small, or large, effects on our answers.

    - - -

    It turns out that IEEE 754, the standard for floating point numbers, can give us some help with this. (Everyone who does numerical work should know at least a little bit about the floating point standard, or at least the issues involved with floating point numbers. What every computer scientist should know about floating point, particularly the first few sections, is an essential guide.) The floating point standard - which almost all widely-used computing hardware should support - allows you to set certain properties of the mathematics “on the fly”. One particularly useful feature is the ability to set how the last digit of all floating point operations is rounded - to nearest (the default), to zero (eg, always truncate), to positive infinity (eg, always round up) or to negative infinity (always round down). In the C99 standard, this is implemented in the “fenv.h” header and the math library; in Fortran 2003, this is part of the intrinsic IEEE_ARITHMETIC module, where you can call IEEE_SET_ROUNDING_MODE.

    - - -

    By changing the rounding, you are perturbing every floating point operation in your calculation. If this perturbation results in significant changes in your result, then your calculation is very fragile, and you may have to look into re-writing the calculation, using another algorithm, or resorting to using higher precision for that calculation (which will push the perturbations to less significant decimal places). If not, then you have some evidence that your calculation is robust to perturbations, at least in the last bit.

    - - -

    Below we have an example of how you’d do this in C. We have a simple routine which uses the obvious implementation of the quadratic equation to calculate the time when the projectile is at one meter, and we perform this calculation with all available rounding modes:

    - - -
    #include <stdio.h>
    #include <math.h>
    #include <fenv.h>

    const int NOSOLN=-1;
    const int SOLN  = 0;

    int time(const float vo, const float g, const float ho, float *time) {
        float disc  = (vo*vo - 2.*g*ho);

        if (disc < 0) return NOSOLN;

        disc = sqrt(disc);
        float root1 = (vo + disc)/g;
        float root2 = (vo - disc)/g;

        /* return the earliest non-negative root */
        if ((root2 >= 0.) && root2 < root1)
            *time = root2;
        else
            *time = root1;

        return SOLN;
    }


    int main(int argc, char **argv) {

        const float g =9.81;
        const float vo=5000.;
        const float ho=1.;

        int nroundings=4;
        int roundings[]={FE_TONEAREST, FE_UPWARD, FE_DOWNWARD, FE_TOWARDZERO};
        char *names[]  ={"To nearest", "To +inf", "To -inf", "To zero"};

        for (int r=0; r<nroundings; r++) {
            int status = fesetround(roundings[r]);
            if (status) {
                fprintf(stderr,"Could not set rounding to '%s'.\n", names[r]);
            } else {
                float soln;
                time(vo, g, ho, &soln);
                printf("%s: %f\n", names[r], soln);
            }
        }

        return 0;
    }

    We compile the code with gcc (any C99 compiler should work):

    - - -
    $ gcc -O0 -Wall -std=c99 quadratic.c -o quadratic -lm

    Note that we need to explicitly link in the math library, and to turn off optimization (so that the compiler doesn’t replace the repeated calls to time() with a single call). Running this, we find:

    - - -
    $ ./quadratic
    To nearest: 0.000199
    To +inf: 0.000149
    To -inf: 0.000249
    To zero: 0.000249

    Changing the rounding modes changes the result by 50%! This shows that our current implementation - which is not giving obviously wrong answers - is extremely fragile in the presence of numerical noise, and we should exercise extreme caution with compiler flags, etc. (How to re-write the expression to be more robust to small changes is a topic for another day.)
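
    For the impatient: a minimal sketch of one standard such rewrite (multiplying the small root by its conjugate so the subtraction disappears), in Python, using numpy's float32 to mimic the single precision above, with this post's projectile numbers:

    import numpy as np

    g, vo, ho = np.float32(9.81), np.float32(5000.), np.float32(1.)
    disc = np.sqrt(vo*vo - np.float32(2.)*g*ho)

    naive  = (vo - disc)/g                    # subtracts two nearly equal numbers
    stable = np.float32(2.)*ho/(vo + disc)    # conjugate form: no cancellation

    print(naive, stable)   # ~1.99e-04 vs ~2.00e-04; the true small root is 2.0000e-04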

    \ No newline at end of file diff --git a/_posts/dursi/2012-1-12-stopping-your-program-at-the-first-nanhtml.md b/_posts/dursi/2012-1-12-stopping-your-program-at-the-first-nanhtml.md deleted file mode 100644 index 868996e..0000000 --- a/_posts/dursi/2012-1-12-stopping-your-program-at-the-first-nanhtml.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2012-01-12 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/stopping-your-program-at-the-first-nan.html -slug: stopping-your-program-at-the-first-nan -title: Stopping your program at the first NaN ---- - -

    If you know that somewhere in your program, there lurks a catastrophic numerical bug that puts NaNs or Infs into your results and you want to know where it first happens, the search can be a little frustrating. However, as before, the IEEE standard can help you; these illegal events (divide by zero, underflow or overflow, or invalid operations which cause NaNs) can be made to trigger exceptions, which will stop your code right at the point where they first occur; then if you run your code through a debugger, you can find the offending line.

    - - -

    We’ll discuss using the GNU compilers here; other compiler suites have similar options.

    - - -

    Let’s take a look at the following Fortran code:

    - - -
    program nantest
        real :: a, b, c

        a = 1.
        b = 2.

        c = a/b
        print *, c, a, b

        a = 0.
        b = 0.

        c = a/b
        print *, c, a, b

        a = 2.
        b = 1.

        c = a/b
        print *, c, a, b
    end program nantest

    If we compile this code with -ffpe-trap=invalid (I usually add zero and overflow, and even underflow if I think that’s causing me a problem in intermediate results), then the debugger can tell us the line where it all goes wrong:

    - - -
    $ gfortran -o nantest nantest.f90 -ffpe-trap=invalid,zero,overflow -g -static
    $ gdb nantest
    [...]
    (gdb) run
    Starting program: /scratch/ljdursi/Testing/fortran/nantest
      0.50000000       1.0000000       2.0000000    

    Program received signal SIGFPE, Arithmetic exception.
    0x0000000000400384 in nantest () at nantest.f90:13
    13          c = a/b
    Current language:  auto; currently fortran

    With the Intel Fortran compiler (ifort), using the option -fpe0 will do the same thing.

    - - -

    It’s a little trickier with C code; we have to actually insert a call to feenableexcept(), which enables floating point exceptions, and is defined in fenv.h:

    - - -
    #include <stdio.h>
    #include <fenv.h>

    int main(int argc, char **argv) {
        float a, b, c;
        feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);

        a = 1.;
        b = 2.;

        c = a/b;
        printf("%f %f %f\n", a, b, c);

        a = 0.;
        b = 0.;

        c = a/b;
        printf("%f %f %f\n", a, b, c);

        a = 2.;
        b = 1.;

        c = a/b;
        printf("%f %f %f\n", a, b, c);

        return 0;
    }

    but the effect is the same:

    - - -
    $ gcc -o nantest nantest.c -lm -g
    $ gdb ./nantest
    [...]
    (gdb) run
    Starting program: /scratch/s/scinet/ljdursi/Testing/exception/nantest
    1.000000 2.000000 0.500000

    Program received signal SIGFPE, Arithmetic exception.
    0x00000000004005d0 in main (argc=1, argv=0x7fffffffe4b8) at nantest.c:17
    17	    c = a/b;

    Either way, you have a much better handle on where the errors are occurring.

    \ No newline at end of file diff --git a/_posts/dursi/2012-1-13-present-and-future-computing-data-and-networks-committee-of-the-canadian-astronomical-society-cascahtml.md b/_posts/dursi/2012-1-13-present-and-future-computing-data-and-networks-committee-of-the-canadian-astronomical-society-cascahtml.md deleted file mode 100644 index a1ed4d4..0000000 --- a/_posts/dursi/2012-1-13-present-and-future-computing-data-and-networks-committee-of-the-canadian-astronomical-society-cascahtml.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2012-01-13 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/present-and-future-computing-data-and-networks-committee-of-the-canadian-astronomical-society-casca.html -slug: present-and-future-computing-data-and-networks-committee-of-the-canadian-astronomical-society-casca- -title: Present and Future Computing, Data, and Networks Committee of the Canadian - Astronomical Society (CASCA) ---- - -

    This document is a whitepaper I wrote for the CASCA Computing and Data committee outlining the computing needs for the Canadian astronomy community for the coming several years. It does a fairly decent job of laying out the diverse range of large-scale R&D computing needs for the national community.

    - - -

    Executive Summary

    - -

    Advanced research computing resources have never been so essential to the Canadian Astronomy and Astrophysics research community. In the past few years, astronomical researchers have benefited greatly from modern large-scale computing systems; a diverse range of resources, which are a good match to the diverse computing needs of our scientists; and good working relationships with existing providers, allowing flexibility and collaboration between these centres and research groups.

    - - -

    However, CASCA has concerns about the near future of advanced research computing available to its researchers. Here the Computing, Data, and Networks Committee of CASCA presents, on behalf of the Society, a summary of the current state of the computing needs, successes, and concerns of our researchers taken from previous consultative summaries and their updates. This is the first step of a process that will continue through the first half of 2013, which will include a comprehensive survey of the research computing needs of the Canadian Astronomy and Astrophysics community, and will investigate a variety of strategies for meeting those needs.

    - - -

    Early systems funded by the CFI NPF are already showing their age; in many cases they are out of their maintenance contract and are already starting to fail. The lack of any clear signs of new investment on the horizon means that even if existing systems were to continue operating perfectly, as other nations continue to invest in new research computing platforms, our researchers, using stagnant computing hardware, will not only fall behind our international competitors as data volumes continue to increase, but also be unable to make full use of prior investments.

    - - -

    When new funding does become available, the Canadian astronomy community would like to see changes in emphasis taken as lessons learned from the CFI NPF procurement. Previous investment focused largely on computing hardware. While this addressed a real and pressing need resulting from years of underinvestment, the research endeavor requires a more holistic approach. Computing hardware investments must be balanced with similar investments in storage, highly qualified personnel, software development, and networking to maximize results.

    - - -

    In this report, we recommend an urgent search for new and sustainable sources of advanced research computing funding; an increased focus on personnel, software development, and storage; maintaining a diverse range of systems; enabling major longer-term projects by committing resources for longer than the one-year allocation window currently offered by the RAC process; and continuing to enable close working relationships between research groups and computing providers, preferably as close to the researchers as possible. In addition, we recommend that CCI’s board, through the proposed Researcher Advisory Committee or otherwise, establish a direct relationship with CASCA (and similar professional groups), via persons charged with representing the needs of these research communities in planning for Compute Canada.

    \ No newline at end of file diff --git a/_posts/dursi/2014-1-16-scalable-data-analysis-in-rhtml.md b/_posts/dursi/2014-1-16-scalable-data-analysis-in-rhtml.md deleted file mode 100644 index ad08e36..0000000 --- a/_posts/dursi/2014-1-16-scalable-data-analysis-in-rhtml.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2014-01-16 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/scalable-data-analysis-in-r.html -slug: scalable-data-analysis-in-r -title: Scalable Data Analysis in R ---- - -

    R is a great environment for interactive analysis on your desktop, but when your data needs outgrow your personal computer, it’s not clear what to do next.

    - - -

    I’ve put together material for a day-long tutorial on scalable data analysis in R. It covers:

    - - -
    • A brief introduction to R for those coming from a Python background;
    • The bigmemory package for out-of-core computation on large data matrices, with a simple physical sciences example;
    • The standard parallel package, including what were formerly the snow and multicore facilities, using airline data as an example;
    • The foreach package, using airline data and simple stock data;
    • The Rdsm package for shared memory; and
    • A brief introduction to the powerful pbdR packages for extremely large-scale computation.
    - -

    The presentation for the material, in R markdown (so including the source code), is in the presentation directory; you can read the resulting presentation as markdown there, or as a PDF.

    - - -

    The R code from the slides can be found in the R directory.

    - - -

    Some data can be found in the data directory; but as you might expect in a workshop on scalable data analysis, the files are quite large! Mostly you can just find scripts for downloading the data; running make in the main directory will pull almost everything down, but a little more work needs to go into automating some of the production of the data products used.

    - - -

    Suggestions, as always, greatly welcomed.

    \ No newline at end of file diff --git a/_posts/dursi/2014-10-5-shell-for-scientistshtml.md b/_posts/dursi/2014-10-5-shell-for-scientistshtml.md deleted file mode 100644 index 76021d7..0000000 --- a/_posts/dursi/2014-10-5-shell-for-scientistshtml.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2014-10-05 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/shell-for-scientists.html -slug: the-shell-for-scientists -title: The Shell For Scientists ---- - -

    I’ve posted a half-day “The Shell for Scientists” tutorial that I’ve given variants on a number of times; the motivating problem, provided by Greg Wilson for a two-day set of tutorials at the University of Toronto, was cleaning up a bunch of auditory lab data on people’s cochlear implants.

    - - -

    The focus is on productivity and automation; PDF slides are available here (although I really should translate them into a markdown-based format to make them more re-usable).

    - - -

    Covered are a number of basic shell commands

    - - -
    • echo
    • pwd
    • cd
    • ls
    • man
    • file
    • cat
    • more
    • wc
    • mv
    • cp
    • rm
    • head
    • tail
    • sort
    • mkdir
    • rmdir
    • grep
    • for..do..done
    - -

    As well as simple script writing. There is some optional material on make (again, for automation) and ssh/scp (because that was frequently necessary for tutorials at SciNet). There are a number of hands-on exercises sprinkled throughout.

    \ No newline at end of file diff --git a/_posts/dursi/2014-12-20-machine-learning-for-scientistshtml.md b/_posts/dursi/2014-12-20-machine-learning-for-scientistshtml.md deleted file mode 100644 index 21f85f5..0000000 --- a/_posts/dursi/2014-12-20-machine-learning-for-scientistshtml.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2014-12-20 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/machine-learning-for-scientists.html -slug: machine-learning-for-scientists -title: Machine Learning for Scientists ---- - -

    I recently taught a 1-day machine learning workshop for scientists for the good folks at SciNetHPC. There was enough interest (nearly forty people signed up for a day-long session near the end of term) that we had to book a large-ish classroom.

    - - -

    There’s a lot of interest in the topic — which might even be surprising, given that a lot of the material is either familiar or pretty easy to digest for those who spend a lot of their time doing scientific data analysis. But for those coming to it for the first time and on their own, the difference in terminology (“features”? “shrinkage”? Wait, you just mean variables and regularization?) and the huge number of different methods available can be pretty baffling.

    - - -

    And I think it helps to have someone with a science background to explain how the approach taken to modelling differs from that in the sciences (especially the natural sciences), and why it is that way. Having that connection means that you can translate – so that the very real expertise and experience they do already have can be a benefit, rather than throwing up barriers. (“Bias-Variance tradeoff? You mean you’re willing to introduce error just to get the error bars down a bit – centred on the wrong answer? What kind of monster are you, and what dangerous nonsense is this machine learning stuff?”)

    - - -

    This was the first time teaching this material, and while there are some things I’d like to improve (especially doing more on PCA and clustering, although I don’t know what I’d take out for a 1-day class), I think that it went fairly well. The presentation can be seen online, and everything’s available on github.

    - - -

    Incidentally, this was my first time using Slidify for a presentation, and I really enjoyed it – this may be the first markdown/html5 setup that finally gets me willingly moving away from Keynote for this sort of material. Obviously, Slidify integrates much more closely with R than with python, particularly for graphics; but still, it was a pleasure to use.

    \ No newline at end of file diff --git a/_posts/dursi/2014-9-4-hadoop-for-hpcershtml.md b/_posts/dursi/2014-9-4-hadoop-for-hpcershtml.md deleted file mode 100644 index 29bb090..0000000 --- a/_posts/dursi/2014-9-4-hadoop-for-hpcershtml.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2014-09-04 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/hadoop-for-hpcers.html -slug: hadoop-for-hpcers -title: Hadoop For HPCers ---- - -

    I and my colleague Mike Nolta have put together a half-day tutorial on Hadoop - briefly covering HDFS, Map Reduce, Pig, and Spark - for an HPC audience, and put the materials on github.

    - - -

    The Hadoop ecosystem of tools continues to rapidly grow, and now includes tools like Spark and Flink that are very good for iterative numerical computation - either simulation or data analysis. These tools, and the underlying technologies, are (or should be) of real interest to the HPC community, but most materials are written for audiences with web application or maybe machine-learning backgrounds, which makes it harder for an HPC audience to see how they can be useful to them and how they might be applied.

    - - -

    Most of the source code is Python. Included on GitHub are all sources for the examples, a vagrantfile for a VM to run the software on your laptop, and the presentation in Markdown and PDF. Feel free to fork, send pull requests, or use the materials as you see fit.

    \ No newline at end of file diff --git a/_posts/dursi/2015-12-18-approximate-squiggle-mappinghtml.md b/_posts/dursi/2015-12-18-approximate-squiggle-mappinghtml.md deleted file mode 100644 index 0b3d415..0000000 --- a/_posts/dursi/2015-12-18-approximate-squiggle-mappinghtml.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2015-12-18 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/approximate-squiggle-mapping.html -slug: approximate-mapping-of-nanopore-squiggle-data-with-spatial-indexing -title: Approximate Mapping of Nanopore Squiggle Data with Spatial Indexing ---- - -

    Over at the Simpson Lab blog, I have a post describing a novel method for Directly Mapping Squiggle Data, using k-d trees to map segmented kmers; a simple proof of concept is available on github.

    \ No newline at end of file diff --git a/_posts/dursi/2015-3-2-spark-in-hpc-clustershtml.md b/_posts/dursi/2015-3-2-spark-in-hpc-clustershtml.md deleted file mode 100644 index 0168eae..0000000 --- a/_posts/dursi/2015-3-2-spark-in-hpc-clustershtml.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2015-03-02 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/spark-in-hpc-clusters.html -slug: spark-in-hpc-clusters -title: Spark in HPC clusters ---- - -

    Over the past several years, as research computing centres and others who run HPC clusters tried to accommodate other forms of computing for data analysis, much effort went into trying to incorporate Hadoop jobs into the scheduler along with other more traditional HPC jobs. It never went especially well, which is a shame, because it seems that those past unsuccessful attempts have discouraged experimentation with related next-generation technologies which are a much better fit for large-scale technical computing.

    - - -

    Hadoop v1 was always going to be a niche player and an awkward fit for big technical computing - and HPCers weren’t the only ones to notice this. Hadoop MapReduce’s mandatory dumping of output to disk after every Map/Reduce stage rendered it nearly unusable for any sort of approach which required iteration, or interactive use. Machine learning users, who often rely on many of the same iterative linear algebra solvers that physical science simulation users need, equally found Hadoop unhelpful. Hadoop v1 solved one set of problems – large single-pass data processing – very well, but those weren’t the problems that the technical computing community needed solved.

    - - -

    The inefficiency of flushing to disk wasn’t necessarily the difficulty that HPC centres had with incorporating Hadoop into their clusters, however. Dumping to disk could be sped up with caching, or SSDs. The real issue was with HDFS, the filesystem which Hadoop relies on. Because every job needed very rapid access to its data – to read the entire set in to the compute nodes, do minimal processing, then flush it back out – the file system was intimately tied to Hadoop cluster scheduling, which worked very hard (reasonably enough) to schedule the compute next to the data. But with Hadoop “on demand” in a cluster, how is this to work? One could spin up a new HDFS within each Hadoop job – but now the user has to have the new empty HDFS ingest the data files (probably with replication) initially, and then stage the data out of the doomed-to-be-shut-down HDFS afterwards. But this staging in and out will certainly take substantially longer than even the rest of the job’s I/O, which already likely dominates runtime. One can reserve a number of nodes for Hadoop jobs and keep a persistent HDFS store there, but this now defeats the purpose of running Hadoop in the cluster; one might as well just hive off those nodes into a separate system. Probably the best approach, which worked better than I think anyone had any right to expect, was to run Hadoop on Lustre, but it remained awkward even for those who already were using Lustre for their cluster.

    - - -

    The HPC community’s reaction to those problems – problems with a technology they were already skeptical of due to Not Invented Here Syndrome – was largely to give up on anything that seemed “Hadoopy” as a sensible approach. The large-scale machine learning community, which didn’t necessarily have that luxury, was instead already looking for in-memory approaches to avoid this problem entirely.

    - - -

    Two very promising “post-Hadoop” in-memory approaches which are much better suited to large-scale technical computing than Hadoop v1 ever was are also Apache projects - Spark and Flink. Flink has some really interesting features - including using a database-like query optimizer for almost all computations - but there’s no real question that currently, Spark is the more mature and capable of the offerings.

    - - -

    Spark can make use of HDFS, and other related file stores, but those aren’t requirements; since iterative computation can be done in memory given enough RAM, there is much less urgency in having the data local to the computation if the computation is long enough. Instead, Spark can simply use a POSIX interface to whatever filesystem is already running on your cluster.

    - - -

    Spark not only lacks hard HDFS-style requirements, but can also run in standalone mode without a heavyweight scheduler like Yarn or Mesos. This standalone mode makes it quite easy to simply spin up a Spark “cluster” within a job, reading from the file system as any other job would. (Earlier versions of Spark made this unnecessarily difficult, with the standalone startup scripts having hardcoded values that assumed only one such job at a time; this is somewhat easier now.)

    - - -

    Thus, below is a little job submission script for a Spark job on SciNet; it starts up a Spark master on the head node of the job, starts the workers, and runs a simple wordcount example.

    - - -

    Spark’s well-thought-out Python interface, standalone mode, and filesystem-agnostic approach make it a much better match for traditional HPC systems than Hadoop technologies ever were.

    - - -

    Spark is covered a little bit in my and Mike Nolta’s Hadoop-for-HPCers workshop.

    - - -
    #!/bin/bash
    #
    #PBS -l nodes=3:ppn=8,walltime=0:20:00
    #PBS -N spark-test

    nodes=($( cat $PBS_NODEFILE | sort | uniq ))
    nnodes=${#nodes[@]}
    last=$(( $nnodes - 1 ))

    cd $PBS_O_WORKDIR

    export SPARK_HOME=/scinet/gpc/Libraries/spark/spark-1.0.2-bin-hadoop2/
    ssh ${nodes[0]} "module load java; cd ${SPARK_HOME}; ./sbin/start-master.sh"
    sparkmaster="spark://${nodes[0]}:7077"

    # start a worker on every node in the job, pointed at the master
    for i in $( seq 0 $last )
    do
        ssh ${nodes[$i]} "cd ${SPARK_HOME}; module load java; nohup ./bin/spark-class org.apache.spark.deploy.worker.Worker ${sparkmaster} &> ${SCRATCH}/work/nohup-${nodes[$i]}.out" &
    done

    rm -rf ${SCRATCH}/wordcounts

    cat > sparkscript.py <<EOF
    from pyspark import SparkContext

    sc = SparkContext(appName="wordCount")
    file = sc.textFile("${SCRATCH}/moby-dick.txt")
    counts = file.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a+b)
    counts.saveAsTextFile("${SCRATCH}/wordcounts")
    EOF

    module load java
    ${SPARK_HOME}/bin/spark-submit --master ${sparkmaster} sparkscript.py

    ssh ${nodes[0]} "module load java; cd ${SPARK_HOME}; ./sbin/stop-master.sh"
    for i in $( seq 0 $last )
    do
        ssh ${nodes[$i]} "killall java"
    done
    wait
    \ No newline at end of file diff --git a/_posts/dursi/2015-5-1-hpcmpi-on-rce-podcasthtml.md b/_posts/dursi/2015-5-1-hpcmpi-on-rce-podcasthtml.md deleted file mode 100644 index 55d4f7b..0000000 --- a/_posts/dursi/2015-5-1-hpcmpi-on-rce-podcasthtml.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2015-05-01 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/hpcmpi-on-rce-podcast.html -slug: hpc-mpi-on-rce-podcast -title: HPC+MPI on RCE Podcast ---- - -

    In the latest episode of the RCE podcast, Jeff Squyres, Brock Palen, and I spoke about the HPC and MPI series of blogposts and the community reaction.

    - - -

    It was a really interesting discussion; Brock has worked closely with an enormous variety of researchers and helps run an HPC centre, while Jeff deeply understands HPC networking, from getting ones and zeros onto the wires at the lowest level of hardware up to being an extremely active member of the MPI Forum.

    - - -

    I was really pleased that they asked me to join them; I’ve been listening to their show since at least the VisIt episode in 2009 (I had just missed the Hadoop episode, it turns out) and for some years they were the only big computing podcast around.

    - - -

    If you were interested in the MPI discussion, you might want to listen to this most recent episode; if you’re interested in big computing software projects more broadly, you should definitely consider subscribing to the podcast.

    \ No newline at end of file diff --git a/_posts/dursi/2015-5-1-understanding-poahtml.md b/_posts/dursi/2015-5-1-understanding-poahtml.md deleted file mode 100644 index 7374b5b..0000000 --- a/_posts/dursi/2015-5-1-understanding-poahtml.md +++ /dev/null @@ -1,19 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2015-05-01 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/understanding-poa.html -slug: understanding-partial-order-alignment-for-multiple-sequence-alignment -title: Understanding Partial Order Alignment for Multiple Sequence Alignment ---- - -

    Over at the Simpson Lab blog, I have an explainer on Understanding Partial Order Alignment, an under-appreciated method for multiple sequence alignment; I hope the explanation there (and explanatory implementation) is useful to those exploring graph-based approaches to alignment.

    \ No newline at end of file diff --git a/_posts/dursi/2015-5-19-io-performancehtml.md b/_posts/dursi/2015-5-19-io-performancehtml.md deleted file mode 100644 index fac645e..0000000 --- a/_posts/dursi/2015-5-19-io-performancehtml.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2015-05-19 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/io-performance.html -slug: on-random-vs-streaming-i-o-performance-or-seek-and-you-shall-find-eventually- -title: On Random vs. Streaming I/O Performance; Or seek(), and You Shall Find --- - Eventually. ---- - -

    At the Simpson Lab blog, I’ve written a post on streaming vs random access I/O performance, an important topic in bioinformatics. Using a very simple problem (randomly choosing lines in a non-indexed text file) I give a quick overview of the file system stack and what it means for streaming performance, and reservoir sampling for uniform random online sampling.
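
    Since reservoir sampling comes up: a minimal sketch of the classic one-pass version (often called Algorithm R), choosing k lines uniformly at random from a stream of unknown length; the file name is just a placeholder:

    import random

    def reservoir_sample(stream, k):
        """Keep k items chosen uniformly at random from a stream, in one pass."""
        sample = []
        for i, item in enumerate(stream):
            if i < k:
                sample.append(item)
            else:
                # Item i survives with probability k/(i+1), which also keeps
                # every earlier item's inclusion probability at k/(i+1).
                j = random.randrange(i + 1)
                if j < k:
                    sample[j] = item
        return sample

    with open("lines.txt") as f:
        print(reservoir_sample(f, 10))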

    \ No newline at end of file diff --git a/_posts/dursi/2016-10-14-mpis-place-in-big-computinghtml.md b/_posts/dursi/2016-10-14-mpis-place-in-big-computinghtml.md deleted file mode 100644 index 865fafb..0000000 --- a/_posts/dursi/2016-10-14-mpis-place-in-big-computinghtml.md +++ /dev/null @@ -1,53 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2016-10-14 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/mpis-place-in-big-computing.html -slug: mpi-s-place-in-big-computing -title: MPI's Place in Big Computing ---- - -

    The organizers of EuroMPI 2016 were kind enough to invite me to give a keynote and participate in a panel at their meeting, which was held at the end of September in beautiful Edinburgh. The event was terrific, with lots of very interesting work going on in MPI implementations and with MPI.

    - - -

    The topic of my talk was “MPI’s Place in Big Computing”; the materials from the talk can be found on github. The talk, as you might expect, included discussion of high-productivity big data frameworks, but also — and missing from the discussion in my “HPC is dying” blog post — the “data layer” frameworks that underpin them.

    - - -

    I think a lot of people have taken, quite reasonably, that blog post to suggest that Spark, for example, is a competitor to MPI; the point I wanted to make is a little more nuanced than that.

    - - -

    I’m actually skeptical of Spark’s utility for (e.g.) large-scale simulations. However attractive the model is from a variety of points of view, absent some huge breakthrough I don’t think that functional models with immutable data can support the performance, memory requirements, or performance predictability we require. (But who knows; maybe that’ll be one of the compromises we find we have to make on the road to exascale).

    - - -

    But whatever you might think of Spark’s efficacy for your particular use case,

    - - -
    • A lot of people manifestly find it to be extremely useful for their use case; and
    • Performance is quite important to those communities.
    - -

    So given that, why isn’t Spark built atop MPI for network communications? And why isn’t TensorFlow, or Dask, or SeaStar?

    - - -

    The past five years have seen a huge number of high-productivity tools for large-scale number crunching gain extremely rapid adoption. Even if you don’t like those particular tools for your problems, surely you’d like for there to exist some tools like that for the traditional HPC community; why do other communications frameworks support this flourishing ecosystem of platforms, and MPI doesn’t?

    - - -

    There’s another argument there, too - simply from a self-preservation point of view, it would be in MPI’s interest to be adopted by a high-profile big data platform to ensure continued success and support. But none are; why? It’s not because the developers of Spark or at Google are just too dumb to figure out MPI’s syntax.

    - - -

    Going through what does get used for these packages and what doesn’t — which is what I do in this talk — I think the issues become fairly clear. MPI wants to be both a low-level communications framework and a higher-level programming model, and ends up tripping over its own feet trying to dance both dances. As a communications “data plane” it imposes too many high-level decisions on applications — no fault tolerance, restrictive communications semantics (in-order and arrival guarantees) — and provides too few services (e.g. a performant active message/RPC layer). And as a high-level programming model it is too low level and is missing different services (communications-aware scheduling came up in several guises at the meeting).

    - - -

    I don’t think that’s insurmountable; I think inside MPI implementations there is a performant, network-agnostic low-level communications layer trying to get out. Exposing more MPI runtime services is a move in the right direction. I was surprised at how open the meeting participants were to making judicious changes — even perhaps breaking some backwards compatibility — in the right directions.

    - - -

    Thanks again to the organizers for extending the opportunity to participate; it was great.

    - - -

    My slides can be seen below or on github, where the complete materials can be found.

    \ No newline at end of file diff --git a/_posts/dursi/2016-5-10-spark-chapel-tensorflow-workshop-at-umichhtml.md b/_posts/dursi/2016-5-10-spark-chapel-tensorflow-workshop-at-umichhtml.md deleted file mode 100644 index 8146d3b..0000000 --- a/_posts/dursi/2016-5-10-spark-chapel-tensorflow-workshop-at-umichhtml.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2016-05-10 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/spark-chapel-tensorflow-workshop-at-umich.html -slug: spark-chapel-tensorflow-workshop-at-umich -title: Spark, Chapel, TensorFlow- Workshop at UMich ---- - -

    The kind folks at the University of Michigan’s Center for Computational Discovery and Engineering (MICDE), which is just part of the very impressive Advanced Research Computing division, invited me to give a workshop there a couple of months ago about the rapidly-evolving large-scale numerical computing ecosystem.

    - - -

    There’s lots that I want to do to extend this to a half-day length, but the workshop materials — including a VM that can be used to play with Spark, Chapel and TensorFlow, along with Jupyter notebooks for each — can be found on GitHub and may be of some use to others as they stand.

    - - -

    The title and abstract follow.

    - - -

    Next Generation HPC? What Spark, TensorFlow, and Chapel are teaching us about large-scale numerical computing

    - -

    For years, the academic science and engineering community was almost alone in pursuing very large-scale numerical computing, and MPI - the 1990s-era message passing library - was the lingua franca for such work. But starting in the mid-2000s, others became interested in large-scale computing on data. First, internet-scale companies like Google and Yahoo! started performing fairly basic analytics tasks at enormous scale, and now many others are tackling increasingly complex and data-heavy machine-learning computations, which involve very familiar scientific computing tasks such as linear algebra, unstructured mesh decomposition, and numerical optimization. But these new communities have created programming environments which emphasize what we’ve learned about computer science and programmability since 1994 - with greater levels of abstraction and encapsulation, separating high-level computation from the low-level implementation details - and some in HPC are starting to notice. This talk will give a brief introduction to the Apache Spark environment and Google’s TensorFlow machine-learning package for high-level numerical computation, as well as the HPC-focused Chapel language from Cray, to show where each can be used today and how they might be used in the future. The slides for this talk, and examples for each package along with a virtual machine which can be used for running them, will be available at https://github.com/ljdursi/Spark-Chapel-TF-UMich-2016 .

    - -
    \ No newline at end of file diff --git a/_posts/dursi/2016-9-9-jupyter-for-bioinformaticshtml.md b/_posts/dursi/2016-9-9-jupyter-for-bioinformaticshtml.md deleted file mode 100644 index 7254066..0000000 --- a/_posts/dursi/2016-9-9-jupyter-for-bioinformaticshtml.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2016-09-09 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/jupyter-for-bioinformatics.html -slug: jupyter-notebooks-for-performing-and-sharing-bioinformatics-analyses -title: Jupyter Notebooks for Performing and Sharing Bioinformatics Analyses ---- - -

    I was asked to do a half-day tutorial at the Great Lakes Bioinformatics conference Workshop session. The focus was mainly on R, with some Python as well. We covered:

    - - -
    • The basics of Jupyter notebooks - what they are and how they work
    • How to install and run Jupyter notebooks on their laptop, in R and Python
    • How to perform interactive analyses in a web browser using Jupyter
    • Using markdown and LaTeX to document an analysis
    • How to “Port” an R bioinformatics workflow from some scripts into a Jupyter notebook
    • How to share a Jupyter notebook online, using three different approaches:
      • SageMathCloud
      • GitHub, and
      • mybinder.org
    - -

    I think it went pretty well; the materials are available on GitHub. It was largely hands-on, so apart from some introductory slides, it was mainly about giving a tour of the notebook and how to use Jupyter to share analyses; the “scripts” that I went through in presenting the material were aimed at having the students produce the notebooks here.

    \ No newline at end of file diff --git a/_posts/dursi/2017-2-15-beyond-single-core-rhtml.md b/_posts/dursi/2017-2-15-beyond-single-core-rhtml.md deleted file mode 100644 index 273453f..0000000 --- a/_posts/dursi/2017-2-15-beyond-single-core-rhtml.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2017-02-15 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/beyond-single-core-R.html -slug: beyond-single-core-r-parallel-data-analysis -title: Beyond Single Core R- Parallel Data Analysis ---- - -

    I was asked recently to do a short presentation for the Greater Toronto R Users Group on parallel computing in R; my slides can be seen below or on github, where the complete materials can be found.


I covered some of the same material as in a half-day workshop a couple of years earlier (though, obviously, without the hands-on component):

• How to think about parallelism and scalability in data analysis
• The standard parallel package, including what were the snow and multicore facilities, using airline data as an example
• The foreach package, using airline data and simple stock data
• A summary of best practices

with some bonus material tacked on the end touching on a couple of advanced topics.
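(For readers who don't live in R: the core pattern in both parallel and foreach is scattering independent chunks of a computation across workers and gathering the results. A rough Python analogy - mine, not from the talk - using concurrent.futures:

from concurrent.futures import ProcessPoolExecutor

def mean_delay(delays):
    # Stand-in for a per-chunk analysis, e.g. one month of airline delay data
    return sum(delays) / len(delays)

# Hypothetical per-month flight-delay chunks
chunks = [[12.0, 5.5, 31.0], [2.5, 18.0], [9.0, 7.5, 11.0, 3.0]]

if __name__ == "__main__":
    # Each chunk is handed to a separate worker process; results come back in order.
    with ProcessPoolExecutor() as pool:
        monthly_means = list(pool.map(mean_delay, chunks))
    print(monthly_means)

The R packages discussed in the slides wrap the same scatter/gather idea with different ergonomics.)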


I was quite surprised at how little had changed since late 2014, other than the further development of SparkR (which I didn’t cover) and the interesting but seemingly little-used future package. I was also struck by how hard it is to find similar materials online covering a range of parallel computing topics in R - it’s rare enough that even this simple effort made it into the HPC task view on CRAN (under “related links”). R continues to grow in popularity for data analysis; is this all desktop computing? Is Spark siphoning off the clustered-dataframe usage?


(This was also my first time with RPres in RStudio; wow, not a fan - RPres was not ready for general release. And I’m a big fan of RMarkdown.)

    \ No newline at end of file diff --git a/_posts/dursi/2017-6-1-compute-canadianhtml.md b/_posts/dursi/2017-6-1-compute-canadianhtml.md deleted file mode 100644 index 1b95a2b..0000000 --- a/_posts/dursi/2017-6-1-compute-canadianhtml.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2017-06-01 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/compute-canadian.html -slug: compute-canadian-building-a-successful-and-federated-computational-research-enterprise-together -title: Compute Canadian- Building a successful and federated computational research - enterprise, together ---- - -

Canada is a federated nation, and this is particularly visible in areas of research funding, where both the federal and provincial orders of government play a role. In building a successful digital research infrastructure to support Canadian science and scholarship, we must recognize that reality, and rely on the successful examples of many organizations in Canada and around the world that embrace such a federated approach.


In this discussion paper, my colleague Jill Kowalchuck and I lay out what we hope to be the beginnings of a discussion of what a renewed federation for supporting Canadian science with advanced research computing and data could look like.


    Executive Summary


Computing and data, and the expertise and tools to make use of both, are now central to all fields of study. Ten years after the creation of Compute Canada in response to the National Platforms Fund call, and after the Naylor Report on science funding, it is an apt time for the Canadian community built around this national research platform to take stock. Is it doing what we need it to do for Canadian researchers? Is it working the way we want it to? What should a Canadian computation and data platform for supporting research look like in the coming years? This document aims to begin that discussion within the community.


Here we propose seven principles to guide us in this discussion — that our project should serve Canadian research in a researcher-centred, service-oriented, and truly national way; and that it should operate as a true federation of equal partners, interoperable but not identical, collaborative and up-to-date. We suggest in particular that it is vital that our national platform is adaptive and responsive to researchers, making choices driven by research needs and not technical choices, and should make full use of the diversity and specialization that a Canadian federation and its partners offer.


From those principles, we make evidence-based proposals for a renewed Canadian organization. Comparisons with successful examples of federated organizations within Canada and abroad suggest that while the basic architecture of our federation is sound, important roles and relationships need to be clarified. While a central office must be responsible for the processes of defining priorities, strategies, and standards of interoperability, a successful federation requires those processes to have buy-in from partners committed to the goals of the federation. The Board of Directors of the central office in a federation must have the experience and training to handle the delicate task of governing a central office while being responsible to a national community. The Members need adequate visibility into the operations of the central office and the federation as a whole so that they can support their vital role in the organization. And that engagement needs to extend to all who are invested in the success of research in Canada: regional staff and Boards, institutional staff, researchers and funders, and other organizations that provide digital infrastructure for research in Canada. This document focusses on Compute Canada in particular, but the principles and proposals apply to any digital research infrastructure provider, or the system as a whole.


Success for this document will mean starting conversations, inspiring other documents and differing points of view, and the emergence of a consensus within the community on what a renewed national platform for the next ten years looks like. That does not mean this document is a straw-man. The authors have played roles in the national platform starting at its inception, from researcher to consortium and regional (east and west) staff and management, and within the Compute Canada central office, and hope that experience plus the benefit of some distance have produced a coherent and compelling vision of what the Compute Canada national project could be. But what matters is not this proposal; it is what the community as a whole decides it wants its national platform to be.

    \ No newline at end of file diff --git a/_posts/dursi/2017-6-4-new-computing-landscape-and-chapelhtml.md b/_posts/dursi/2017-6-4-new-computing-landscape-and-chapelhtml.md deleted file mode 100644 index 29bb9b4..0000000 --- a/_posts/dursi/2017-6-4-new-computing-landscape-and-chapelhtml.md +++ /dev/null @@ -1,31 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2017-06-04 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/new-computing-landscape-and-chapel.html -slug: chapel-s-home-in-the-landscape-of-new-scientific-computing-languages -title: Chapel's Home in the Landscape of New Scientific Computing Languages ---- - -

I was invited to speak at this past weekend’s fourth annual Chapel Implementers and Users Workshop (CHIUW 2017). It was a great meeting, with lots of extremely high-quality talks on work being done with and on Chapel. The slides from the presentations will be up shortly, and I recommend them - the libfabric, KNL, use-after-free tracking, and GraphBLAS work was of particular interest to me. The Code Camp on the next day, working with members of the Chapel team on individual projects, was also a lot of fun.


The topic of my own talk was “Chapel’s Home in the Landscape of New Scientific Computing Languages (and what it can learn from the neighbours)”; the materials from the talk can be found on GitHub. I described the sorts of problems I’m particularly interested in, surveyed some of the languages/frameworks in there, and tried to identify what I saw as Chapel’s role in the environment.


My slides can be seen below or on GitHub, where the complete materials can be found.

    \ No newline at end of file diff --git a/_posts/dursi/2020-10-14-jobs_managing_research_computing_teamshtml.md b/_posts/dursi/2020-10-14-jobs_managing_research_computing_teamshtml.md deleted file mode 100644 index 1bca0f0..0000000 --- a/_posts/dursi/2020-10-14-jobs_managing_research_computing_teamshtml.md +++ /dev/null @@ -1,113 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2020-10-14 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/jobs_managing_research_computing_teams.html -slug: things-i-learned-from-looking-at-500-research-computing-manager-jobs-over-10-months -title: Things I Learned from Looking at 500 Research Computing Manager Jobs over 10 - Months ---- - -

I write a weekly newsletter for research computing managers, team leads, or those aspiring to those roles. One of the things I’ve wanted to emphasize in the newsletter is that managing research computing teams is a profession in and of itself, and worth doing well. Part of that is emphasizing the existence of career opportunities.


So since the beginning I’ve included job listings and maintained a job board, posting about 500 such jobs over the past 10 months and removing them as they become filled or otherwise unavailable. My main criterion for such jobs is whether or not I would describe the work as principally about managing or leading a research computing team - admittedly a fuzzy definition.


Over the course of examining those 500 jobs - and looking through many many more that never made it to the board - I’ve learned some things:


There are a lot of jobs out there for people managing research computing teams. I’ve never had any trouble finding some each week to put on the job board, or finding highlights interesting enough to list at the end of the newsletter.


There are certainly many more I’m missing. As the field matures there are starting to be job boards for research software development or for particular sub-fields of research computing like bioinformatics. But, consistent with research’s neglect of management as something that needs to be done and done well, no such resources exist for the managers in those important roles. So I have a go-to list of Google and other searches for jobs which I go through a couple of times a week.


In research, when you’re doing a literature search and you start hitting the same papers again and again, you’re pretty sure you’ve got a mostly complete list of references as a starting point. I’m nowhere near that with my managing research computing teams job list, largely because the names we use for these roles vary so widely. So I’m confident that I only see a fraction of these jobs. (You can help out by submitting any jobs you know about).


Research computing teams are broadening, and so is the need for managers. Where this is most obvious is in data science or data engineering teams, which have spread to every sector and every industry. Generic “Manager, Data Science” jobs are so plentiful that I don’t list most of them - many of them are more operational rather than “jobs leading research computing teams” - but even the ones that make the cut are in sectors from health to transportation to retail to engineering. There are increasingly data engineering, cloud architecture, etc. roles for supporting research computing efforts, to say nothing of ML/AI jobs. And there are countless management/team lead jobs for specialist research computing in health, biology, and biomedicine.


Research data management is increasingly employable. As the initial data science and data engineering work in organizations matures, many institutions are realizing that they now need principled approaches to data governance, stewardship, and modelling. This is happening most rapidly in heavily regulated industries — health, finance — but is starting to percolate outwards. Those who have maintained and curated data resources for research, or who have supported those that do, will be surprised at the number of jobs in the private sector for doing similar work.


“Traditional” research computing team management jobs remain, and they take forever to fill. There are definitely still routinely “Director of Research Computing, University of Somethingorother” jobs out there. And I don’t know whether it’s because of the pandemic, or because of the competition from other sectors, but such jobs are taking forever to fill this year. I routinely see them open for months, and then reposted one or more times. I see this both for managers of teams running on-premises hardware and for teams mainly doing software development.


Despite the talk of RSE units, most research computing jobs within academic institutions are lone outposts. While in companies research computing - data science, computing resource management, software development - tends to be centralized (even if it is matrixed out or embedded into other teams), in academia we’re definitely not there - most of the team lead/manager jobs I see in Universities are for small teams embedded in a single institute or project. I think that’s a shame; it greatly reduces the opportunity for cross-pollination, learning, and developing best practices, makes work less efficient and less satisfying, and it makes teams more management-heavy than they need to be.

    \ No newline at end of file diff --git a/_posts/dursi/2020-12-4-reseach-infrastructurehtml.md b/_posts/dursi/2020-12-4-reseach-infrastructurehtml.md deleted file mode 100644 index 8f1dd2b..0000000 --- a/_posts/dursi/2020-12-4-reseach-infrastructurehtml.md +++ /dev/null @@ -1,36 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2020-12-04 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/reseach-infrastructure.html -slug: when-research-infrastructure-is-and-isn-t-maintained -title: When Research Infrastructure Is and Isn't Maintained ---- - -

    (Note: This post is adapted from #53 of the Research Computing Teams Newsletter)


    There were two big stories in the news this week (as I write this, at the end of 2020) about what’s possible with sustained research infrastructure funding and what happens when research infrastructure isn’t sustained.


In the first, you’ve probably read about AlphaFold, DeepMind’s effort to bring deep learning to protein folding. It did very well in the 14th annual Critical Assessment of (protein) Structure Prediction (CASP) contest. Predictably but unfortunately, Google’s press releases wildly overhyped the results - “Protein Folding Solved”.


Most proteins fold very robustly in the chaotic environment of the cell, and so it’s expected that there should be complex features that predict how the proteins’ folded configurations look. We still don’t know anything about the model AlphaFold used - other than that it did very well on these 100 proteins - or how it was trained. There are a lot of questions about how it will work with more poorly behaved proteins - a wrong confident prediction could be much worse than no prediction. But it did get very good results, and with a very small amount of computational time to actually make the predictions. That raises a lot of hope for the scope of near-term future advances.


But as Aled Edwards points out on Twitter, the real story here is one of long-term, multi-decadal investment in research infrastructure, including research data infrastructure, by the structural biology community. The Protein Data Bank was set up 50 years ago (!!), and a culture of data sharing of these laboriously solved protein structures was set up, with a norm of contributing to (and helping curate) the data bank. That databank has been continuously curated and maintained, new techniques developed, eventually leading to the massive database on which methods can now be trained and results compared.


    It’s the sustained funding and support - monetarily but also in terms of aligning research incentives like credit - which built the PDB. The other big story we heard this week tells us that you can’t just fund a piece of infrastructure, walk away, and expect the result to be self-sustaining. On December 1st, the iconic Arecibo Radio Telescope in Puerto Rico collapsed. The telescope was considered important enough to keep running - there was no move to decommission it until late November - but not important enough to keep funding the maintenance to keep it functioning.


    Overhead image of a broken Arecibo Telescope


Digital research infrastructure - software, data resources, computing systems - falls apart at least as quickly without ongoing funded effort to maintain it. It’s not about whether these digital pieces of infrastructure are “sustainable”; it’s whether or not they are sustained. Too many critical pieces of our digital research infrastructure are not being sustained.

    \ No newline at end of file diff --git a/_posts/dursi/2020-4-18-cobol-imperial-college-and-sustained-scientific-softarehtml.md b/_posts/dursi/2020-4-18-cobol-imperial-college-and-sustained-scientific-softarehtml.md deleted file mode 100644 index 5b8576d..0000000 --- a/_posts/dursi/2020-4-18-cobol-imperial-college-and-sustained-scientific-softarehtml.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2020-04-18 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/cobol-imperial-college-and-sustained-scientific-softare.html -slug: cobol-imperial-college-bursty-maintenance-and-sustained-scientific-software -title: COBOL, Imperial College, Bursty Maintenance, and Sustained Scientific Software ---- - -

We’ve all read about the huge rise in unemployment claims causing unprecedented loads on US state software systems, with the situation so dire that the governor of New Jersey put out an urgent call for COBOL programmers. It’s worth looking at this from the point of view of research software, where we need software to be sustainable and reproducible for long periods of time.


The systems that suddenly need COBOL developers have often been chugging away with maintenance and tweaks for 40–50 years. This is an almost unfathomable success in the world of software. So the current issue clearly isn’t with the quality of the software itself per se.


Is COBOL being “obsolete” the problem? I mean, look at that record of success again. COBOL is a proven, perfectly serviceable, domain-specific language for these sorts of batch tasks. There are ways to connect to tools and services written in other languages, so it can coexist with other systems. The lack of (say) a vibrant and rapidly-evolving ecosystem of third-party packages isn’t necessarily a bad thing here. (How innovative and cutting-edge do you want the system that sends out your pension cheques to be, exactly, when the time comes? Do you really want someone to accidentally leftpad your bank account?)


Yes, people coming in to maintain the software for the first time will have to familiarize themselves with a new, old, language. But people in research or open-source software learn an unfamiliar language to contribute to a code base every day. Even if they knew the language, they would still have to learn the codebase itself, the idioms, and the problem domain. All of those things can be quickly learned by new developers if there is documentation and tests, and especially if there are people who have recently been maintaining the code base to help. And that’s the issue here.


These COBOL systems weren’t poorly designed, or obsolete, or a bad match to their requirements. Easily handling 100x the previously expected maximum rate of applications isn’t a feature, it’s a symptom of giddy overengineering. The requirements just changed suddenly. And when that happened, the people, procedures, and resources weren’t in place to do the necessary maintenance.


There is no such thing as infrastructure which does not require maintenance, and the need for that maintenance is often quite bursty. This is just as true in research software as it is in governmental systems. Research software which goes into production needs to be written in a maintainable fashion, but that’s not enough. There has to be funding support to keep in place the people, procedures, and resources necessary to maintain that software, likely in bursts. And those resources have to remain in place between bursts.


The bursty nature of necessary maintenance has also come up in research software, in the saga of the Imperial College epidemic modelling software. When COVID-19 arrived, this tool suddenly moved from a mildly interesting research code to a key input into UK domestic policy. Transparency and flexibility leapt from being nice-to-haves to key requirements, and the people, procedures, documentation, tests, and resources weren’t in place to add them.


The importance and urgency of epidemic modelling meant that expertise and resources from many places were made available to extend and eventually rewrite the code. But this isn’t a sustainable model for research computing software, any more than it is for unemployment application processing systems.


We still genuinely don’t know how to reliably provide maintenance, bursty or otherwise, for software, shared databases, or systems in our research communities. Our funding models are all built around supporting experiments, observations, or theoretical works — short-term projects which start, proceed, result in publications and other research outputs, and are then done. Mechanisms for the ongoing support of evolving research inputs aren’t even a work in progress — they’re absent.


If experimental methods work develops new kinds of equipment or reagents which are useful to other researchers, then a vendor starts manufacturing and selling those items to researchers, with money that comes out of their grants — and that’s the sustainability model. We don’t have that for ongoing efforts in software, databases, or even reliably for hardware shared at a larger scale than a single organization yet.


For software undergoing active development, there are at least plausible approaches proposed. Some of them look, reasonably enough, like the research equipment model above. Add a modest amount of money to grants earmarked for distribution to software, databases, or systems that the research group relies on. Maybe that would work! But it would almost certainly preferentially fund projects that are being actively worked on, taking feature requests and bug reports for software or new submissions for databases.


For mature, quiescent resources that “just work” and so fade into the background, the tools that don’t need development until they suddenly do, we need other solutions. Likely we need centres of expertise in research computing, populated by professionals as advocated by RSE societies around the world, with named maintainers even for research tools actively used but not actively developed.


People — maintainers, with the tools to do their job — are what drive software sustainability, not language choices or technologies. As a research community we need to find and retain funding to retain, develop, and empower those people to do their work. Otherwise we’re going to waste time and effort urgently re-learning and re-creating tools when individually unforeseeable but collectively predictable bursts in maintenance are needed.

    \ No newline at end of file diff --git a/_posts/dursi/2020-6-5-managers_need_to_speak_out_on_racismhtml.md b/_posts/dursi/2020-6-5-managers_need_to_speak_out_on_racismhtml.md deleted file mode 100644 index 69432b0..0000000 --- a/_posts/dursi/2020-6-5-managers_need_to_speak_out_on_racismhtml.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2020-06-05 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/managers_need_to_speak_out_on_racism.html -slug: white-managers-in-research-computing-we-need-to-be-speaking-out-about-racism-then-listening-and-advocating -title: White Managers in Research Computing, We Need to be Speaking Out About Racism, - then Listening and Advocating ---- - -

    Many people in our research computing community — and in the broader research community we serve — are in pain this week. There’s another video of another Black man, George Floyd, begging for his life while being murdered by a police officer in Minneapolis. Here in Toronto a Black woman, Regis Korchinski-Paquet, died when what should have been a routine call resulted in a mystifying number of police officers showing up. With only police officers present in her apartment, she went over her high-rise balcony to her death, with her last words being, repeatedly, “Mom, help”. This is all taking place during a pandemic which is disproportionately killing and incapacitating Black people, Indigenous people, and people of colour because they have less access to jobs that can be worked from home, and are more likely to be living in overcrowded multi-generational homes.


    So with news and social media being dominated by the consequences of systemic racism, anti-Black violence in particular, and police violence in reaction to anti-police-brutality protests, a lot of people are feeling despair and anguish.


As managers, we are leaders of communities. Small communities, but nonetheless. We have a responsibility to members of those communities to let them know we support them and are here for them. It doesn’t take much to be a small bit of genuine help to someone really struggling. But we have to initiate the conversations. Our community members won’t open up to us about these topics until we’ve demonstrated we can have some kind of adult conversation about racism.


    Doing or saying something is scary for many of us in research computing — who are overwhelmingly not Black and mostly white, which is a related conversation we need to have — because we are worried, reasonably, about getting it wrong. And it’s easy to make the excuse that because we don’t have Black team members (which… you know, same) it’s not something we need to address.


    Most of us don’t have team members who have gotten sick with COVID-19 either, but we’ve certainly been addressing that. It’s been hard and uncomfortable and we didn’t get it all right the first time around and we did it anyway. You don’t necessarily know who’s hurting in your team and community or why. Not addressing a topic dominating the news and social media now doesn’t project professionalism, it just suggests discomfort or indifference.


    I do not have great suggestions about what to say or do. I can offer some articles and collections of resources I’m finding useful:


I can also tell you what I’m doing at work. I’ve raised the issue at our all-hands meeting using words much like the above, and let people know they can talk to me about it if they need to. Unhelpfully, I sounded a bit awkward, even after practicing, but the next conversation will be easier. I’ve made a point of checking in a little deeper with people during one-on-ones and doing a lot of listening. I’m listening for feedback even when it’s uncomfortable, and I’ll keep reading those materials, and others, to see what I can do better and how I can support change.


    That’s not the best or even a particularly good way to address what’s going on now and what’s been going on for a very long time. It’s the bare minimum, and started too late. The challenge will come when making changes, then advocating for more change to peers and upwards. But it’s a start.


    From issue #27 of the Research Computing Teams newsletter

    \ No newline at end of file diff --git a/_posts/dursi/2021-11-23-users-time-is-valuablehtml.md b/_posts/dursi/2021-11-23-users-time-is-valuablehtml.md deleted file mode 100644 index a7a670a..0000000 --- a/_posts/dursi/2021-11-23-users-time-is-valuablehtml.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2021-11-23 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/users-time-is-valuable.html -slug: researcher-s-time-has-value-too -title: Researcher's Time Has Value, Too ---- - -

...And Researchers Value Their Time


    (Note: This post is adapted from #102 of the Research Computing Teams Newsletter)


If you followed HPC Twitter in late 2021 at all, you will have seen a heartfelt thread by a well-known research software developer, one who was a key contributor to the Singularity project among others, lamenting the frankly appalling state of developer productivity in HPC - both in what tools exist, and in support for them (and other tools for developers) at academic centres. A lot of people chimed in on the discussion, including one of the leading developers of the PETSc project, embedded software developers, and some key people at big computing centres, all agreeing that there was a problem, but typically zooming in on one or another particular technical or procedural issue and not coming to any conclusion.


    I think the issue is a lot bigger than HPC software development workflows - it comes up in too many contexts to be about specific technical issues of running CI/CD pipelines on fixed infrastructure. The only people to identify the correct underlying issue, in my opinion, were people with experience of both academia and the private sector, such as Brendan Bouffler at AWS:


    The same argument got made by R&D research staff in the private sector. Their time actually has value; as a result, it gets valued.


In academic research computing, partly because of low salaries — especially for the endless stream of trainees — but also because we typically provide research computing systems for free, we tend to put zero value on people’s time. Thus our “lowest-cost” approach definitely does not apply to researcher or trainee effort. If researchers have to jump through absurd hoops to get or renew their accounts, or have to distort their workflows to fit one-size-fits-all clusters and queueing systems, or postdocs have to spend hours of work by hand every month because tools to automate some of that work would cost $500, well, what do they expect, right?


    It’s not that this is an indefensible position to take, but one can’t take this position and act surprised when researchers who can afford to are seriously investigating taking their projects into the commercial cloud even though it costs 2x as much. It turns out that people’s time is worth quite a lot to them, and is certainly worth some money. If we were to let researchers spend their research computing and data money wherever they pleased, I think we’d find that significantly less than 100% of researchers would use “lowest price possible” as their sole criterion for choosing providers. Core facilities like animal facilities, sequencing centres, and microscopy centres compete on dimensions other than being the cheapest option available.


To be sure, there are process issues in academia which exacerbate the tendency to see people’s time as valueless - rules about capital vs operating costs, for instance - but those rules aren’t a law of nature. If we were paying people in academia what they pay in tech, administration would suddenly discover some additional flexibility in the thresholds and criteria for considering something a capital expense if it meant we could be a bit more parsimonious with people’s time.


    Until then, one can’t be too surprised when the most talented and ambitious staff get routinely poached by the private sector, and when research groups start considering service providers that cost more but respect their time.

    \ No newline at end of file diff --git a/_posts/dursi/2021-6-6-nobody-cares-tech-stackhtml.md b/_posts/dursi/2021-6-6-nobody-cares-tech-stackhtml.md deleted file mode 100644 index 5cc190f..0000000 --- a/_posts/dursi/2021-6-6-nobody-cares-tech-stackhtml.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2021-06-06 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/nobody-cares-tech-stack.html -slug: nobody-else-cares-about-your-tech-stack -title: Nobody Else Cares About Your Tech Stack ---- - -

    Focus on your researchers’ and funders’ problems, not your technical solution


    (Note: This post is adapted from #75 of the Research Computing Teams Newsletter)


Many of us who are managing research computing and data teams came up through the ranks doing research ourselves, and have experience in grantwriting for open research calls. That can actually hold us back from succeeding at getting grants for “digital research infrastructure” — building teams and infrastructure to support research.


The thing is, digital research infrastructure calls, the sort that support research computing and data teams and tools, are more like applying to grants as a nonprofit than as a researcher. And we can learn a lot from how the nonprofit community writes funding proposals.


    We're not proposing a research project, we're proposing to solve problems a funder sees for a research community.


Any funder has things they want to accomplish, and the goal as a potential fundee is to find something in the intersection of “work that helps the funder accomplish their goals” and “work that we are able to do and that is aligned with our goals”. Excellent work that isn’t in that first set won’t get funding. Money attached to work that isn’t in the second set is at best a distraction, at worst drains your teams’ credibility.


Most of us in research got our experience in grants from open disciplinary competitions where the funders’ and fundees’ goals are aligned — be seen to be funding/doing the best research. That means you don’t have to think about the distinction very much. The funder wants a portfolio of projects that are promising and could have impact - some will pan out and some won’t, but such is research. So everyone is focussed on “the best” work. There’s a lot of focus on methods and technology used, because those are relevant for assessing the best work. A new technology or method might be why it’s important to fund this work now - some key observation wasn’t possible before, but now it is, and the funder and team who makes the observation now will get the impact. And methods can sabotage a project - a team that does great work with the wrong methods won’t get the best results.


Special digital research infrastructure calls — like those that research computing projects typically fall under — and calls by nonprofit funders, are different. The funder has some particular change they want to see in the world, some community they want to see better served. They are generally much less willing to take a flyer on projects with only a modest chance of success, because failures won’t serve the community they want to see served. Something that successfully serves the community can always be improved in future iterations; something that fails to meet the community’s needs may well be unsalvageable.


Methods and technology matter much less to these funders. They want to know that you can credibly deliver on the proposal, and that you have a plan, but the nuts and bolts typically are much less interesting.


A nonprofit funder absolutely wants to understand how the after-school homework tutoring program you’re proposing will interact with the community — how it will find underserved students, how the tutoring will be delivered to the students, what indicators will be used to measure success — but the behind-the-scenes tech stack, like what task management and tutor booking software you’ll use, is completely irrelevant unless it’s to justify that you’ll be able to deliver the program. (And if you are in a position where you need details like that to justify your credibility for delivering the program, you are probably not in serious contention for the funding). Every paragraph you spend talking about the cool new tutor booking software you’re going to use is a paragraph that doesn’t get spent highlighting the funder’s goals being achieved — more underserved students doing better in school.


A research computing funder who’s receptive to a “we’ll run a new research data management platform specifically aimed at [discipline X]” proposal absolutely wants to know that you’re familiar with the underserved area, that you’ve been successful delivering similar things before, and what metrics you’ll use for success. They do not care that your roadmap includes Kubernetes and some exciting new operators. Would they be disappointed if, mid-stream, you pivoted to running the tasks on bare metal with Ansible? If not, why draw their attention and yours to obscure and uncertain details rather than to how your work will best advance their goals?


The thing is, this same approach applies not just to research funders, but to anyone you plan to work with; any research group that contacts your team looking for something. They have a problem; the greater the up-front focus on understanding and solving the researcher’s problem, the better the chance of success.


How will you know what the funder’s or researcher’s problems and goals are? In the funder’s case, the call will sometimes spell it out; in the researcher’s case, they’ll usually say something. In both cases, it may require some question-asking and digging deeper; the researcher’s or even the funder’s “presenting problem” may not be the underlying issue, and the funder’s call may focus on one particular aspect rather than the overarching goals. But the solution is the same; just ask a bunch of questions.


“Do you mean they’ll just tell you?” I know a team in a Hackathon who went to an open pre-hackathon info session, and approached the organizer and sponsor in a gaggle afterwards. They asked the sponsor — the lead judge — what a successful Hackathon would be from their point of view. The sponsor — who, again, was the lead judge — answered with a particular problem they’d like solved as an example. That team, and mystifyingly only that team, delivered a partial but promising solution to the exact problem described in detail and in public, and they of course won first prize. How could they not? People organize special funding calls and hackathons because they want other people to help them achieve their goals. Yes, they’ll tell you, and if you keep asking questions they’ll keep talking about it until you politely explain that you have to leave for the evening. They put that contact information there and run informational sessions for a reason.


The stakeholder side of research computing isn’t rocket surgery. But listening, digging in, and focussing on their goals is still rare enough that doing it well is almost an unfair advantage.

    \ No newline at end of file diff --git a/_posts/dursi/2021-6-8-research-computing-funding-to-researchershtml.md b/_posts/dursi/2021-6-8-research-computing-funding-to-researchershtml.md deleted file mode 100644 index f05657f..0000000 --- a/_posts/dursi/2021-6-8-research-computing-funding-to-researchershtml.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2021-06-08 01:00:00' -layout: post -original_url: http://www.dursi.ca/post/research-computing-funding-to-researchers.html -slug: research-computing-funding-should-mostly-just-go-to-researchers -title: Research Computing Funding Should Mostly Just Go To Researchers ---- - -

Research computing and data — supporting research efforts with software, computer and data expertise and resources — is fundamentally all of a piece. Today there are fewer and fewer hard boundaries between where the system requirements end and where the software or data resource requirements begin; and teams supporting researchers must have expertise across the stack.


This convergence is a huge opportunity for research computing, but it’s also a challenge for funders. How to know how much to allocate to software, and how much to hardware? Within software, how many resources should go to new software development or procurement, and how much to maintenance? In hardware, what is the right balance between GPUs and CPUs or FPGAs, and within data, how much should we support curation efforts vs discovery, or archival vs near-line storage?


Luckily, there is a simple, robust, time-tested mechanism research computing funders can easily take advantage of, and they should do so. Funders for research computing and data efforts could manage their portfolio effortlessly — in exactly the same way health funders know how to balance spending between reagents and lab staff, or the same way physical science funders know how much to allocate to trainee salaries vs tabletop equipment.


Most research computing funding should go directly to researchers, via traditional funding councils, and the researchers should spend that research computing and data portion of their grants as and where they see fit.


With research computing and data funding as an integral component of project funding, the same research review process that adjudicates the research proposal would weigh in on the computing and data resources requested to conduct it. This eliminates nonsensical but all-too-common situations where a researcher successfully wins computing cycles for a non-funded project, or gets funding for a postdoc for a project but doesn’t get enough compute or storage resources for the trainee to perform the project. It would also allow the researcher to adjust how they were using resources mid-stream; if after initial efforts it turned out that software development effort to improve the code was a better use of funding than throwing hardware at the problem, the money could be spent that way, rather than applying ahead of time for people time and computing resources separately and hoping that it all works out in the end.

A technician validates genetic variants identified through whole-exome sequencing at the Cancer Genomics Research Laboratory, part of the National Cancer Institute’s Division of Cancer Epidemiology and Genetics (DCEG).
We fund researchers to buy all kinds of complex equipment; they can handle buying research computing services.

In this model, a researcher would include in their grant proposal a research computing and data component where necessary. As with purchasing wet lab equipment, animal experiments, or large physical apparatus — undertakings which are no less technical or complex than research computing — research grants would include cost justifications for the proposed research computing services or equipment, and funding agencies would rate the quality of the justification and the worthiness of the proposed goals versus the cost.


A researcher whose proposal was successful would then, as with other line items, be free to spend that research computing and data component of their grant where they wish: on software development, data management and analysis, or access to storage and compute resources. Obviously, as known entities with existing working relationships, local research computing centres — now working in a familiar core facility model — would have a huge advantage. But the researcher would not be limited to working with those centres, nor to working with only one service provider.


This approach will work well for capacity computing, data, and expertise — those needs where there are many possible service providers. And in those areas, having the researcher in control of what services they can use where will help drive those vendors to providing the kinds and quality of services that researchers need. But not every kind of computing or expertise capability is available enough for researchers to be able to easily buy needed quantities of. Researchers can’t conjure into existence a (say) quantum computing shared facility one investigator-led grant at a time. Those new and emerging capabilities have to be handled separately, with existing funding councils setting priorities. Once those new capabilities are operational, they can and should be sustained with the same core-facility portable-funding model; if they can’t, maybe they didn’t need to be built. Other needs like foundational infrastructures — research and education networks, advisory bodies — will also need to be handled separately by funders.


But for the bulk of research computing, for capacity support of research using computing, data and related expertise, there’s no longer need for endless surveys and consultations and projections to indirectly inform decision making. Parallel competitions for different kinds of support for a research project have long since stopped making sense. Internal computing organization debates about what kinds of services to offer should make way for researchers allocating the funds themselves. Let researchers decide what works best for advancing their research.

    \ No newline at end of file diff --git a/_posts/dursi/2022-2-26-1500-jobshtml.md b/_posts/dursi/2022-2-26-1500-jobshtml.md deleted file mode 100644 index 8a834f1..0000000 --- a/_posts/dursi/2022-2-26-1500-jobshtml.md +++ /dev/null @@ -1,59 +0,0 @@ ---- -author: Jonathan Dursi's Blog -author_tag: dursi -blog_subtitle: R&D computing at scale -blog_title: Jonathan Dursi -blog_url: http://www.dursi.ca -category: dursi -date: '2022-02-26 00:00:00' -layout: post -original_url: http://www.dursi.ca/post/1500-jobs.html -slug: what-i-ve-learned-from-looking-at-1-500-jobs-leading-research-computing-teams -title: What I've Learned from Looking at 1,500 Jobs Leading Research Computing Teams ---- - -

    Job numbers continue to grow; lots of data and product management jobs; IR groups at Universities becoming bigger employers


    (Note: This post is adapted from #111 of the Research Computing Teams Newsletter)


A year and a half ago I posted my observations on the first 500 jobs posted to the job board - we’re getting close to 1,500 now, and it’s worth taking a look to see what, if anything, has changed in research computing team leadership and management jobs¹.


There are some trends that have continued since that posting. The jobs in industry are growing vastly beyond what I would have imagined possible when I started in research computing in the 1990s. (The number of jobs working with biomedical data of one sort or another in particular is just astonishing.) Rather than technical computing being a niche, it’s utterly mainstream now. There are a lot of jobs out there, and I don’t even bother posting generic “data science manager” jobs unless they’re connected to some real complex research questions - which happens a lot, whether it’s fraud detection or improving financial modelling or supporting biomedical research. Some really fun-looking jobs that would probably feel a lot like working at a research computing centre keep coming up at consultancies — go visit a client and help them with their data science/data engineering/etc needs. There’s also a growing number of data science/engineering jobs at Universities that fall under the Provost/VP Operations rather than the VPR’s side of the house — Institutional Research, looking at (say) student success in support of the teaching mission.


Because of the growth in the number of jobs, it is very much a candidate’s market out there. I’m seeing postings — especially for the traditional academic “director of research computing” jobs — stay open for cringe-inducing periods of time. A few in particular I’ve watched with vicarious embarrassment continue coming up in the listings for 8+ months. That’s a bad sign for us as hiring managers - the market for individual contributors is at least as tight - but it’s amazing news for us as individuals.


    When I wrote that post in late 2020 it was just regulated industries like health/biotech or financial services that were developing data governance or other data management jobs, but now data management is popping up everywhere, whether it’s retail or logistics or anywhere else. These are being joined, again first in the regulated industries, by data privacy or data risk management jobs. Privacy-preserving data analysis jobs (and teams supporting same with software development) are also starting to be more common (and there’s a lot of cool research and technology work to be done there!)


I’m also (finally!) starting to see explicitly product management jobs in research computing, both academic and private-sector. You see it around data management — bundling and curating of data into real data products — but also in software development, especially around analysis pipelines for some reason.


    Probably related to the growth of product vs project thinking, I’m starting to see a lot of “delivery manager” jobs that would have been called “project managers” just a year ago. Projects are defined by having clearly defined start- and end-points up-front. “Delivery” jobs seem to focus on sustained, ongoing work, more appropriate for long-lived products.


These products that keep coming up often combine data, software, and systems one way or another. That really points to weaknesses around organizing by type of skills - the research software engineering movement, for instance - as the lines between software and systems in this DevOps, infrastructure-as-code era are very fuzzy; and as data grows more and more important, data skills are needed everywhere.


For us as managers or leads, and especially for individual contributors as they grow their skills, it’s important to have a pretty holistic view of research computing and data and not try to break it up into silos. The growing number of data engineering jobs is a great example. That work often involves all three of software, systems, and data expertise. Data engineering is getting so broad and important that not only are there different sub-fields, in large organizations there are likely to be completely distinct data engineering teams doing different work. Trying to decide which of those jobs are “research software engineering” jobs and which aren’t is not a productive way forward, for those candidates or for us as a community.


    Needless to say, the growth of remote jobs has been off the charts - especially in the private sector, although the academic institutions are gamely doing what they can to keep up (often hampered by institutional policies).


Late June 2022 update: At the time that I write this, there’s a slowdown in hiring in tech, especially among early-stage startups. That slowdown due to economic conditions is not, as far as I can tell, affecting these more research-oriented kinds of jobs. The job board doesn’t have a lot of jobs from startups anyway. For larger organizations, the biotech firms or the banking firms doing fraud detection research or the computing providers or academic groups or… clearly do not view these roles as “nice to haves” that can wait until there’s a bit more economic certainty.

1. What counts as such a job? Any job that involves leading or mentoring people, or managing projects, programs, or products, in software, systems, or data curation/management/engineering/analysis to support the solution of research problems is a good fit. If you are hiring for such a job, feel free to submit it to the job board.
    \ No newline at end of file diff --git a/_posts/dweitzel/2017-11-6-cleaning-up-gracc.md b/_posts/dweitzel/2017-11-6-cleaning-up-gracc.md deleted file mode 100644 index 4c221b0..0000000 --- a/_posts/dweitzel/2017-11-6-cleaning-up-gracc.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2017-11-06 19:09:23' -layout: post -original_url: https://derekweitzel.com/2017/11/06/cleaning-up-gracc/ -slug: cleaning-up-gracc -title: Cleaning Up GRACC ---- - -

The GRid ACcounting Collector (GRACC) is the OSG’s new version of accounting software, replacing Gratia. It has been running in production since March 2017. Last week, on Friday November 3rd, we held a GRACC Focus Day. Our goal was to clean up data that is presented in GRACC. My changes were:

• Update the GRACC-Collector to version 1.1.8. The primary change in this release is setting the messages sent to RabbitMQ to be “persistent”. The persistent messages are then saved to disk in order to survive a RabbitMQ reboot.
• Use case-insensitive comparisons to determine the Open Science Grid Information Management system (OIM) information. This was an issue with GPGrid (Fermilab), which was registered as GPGRID.
• Set the OIM_Site equal to the Host_description attribute if the OIM logic is unable to determine the registered OIM site. This is especially useful for the LIGO collaboration, which uses sites in Europe that are not registered in OIM. Now, instead of a lot of Unknown sites listed on the LIGO site listing, it shows the reported name of the site where the job ran.
GRACC Projects Page for LIGO

    Regular Expression Corrections


One of the common problems we have in GRACC is poor data coming from the various probes installed at hundreds of sites. We don’t control the data coming into GRACC, so occasionally we must make corrections to the data for clarity or correctness. One of these corrections addresses misreporting of the “site” that the jobs ran on.


    In many instances, the probe is unable to determine the site and simply lists the hostname of the worker node where the job ran. This can cause the cardinality of sites listed in GRACC to increase dramatically as we get new hostnames inserted into the sites listing. If the hostnames are predictable, a regular expression matching algorithm can match a worker node hostname to a proper site name.


    The largest change for GRACC was the regular expression corrections. With this new feature, GRACC administrators can set corrections to match on attributes using regular expression patterns. For example, consider the following correction configuration.

[[Corrections]]
index = 'gracc.corrections'
doc_type = 'host_description_regex'
match_fields = ['Host_description']
source_field = 'Corrected_OIM_Site'
dest_field = 'OIM_Site'
regex = true

    This configuration means:


    Match the Host_description field in the incoming job record against the regular expression in the Host_description field of the corrections table. If they match, take the value in the Corrected_OIM_Site field of the corrections table and place it into the OIM_Site field of the job record.


    And the correction document would look like:

    {
      "_index": "gracc.corrections-0",
      "_type": "host_description_regex",
      "_id": "asldkfj;alksjdf",
      "_score": 1,
      "_source": {
        "Host_description": ".*\.bridges\.psc\.edu",
        "Corrected_OIM_Site": "PSC Bridges"
      }
    }

    The regular expression is in the Host_description field.


    So, if the incoming job record is similar to:

    {
    ...
    "Host_description": "l006.pvt.bridges.psc.edu"
    ...
    }

    Then the correction would modify or create values such that the final record would approximate:

    {
    ...
    "Host_description": "l006.pvt.bridges.psc.edu",
    "OIM_Site": "PSC Bridges",
    "RawOIM_Site": ""
    ...
    }

    Note that the Host_description field stays the same. We must keep it the same because it is used in record duplicate detection. If we modified the field and resummarized previous records, then it would cause multiple records to represent the same job.
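
    In code, the correction amounts to a regular-expression match followed by a field copy. A minimal Python sketch of that logic (illustrative only, not the actual GRACC implementation; the correction entry is taken from the example above):

```python
import re

# One correction entry, as it would be loaded from the gracc.corrections index.
corrections = [
    {"Host_description": r".*\.bridges\.psc\.edu", "Corrected_OIM_Site": "PSC Bridges"},
]

def apply_corrections(record):
    """If Host_description matches a correction's regex, fill in OIM_Site."""
    for corr in corrections:
        if re.fullmatch(corr["Host_description"], record.get("Host_description", "")):
            # Host_description itself is left untouched; it is used for
            # duplicate detection. Only the destination field is set.
            record["OIM_Site"] = corr["Corrected_OIM_Site"]
            break
    return record

record = apply_corrections({"Host_description": "l006.pvt.bridges.psc.edu"})
assert record["OIM_Site"] == "PSC Bridges"
```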

    \ No newline at end of file diff --git a/_posts/dweitzel/2017-6-14-stashcache.md b/_posts/dweitzel/2017-6-14-stashcache.md deleted file mode 100644 index 7c9aec8..0000000 --- a/_posts/dweitzel/2017-6-14-stashcache.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2017-06-14 17:11:55' -layout: post -original_url: https://derekweitzel.com/2017/06/14/stashcache/ -slug: stashcache -title: StashCache ---- - -

    StashCache is a framework to distribute data across the Open Science Grid. It is designed to help opportunistic users to transfer data without the need for dedicated storage or frameworks of their own, like CMS and ATLAS have deployed. StashCache has several regional caches and a small set of origin servers. Caches have fast network connections, and sizable disk storage to quickly distribute data to the execution hosts in the OSG.


    StashCache is named for the Stash filesystem located at the University of Chicago’s OSG-Connect service. It is primarily intended to cache data from the Stash filesystem, though data origins exist for other experiments.

    [Figure: Regional Caches]

    Components


    The worker nodes are where the user jobs will run. The transfer tools are used on the worker nodes to download data from StashCache caches. Worker nodes are geographically distributed across the US, and will select the nearest cache based upon a GeoIP database.

    [Figure: StashCache Architecture]

    The caches are distributed to computing sites across the U.S. They run the XRootD software. The worker nodes connect directly to the regional caches, which in turn download from the origin servers. The caching proxies discover the data origin by querying the redirectors. The caching algorithm used is Least Recently Used (LRU): the cache only deletes cached data when storage space is near capacity, and it deletes the least recently used data first.

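
    For illustration, LRU eviction reduces to a few lines of bookkeeping. A toy sketch (not the XRootD implementation):

```python
from collections import OrderedDict

class LRUCache:
    """Delete least-recently-used entries only when space runs low."""

    def __init__(self, capacity_bytes):
        self.capacity = capacity_bytes
        self.used = 0
        self.entries = OrderedDict()  # path -> size, least recent first

    def get(self, path):
        """Return True on a cache hit and mark the file as recently used."""
        if path in self.entries:
            self.entries.move_to_end(path)
            return True
        return False

    def put(self, path, size):
        """Store a file, evicting the least recently used files if needed."""
        while self.entries and self.used + size > self.capacity:
            _, evicted_size = self.entries.popitem(last=False)
            self.used -= evicted_size
        self.entries[path] = size
        self.used += size
```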

    The origin servers are the primary source of data for the StashCache framework. StashCache was named after the Stash data store at the University of Chicago’s OSG-Connect service, but other origins also utilize the framework. The origin is the initial source of data, but once the data is stored on the Caches, the origin is no longer used. Updates to data on the origin are not reflected in the caches automatically. The caches treat the data from the origin as immutable, and therefore do not check for updates. If a user requires new data to be pulled into the cache, the name or location of the data on the origin must be changed.


    Redirectors are used to discover the location of data. They are run only at the Indiana Grid Operations Center (GOC). The redirectors help in the discovery of the origin for data. Only the caching proxies communicate with the redirectors.


    Tools to transfer


    Two tools exist to download data from StashCache: CVMFS and StashCP. With either of these tools, the first step for users is to copy the data to the Stash filesystem. Once the user has an OSG-Connect account, they may copy their data to the /stash/<username>/public directory. Once there, both of the tools can view and download the files.


    CVMFS (CERN Virtual Machine File System) is a mountable filesystem that appears to the user as a regular directory. CVMFS provides transparent access for users to data in the Stash filesystem. The namespace, such as the size and name of files, and the data are separate in the Stash CVMFS. CVMFS distributes the namespace information for the Stash filesystem over a series of HTTP Forward Proxies that are separate from the StashCache federation. Data is retrieved through the Stash proxies.


    In order to map the Stash filesystem into CVMFS, a process is constantly scanning the Stash filesystem checking for new files. When new files are discovered, they are checksummed and the meta-data is stored in the CVMFS namespace. Since this scanning can take a while for a filesystem the size of Stash, it may take several hours for a file placed in Stash to be available through CVMFS.


    Using CVMFS, copying files is as easy as copying files with any other filesystem:

    $ cp /cvmfs/stash.osgstorage.org/user/<username>/public/… dest/

    CVMFS access also has other features that are beneficial for Stash access. CVMFS will cache files locally so that multiple accesses to the same file on the same node will be very fast. Also, CVMFS can fall back to other nearby caches if the first fails.


    StashCP is the second tool that can download data from StashCache. StashCP uses CVMFS above, falling back to the caching proxies and eventually to the origin. The order of operations that StashCP performs:

    1. Check for the file in the CVMFS mount under /cvmfs/stash.osgstorage.org/…
    2. If the CVMFS copy fails, connect directly to the nearest proxy and attempt to download the file.
    3. If the proxy fails, connect directly to the origin server.

    Since StashCP doesn’t rely only on the CVMFS mount, files are immediately available to transfer with StashCP.


    StashCP is distributed with OSG-Connect’s module system. Using StashCP is nearly as simple as using the cp command:

    $ module load stashcp
    $ stashcp /user/<username>/public/… dest/

    Conclusions


    The StashCache framework is very useful for downloading data to execution hosts across the OSG. It was designed to help opportunistic users to transfer data without the need for dedicated storage or frameworks of their own, like CMS and ATLAS have deployed.


    StashCache has been used to transfer over 3 PB of data this year. Check out some of the papers written about using StashCache:

    - Derek Weitzel, Brian Bockelman, Duncan A. Brown, Peter Couvares, Frank Würthwein, and Edgar Fajardo Hernandez. 2017. Data Access for LIGO on the OSG. In Proceedings of PEARC17, New Orleans, LA, USA, July 09-13, 2017, 6 pages. DOI: 10.1145/3093338.3093363
    - Derek Weitzel, Brian Bockelman, Dave Dykstra, Jakob Blomer, and René Meusel. 2017. Accessing Data Federations with CVMFS. In Journal of Physics: Conference Series.
    \ No newline at end of file diff --git a/_posts/dweitzel/2017-9-7-installing-scitokens-on-a-mac.md b/_posts/dweitzel/2017-9-7-installing-scitokens-on-a-mac.md deleted file mode 100644 index 84c757a..0000000 --- a/_posts/dweitzel/2017-9-7-installing-scitokens-on-a-mac.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2017-09-07 19:20:04' -layout: post -original_url: https://derekweitzel.com/2017/09/07/installing-scitokens-on-a-mac/ -slug: installing-scitokens-on-a-mac -title: Installing SciTokens on a Mac ---- - -

    In case I ever have to install SciTokens again, here are the steps I took to make it work on my Mac. The most difficult part of this is installing the OpenSSL headers for the jwt Python library. I followed the advice on this blog post.

    1. Install Homebrew.

    2. Install openssl:

           brew install openssl

    3. Download the SciTokens library:

           git clone https://github.com/scitokens/scitokens.git
           cd scitokens

    4. Create the virtualenv in which to install the jwt library:

           virtualenv jwt
           . jwt/bin/activate

    5. Install jwt, pointing to the Homebrew-installed openssl headers:

           env LDFLAGS="-L$(brew --prefix openssl)/lib" CFLAGS="-I$(brew --prefix openssl)/include" pip install cryptography PyJWT
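
    To sanity-check the result, one option (my addition, not part of the original steps) is to install the cloned library into the virtualenv with pip install -e . and confirm the OpenSSL-linked pieces import cleanly:

```python
# Run inside the jwt virtualenv after `pip install -e .` in the scitokens
# checkout. If the cryptography/PyJWT build against the Homebrew OpenSSL
# headers went wrong, one of these imports will fail.
import cryptography
import jwt        # PyJWT
import scitokens  # assumes the cloned library was installed into the venv

print("SciTokens install looks good")
```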
    \ No newline at end of file diff --git a/_posts/dweitzel/2018-8-31-htcondor-pull-mode.md b/_posts/dweitzel/2018-8-31-htcondor-pull-mode.md deleted file mode 100644 index e36c394..0000000 --- a/_posts/dweitzel/2018-8-31-htcondor-pull-mode.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2018-08-31 18:28:42' -layout: post -original_url: https://derekweitzel.com/2018/08/31/htcondor-pull-mode/ -slug: htcondor-pull-mode -title: HTCondor Pull Mode ---- - -

    For a recent project to utilize HPC clusters for HTC workflows, I had to add the ability to transfer the input and output sandboxes to and from HTCondor. HTCondor already has the ability to spool input files to a SchedD, and pull the output sandbox. These functions are intended to stage jobs to an HTCondor pool. But, HTCondor did not have the ability to pull jobs from an HTCondor pool.


    The anticipated steps for a job pulled from an HTCondor pool:

    1. Download the input sandbox
    2. Submit the job to the local scheduler
    3. Watch the status of the job
    4. Once completed, transfer the output sandbox to the origin SchedD

    The sandboxes are:

    - Input:
      - Input files
      - Executable
      - Credentials
    - Output:
      - Stdout / stderr from the job
      - Output files, or any files that may have changed while the job ran

    API Additions


    In order to transfer the input and output sandboxes, two new commands were added to the SchedD, as well as a new client function and Python bindings to use them.


    The function for transferring input files is:

    transferInputSandbox(constraint, destination)

    constraint is an HTCondor constraint selecting the jobs whose input files should be transferred. destination is a directory to put the sandboxes in. The sandboxes will be placed in directories named destination/<ClusterId>/<ProcId>/.


    For transferring output files, the function is:

    transferOutputSandbox(jobs)

    Here jobs is a list of tuples of the form (classad, sandboxdir): classad is the full ClassAd of the original job, and sandboxdir is the location of the output sandbox to send.

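
    Putting the two calls together, a pull cycle could look roughly like the sketch below. This is illustrative pseudocode for the API described above, not the final bindings; where the functions live in the htcondor module may differ in my branch.

```python
import htcondor

SPOOL = "/var/lib/pulled-jobs"   # hypothetical local spool directory

# 1. Pull input sandboxes for all idle jobs matching a constraint.
#    Each sandbox lands in SPOOL/<ClusterId>/<ProcId>/.
htcondor.transferInputSandbox("JobStatus == 1", SPOOL)

# 2. Submit each sandbox to the local scheduler and watch it run (not shown).

# 3. Once a job finishes, push its output sandbox back to the origin SchedD,
#    one (classad, sandboxdir) tuple per completed job.
pulled_job_ads = []              # the full ClassAds saved at pull time
completed = [
    (ad, "%s/%s/%s" % (SPOOL, ad["ClusterId"], ad["ProcId"]))
    for ad in pulled_job_ads
]
htcondor.transferOutputSandbox(completed)
```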

    Current Status


    I have created a repo for an example that uses these functions in order to pull a job from a remote SchedD.


    Also, my changes to HTCondor are in my repo, and I have begun the discussion about merging in my changes.

    \ No newline at end of file diff --git a/_posts/dweitzel/2018-9-26-stashcache-by-the-numbers.md b/_posts/dweitzel/2018-9-26-stashcache-by-the-numbers.md deleted file mode 100644 index 15037b6..0000000 --- a/_posts/dweitzel/2018-9-26-stashcache-by-the-numbers.md +++ /dev/null @@ -1,148 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2018-09-26 06:00:00' -layout: post -original_url: https://derekweitzel.com/2018/09/26/stashcache-by-the-numbers/ -slug: stashcache-by-the-numbers -title: StashCache By The Numbers ---- - -

    The StashCache federation is composed of three components: Origins, Caches, and Clients. There are additional components that increase the usability of StashCache, which I will also mention in this post.

    [Figure: Diagram of the StashCache Federation]

    [Figure: Cumulative usage of StashCache over the last 90 days]

    Origins


    A StashCache Origin is the authoritative source of data. The origin receives data location requests from the central redirectors. These requests take the form of “Do you have the file X”, to which the origin will respond “Yes” or “No”. The redirector then returns a list of origins that claim to have the requested file to the client.


    An Origin is a simple XRootD server, exporting a directory or set of directories for access.

    | Origin         | Base Directory | Data Read |
    | -------------- | -------------- | --------- |
    | LIGO Open Data | /gwdata        | 926 TB    |
    | OSG Connect    | /user          | 246 TB    |
    | FNAL           | /pnfs          | 166 TB    |
    | OSG Connect    | /project       | 63 TB     |

    A list of Origins and their base directories.


    Clients


    The clients interact with the StashCache federation on the user’s behalf. They are responsible for choosing the “best” cache. The available clients are CVMFS and StashCP.

    [Figure: StashCache client usage by tool (CVMFS and StashCP)]

    In the pictures above, you can see that most users of StashCache use CVMFS to access the federation. GeoIP is used by all clients in determining the “best” cache. GeoIP location services are provided by the CVMFS infrastructure in the U.S. The geographically nearest cache is used.


    The GeoIP service runs on multiple CVMFS Stratum 1s and other servers. The request to the GeoIP service includes all of the cache hostnames. The GeoIP service takes the requesting IP address and attempts to locate the requester. After determining the location of all of the caches, the service returns an ordered list of nearest caches.


    The GeoIP service uses the MaxMind database to determine locations by IP address.

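
    The ordering step is essentially a great-circle distance sort. A toy version of the computation (the cache hostnames and coordinates here are made up; the real service resolves locations with MaxMind):

```python
from math import asin, cos, radians, sin, sqrt

def haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two (lat, lon) points."""
    dlat, dlon = radians(lat2 - lat1), radians(lon2 - lon1)
    a = sin(dlat / 2) ** 2 + cos(radians(lat1)) * cos(radians(lat2)) * sin(dlon / 2) ** 2
    return 6371 * 2 * asin(sqrt(a))

# Hypothetical cache coordinates; the real list arrives with the request.
caches = {
    "cache-kc.example.org": (39.10, -94.58),       # Kansas City
    "cache-chicago.example.org": (41.88, -87.63),  # Chicago
    "cache-houston.example.org": (29.76, -95.36),  # Houston
}

def nearest_caches(client_lat, client_lon):
    """Return cache hostnames ordered nearest-first."""
    return sorted(caches, key=lambda h: haversine_km(client_lat, client_lon, *caches[h]))

print(nearest_caches(40.82, -96.70))  # a client in Lincoln, NE -> KC first
```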

    CVMFS


    Most (if not all) origins are indexed in a *.osgstorage.org repo. For example, the OSG Connect origin is indexed in the stash.osgstorage.org repo. It uses a special feature of CVMFS where the namespace and data are separated. The file metadata, such as file permissions, directory structure, and checksums, are stored within CVMFS. The file contents are not within CVMFS.


    When accessing a file, CVMFS will use the directory structure to form an HTTP request to an external data server. CVMFS uses GeoIP to determine the nearest cache.


    The indexer may also configure a repo to be “authenticated”. A whitelist of certificate DNs is stored within the repo metadata and distributed to each client. The CVMFS client will pull the certificate from the user’s environment. If the certificate DN matches a DN in the whitelist, it uses the certificate to authenticate with an authenticated cache.


    StashCP


    StashCP works in this order:

    1. Check if the requested file is available from CVMFS. If it is, copy the file from CVMFS.
    2. Determine the nearest cache by sending the cache hostnames to the GeoIP service.
    3. After determining the nearest cache, run the xrdcp command to copy the data from the nearest cache.

    Caches

    [Figure: Cache locations in the U.S.]

    The cache is half XRootD cache and half XRootD client. When a cache receives a data request from a client, it searches its own cache directory for the files. If the file is not in the cache, it uses the built-in client to retrieve the file from one of the origins. The cache will request the data location from the central redirector, which in turn asks the origins for the file location.


    The cache listens on port 1094 for the regular XRootD protocol, and on port 8000 for HTTP.


    Authenticated Caches


    Authenticated caches use GSI certificates to authenticate access to files within the cache. The client will authenticate with the cache using the client’s certificate. If the file is not in the cache, the cache will use its own certificate to authenticate with the origin to download the file.


    Authenticated caches use port 8443 for HTTPS.

    \ No newline at end of file diff --git a/_posts/dweitzel/2019-10-11-letsencrypt-for-multiple-hosts.md b/_posts/dweitzel/2019-10-11-letsencrypt-for-multiple-hosts.md deleted file mode 100644 index 4b9b95c..0000000 --- a/_posts/dweitzel/2019-10-11-letsencrypt-for-multiple-hosts.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2019-10-11 20:38:14' -layout: post -original_url: https://derekweitzel.com/2019/10/11/letsencrypt-for-multiple-hosts/ -slug: letsencrypt-for-multiple-hosts -title: LetsEncrypt for Multiple Hosts ---- - -

    Using LetsEncrypt for certificate creation and management has made secure communications much easier. Instead of contacting the IT department of your university to request a certificate, you can skip the middle man and generate your own certificate, which is trusted around the world.


    A common use case of certificates is to secure data transfers. Data transfers that use the GridFTP, XRootD, or HTTPS transfer protocols can load balance between multiple servers to increase throughput. keepalived is used to load balance between multiple transfer servers. The certificate provided to the clients needs to have the virtual host address of the load balancer, as well as the hostname of each of the worker nodes.

    1. Create a shared directory between the data transfer nodes.
    2. Install httpd on each of the data transfer nodes.
    3. Configure httpd to use the shared directory as the “webroot”.
    4. Configure keepalived to virtualize port 80 to at least one of your data transfer nodes.
    5. Run certbot with the webroot option, as well as the multiple hostnames of the data transfer nodes.

    Create an NFS share that each of the data transfer nodes can read. The steps for creating an NFS shared directory are outside the scope of this guide; here, the shared directory will be referred to as /mnt/nfsshare. Next, install httpd on each of the data transfer nodes:

    root@host $ yum install httpd

    Create a webroot directory within the shared directory on one of the nodes:

    root@host $ mkdir /mnt/nfsshare/webroot

    Configure httpd to export the same webroot on each of the data transfer nodes:

    <VirtualHost *:80>
        DocumentRoot "/mnt/nfsshare/webroot"
        <Directory "/mnt/nfsshare/webroot">
            Require all granted
        </Directory>
    </VirtualHost>

    Configure keepalived to virtualize port 80 to at least one of your data transfer nodes. Add to your configuration:

    virtual_server <VIRTUAL-IP-ADDRESS> 80 {
        delay_loop 10
        lb_algo wlc
        lb_kind DR
        protocol tcp

        real_server <GRIDFTP-SERVER-#1-IP ADDRESS> {
            TCP_CHECK {
                connect_timeout 3
                connect_port 80
            }
        }
    }

    Run certbot with the webroot option on only one of the data nodes. The first domain on the command line should be the virtual hostname:

    root@host $ certbot certonly -w /mnt/nfsshare/webroot -d <VIRTUAL_HOSTNAME> -d <DATANODE_1> -d <DATANODE_N>...
    \ No newline at end of file diff --git a/_posts/dweitzel/2020-10-11-xrootd-client-manager.md b/_posts/dweitzel/2020-10-11-xrootd-client-manager.md deleted file mode 100644 index 34e8e78..0000000 --- a/_posts/dweitzel/2020-10-11-xrootd-client-manager.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2020-10-11 06:00:00' -layout: post -original_url: https://derekweitzel.com/2020/10/11/xrootd-client-manager/ -slug: xrootd-client-manager -title: XRootD Client Manager ---- - -

    The validation project for XRootD Monitoring is moving to phase 2, scale testing. Phase 1 focused on correctness of single server monitoring. The report is available.


    We are still forming the testing plan for the scale test of XRootD, but a component of the testing will be multiple clients downloading from multiple servers. In addition, we must record exactly how much data each client reads from each server in order to validate the monitoring with the clients’ real behavior.


    This level of testing will require detailed coordination and recording of client actions. I am not aware of a testing framework that can coordinate and record accesses of multiple clients and servers, so I spent the weekend developing a simple framework for coordinating these tests.


    Some requirements for the application are:

    - Easy to use interface
    - Easy to add clients and servers
    - Authenticated access for clients, servers, and the interface
    - Storage of tests and results

    I chose Heroku for prototyping this application.


    Interface


    The web interface is available at https://xrootd-client-manager.herokuapp.com/. I chose to host it on Heroku as it is my go-to for pet projects. I will likely move this over to OSG’s production Kubernetes installation soon. The entire application is only the web interface and a back-end Redis data store.

    [Screenshot: simple web interface]

    The web interface shows the connected clients and servers. The web interface also connects to the web server with a persistent connection to update the list of connected clients.


    Client Communication


    Client communication is handled through a Socket.IO connection. Socket.IO is a library that creates a bi-directional, event-based communication channel between the client and the server. The communication is over WebSockets if possible, but will fall back to HTTP long polling. A good discussion of long polling vs. WebSockets is available from Ably. A Socket.IO connection is established between the web server and each worker, server, and web client.


    The difficult part is authenticating the Socket.IO connections. We discuss this in the security section.


    Security


    Securing the commands and web interface is required since the web interface is sending commands to the connected worker nodes and servers.


    Socket.IO Connections


    The Socket.IO connection is secured with a shared key. The communication flow for a non-web client (worker/server):

    1. A JWT is created from the secret key. The secret key is communicated through a separate secure channel; in most cases, it will be through the command-line arguments of the client. The JWT has a limited lifetime and a scope.
    2. The client registers with the web server, with an Authentication bearer token in the headers. The registration includes details about the client. It returns a special (secret) client_id that will be used to authenticate the Socket.IO connection. The registration is valid for 30 seconds, after which the client_id is no longer valid.
    3. The client creates a Socket.IO connection with the client_id in the request arguments.
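
    The first two steps of that flow are compact enough to sketch. A rough Python version of a non-web client's registration (the /register path and payload fields are illustrative, not the app's exact API):

```python
import time

import jwt       # PyJWT
import requests

BASE = "https://xrootd-client-manager.herokuapp.com"
SECRET = "shared-secret-from-the-command-line"  # delivered out of band

# 1. Mint a short-lived, scoped JWT from the shared secret.
token = jwt.encode(
    {"scope": "client", "exp": int(time.time()) + 60},
    SECRET,
    algorithm="HS256",
)

# 2. Register with a bearer token; the response carries the secret client_id,
#    which is only valid for a short window.
resp = requests.post(
    f"{BASE}/register",  # hypothetical endpoint
    headers={"Authorization": f"Bearer {token}"},
    json={"type": "worker", "hostname": "worker01.example.edu"},
)
client_id = resp.json()["client_id"]

# 3. The Socket.IO connection then passes client_id in its request arguments.
```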

    Web Interface


    The web interface is secured with an OAuth login from GitHub. There is a whitelist of allowed GitHub users that can access the interface.


    The flow for web clients connecting with Socket.IO is much easier since they are already authenticated with OAuth from GitHub.

    1. The user authenticates with GitHub.
    2. The Socket.IO connection includes cookies such as the session, which is signed by a secret key on the server. The session’s github key is compared to the whitelist of allowed users.

    Storage of tests and results


    Storage of the tests and results is still being designed. Most likely, the tests and results will be stored in a database such as Postgres.


    Conclusions


    Heroku provides a great playground to prototype these web applications. I hope that I can eventually find an alternative that will run on OSG’s production Kubernetes installation.


    The web application is still being developed, and there is much to be done before it can be fully utilized for the scale validation. But many of the difficult components are completed, including the communication and eventing, the secure web interface, and the clients.


    The GitHub repos are available at:

    - - - \ No newline at end of file diff --git a/_posts/dweitzel/2020-3-8-gracc-transition.md b/_posts/dweitzel/2020-3-8-gracc-transition.md deleted file mode 100644 index 7dbd12e..0000000 --- a/_posts/dweitzel/2020-3-8-gracc-transition.md +++ /dev/null @@ -1,70 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2020-03-08 06:00:00' -layout: post -original_url: https://derekweitzel.com/2020/03/08/gracc-transition/ -slug: gracc-transition-visualization -title: GRACC Transition Visualization ---- - -

    The OSG is in the process of transitioning from an older ElasticSearch (ES) cluster to a new version. Part of this process is reindexing (copying) data from the old cluster to the new. Unfortunately, it’s not easy to capture the status of this transition. For this, I have created the GRACC Transition page.


    The goal is to transition when both the old and new ES have the same data. A simple measure of this is whether they share the same number of documents in all of the indexes.


    Source for this app is available on GitHub: GRACC Transition


    Data Collection


    Data collection is performed by a probe on each of the new and old ElasticSearch clusters. Upload is performed with a POST to the GRACC transition website. Authorization is performed with a shared random token between the probe and the website.


    The probe is very simple. It queries ES for all indexes, as well as the number of documents and data size inside the index.

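
    A probe along those lines is only a few lines of Python. A sketch (the reporting endpoint and token handling here are illustrative; the real probe's details may differ):

```python
import requests

ES = "http://localhost:9200"                             # the ES cluster being probed
REPORT = "https://gracc-transition.example.com/report"   # hypothetical endpoint
TOKEN = "shared-random-token"                            # shared between probe and site

# Ask ES for every index with its document count and size on disk.
indices = requests.get(f"{ES}/_cat/indices?format=json&bytes=b").json()

payload = [
    {"index": i["index"], "docs": int(i["docs.count"]), "bytes": int(i["store.size"])}
    for i in indices
]

# POST the snapshot to the transition site, authorized by the shared token.
requests.post(REPORT, json=payload, headers={"Authorization": f"Bearer {TOKEN}"})
```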

    There are also many indexes that the OSG is not transitioning to the new ES. In order to ignore these indexes, a set of regular expressions is used to remove the indexes from consideration. Those regular expressions are:

    /^osg.*/,           // Start with osg.*
    /^ps_.*/,           // Start with ps_*
    /^shrink\-ps_.*/,   // Start with shrink-ps_*
    /^glidein.*/,       // Start with glidein*
    /^\..*/,            // Start with .
    /^ps\-itb.*/        // Start with ps-itb*

    The Website


    GRACC Transition Website


    The GRACC transition app is hosted on Heroku. I chose Heroku because it provides a simple hosting platform with a database for free.


    The website pushes a lot of the data processing to the client. The data is stored in the database as JSON and is sent to the client without any transformation. The client pulls the data from the website for both the new and old ES and processes the data in JavaScript.


    The website breaks the statistics into three visualizations:

    1. Progress Bars: Compare the total documents and total data size of the old and new clusters. Progress is defined as new / old. The bars provide a very good visualization of the progress of the transition, as they need to reach 100% before we are able to fully transition.
    2. Summary Statistics: The summary statistics show the raw number of either missing or mismatched indexes. If an index is in the old ES but not in the new ES, it is counted as missing. If the index is a different size in the old vs. the new, it is counted as mismatched.
    3. Table of Indices: Finally, a table of indices is shown with the number of documents that are missing, or simply “Missing” if the index is absent from the new ES.
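
    The bookkeeping behind all three views reduces to comparing two maps of index name to document count. A sketch in Python (the site does this client-side in JavaScript; the index names below are made up):

```python
def compare(old, new):
    """old/new map index name -> document count."""
    progress = sum(new.values()) / sum(old.values())   # what the progress bar shows
    missing = [i for i in old if i not in new]
    mismatched = [i for i in old if i in new and new[i] != old[i]]
    return progress, missing, mismatched

progress, missing, mismatched = compare(
    {"gracc.example-2020.02": 1000, "gracc.example-2020.03": 500},
    {"gracc.example-2020.02": 900},
)
print(f"{progress:.0%} complete; missing={missing}; mismatched={mismatched}")
```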

    In addition to the table, I also provide a button to download the list of indexes that are missing or mismatched. This can be useful for an administrator to make sure it matches what they expect, or to process with ElasticSearch.


    Improvements and Future


    In the future, I would like to generate a weekly or even daily email showing the progress of the transition. This would provide a constant reminder of the state of the transition.

    \ No newline at end of file diff --git a/_posts/dweitzel/2022-1-22-improving-geoip.md b/_posts/dweitzel/2022-1-22-improving-geoip.md deleted file mode 100644 index 9bceabb..0000000 --- a/_posts/dweitzel/2022-1-22-improving-geoip.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2022-01-22 05:00:00' -layout: post -original_url: https://derekweitzel.com/2022/01/22/improving-geoip/ -slug: improving-the-open-science-data-federation-s-cache-selection -title: Improving the Open Science Data Federation’s Cache Selection ---- - -

    Optimizing data transfers requires tuning many parameters. High latency between the client and a server can decrease data transfer throughput. The Open Science Data Federation (OSDF) attempts to optimize the latency between a client and a cache by using GeoIP to locate the nearest cache to the client. But using GeoIP alone has many flaws. In this post, we utilize Cloudflare Workers to provide GeoIP information during cache selection. During the evaluation, we found that location accuracy grew from 86% with the original GeoIP service to 95% with Cloudflare Workers.

    [Figure: Map of OSDF locations in the U.S.]

    GeoIP has many flaws. First, the nearest physical cache may not be the nearest in the network topology. Determining the nearest cache in the network would require probing the network topology between the client and every cache, an intensive task to perform at each client startup, and one that may be impossible with some network configurations, such as blocked network protocols.


    Second, the GeoIP database is not perfect. It does not have every IP address, and the addresses may not have accurate location information. When GeoIP is unable to determine a location, it will default to “guessing” that the location is a lake in Kansas (a well-known issue).


    Following a review of the Open Science Data Federation (OSDF), we found that we could improve efficiency by improving the geolocation of clients. In the review, several sites were detected not to be using the nearest cache.


    Implementation


    StashCP queries the CVMFS geolocation service, which relies on the MaxMind GeoIP database.


    Cloudflare Workers are designed to run at Cloudflare’s many colocation facilities near the client. Cloudflare directs a client’s request to a nearby data center using DNS. Each request is annotated with an approximate location of the client, as well as the colocation center that received the request. Cloudflare uses a GeoIP database much like MaxMind, but it also falls back to the colocation site that serviced the request.


    I wrote a Cloudflare worker, cache-locator, which calculates the nearest cache to the client. It uses the GeoIP location of the client to calculate the ordered list of nearest caches. If GeoIP fails for a location, the incoming request to the worker will not be annotated with the location, but it will include the IATA airport code of the colocation center that received the client request. In that case, we return the ordered list of caches nearest to the airport.


    We imported a publicly available database mapping airport codes to locations. The database is stored in Cloudflare’s Key-Value store, keyed by the IATA code of the airport.

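
    The worker's decision logic therefore reduces to: use the request's GeoIP annotation when present, otherwise look up the colo's airport code in the KV store. A Python rendering of that logic (the worker itself is JavaScript; the names and coordinates here are illustrative):

```python
# IATA airport -> (lat, lon), mirroring the Key-Value store described above.
AIRPORTS = {"LAX": (33.94, -118.41), "DFW": (32.90, -97.04)}

def pick_caches(request, nearest_caches):
    """request carries optional GeoIP fields plus the colo's IATA code;
    nearest_caches(lat, lon) returns cache hostnames ordered nearest-first."""
    if request.get("lat") is not None:
        # The GeoIP annotation is present: use the client's location.
        return nearest_caches(request["lat"], request["lon"])
    # GeoIP failed: fall back to the colocation center that took the request.
    lat, lon = AIRPORTS[request["colo_iata"]]
    return nearest_caches(lat, lon)
```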

    Evaluation


    To evaluate the cache selection, I submitted test jobs to each site available in the OSG OSPool, 43 different sites at the time of evaluation. The test jobs:

    1. Run the existing stashcp to retrieve the closest cache:

           stashcp --closest

    2. Run a custom closest script that queries the Cloudflare worker for the nearest caches and prints out the cache.

    After the jobs completed, I compiled the cache decisions into a spreadsheet and manually evaluated each cache selection decision. The site names in the spreadsheet are the somewhat arbitrary internal names given to sites.


    In the spreadsheet, you can see that the correct cache was chosen 86% of the time with the old GeoIP service, and 95% of the time with Cloudflare Workers.


    Notes during the Evaluation


    Cloudflare was determined to be incorrect at two sites, the first being UColorado_HEP (University of Colorado in Boulder). In this case, the Colorado clients failed the primary GeoIP lookup, and the Cloudflare worker fell back to using the IATA code from the request. The requests from Colorado were all received by the Cloudflare Dallas colocation site, which is nearest the Houston cache. The original GeoIP service chose the Kansas City cache, which is the correct decision. It is unknown whether the original GeoIP service chose the KC cache because it knew the GeoIP location of the clients, or because it defaulted to the Kansas default.


    The second site where the Cloudflare worker implementation was incorrect was SIUE-CC-production (Southern Illinois University Edwardsville). In this case, the original GeoIP service chose Chicago, while the new service chose Kansas City. Edwardsville is almost equidistant from the KC and Chicago caches; the difference in the distance to the caches is ~0.6 km, with Chicago being closer.


    An example of a site that did not work with GeoIP was ASU-DELL_M420 (Arizona State University). The original service returned that the KC cache was the nearest. The Cloudflare service was given the default lat/lon for a failed GeoIP lookup, the middle of Kansas, but the data center serving the request had the airport code of LAX (Los Angeles). The nearest cache to LAX is the UCSD cache, which is the correct cache decision.


    During the evaluation, I originally used the Cloudflare worker development DNS address, stash-location.djw8605.workers.dev. The Purdue University and American Museum of Natural History sites both blocked the development DNS address. The block came from an OpenDNS service, which reported the domain had been linked to malware and phishing. Since the DNS hostname was hours old, it’s likely that most *.workers.dev domains were blocked.


    Conclusion


    Improving the cache selection can improve download efficiency. It is left as future work to measure whether the nearest geographical cache is the best choice. While the OSDF is using a GeoIP service for cache selection, it is important to select the correct cache. Using the new Cloudflare service results in 95% correct cache decisions vs. 86% with the original service.


    Cloudflare Workers is also very affordable for the scale that the OSDF would require. The first 100,000 requests are free, and it is $5/month for the next 10 million requests. The OSPool runs between 100,000 and 230,000 jobs per day, easily fitting within the $5/month tier.

    \ No newline at end of file diff --git a/_posts/dweitzel/2022-9-14-dashboards.md b/_posts/dweitzel/2022-9-14-dashboards.md deleted file mode 100644 index 35970bf..0000000 --- a/_posts/dweitzel/2022-9-14-dashboards.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -author: Derek Weitzel's Blog -author_tag: dweitzel -blog_subtitle: Thoughts from Derek -blog_title: Dereks Web -blog_url: https://derekweitzel.com/ -category: dweitzel -date: '2022-09-14 06:00:00' -layout: post -original_url: https://derekweitzel.com/2022/09/14/dashboards/ -slug: dashboards-for-learning-data-visualizations -title: Dashboards for Learning Data Visualizations ---- - -

    Creating dashboards and data visualizations is a favorite pastime of mine. Also, I jump at any chance to learn a new technology. That is why I have spent the last couple of months building dashboards and data visualizations for various projects while learning several web technologies.


    Through these dashboards, I have learned many new technologies, noted with each project below.

    GP-ARGO Dashboard


    The Great Plains Augmented Regional Gateway to the Open Science Grid (GP-ARGO) is a regional collaboration of 16 campuses hosting computing that is made available to the OSG. My goal with the GP-ARGO dashboard was to show who is using the resources, as well as to give a high-level overview of the region and the sites hosting GP-ARGO resources.


    The metrics are gathered from OSG’s GRACC ElasticSearch. The list of projects is also from GRACC, and the bar graph in the bottom right is simply an iframe to a Grafana panel from GRACC.


    Technologies used: React, NextJS, Leaflet, Elasticsearch


    Repo: GP-ARGO Map


    [Screenshot: GP-ARGO dashboard]


    OSDF Website


    My next website was the Open Science Data Federation landing page. I was more bold in the design of the OSDF page. I took heavy inspiration from other technology websites such as the Mapbox website and the Lens website. The theme is darker and it was also my first experience with the TailwindCSS library. Additionally, I learned the CSS flexbox layout techniques.


    The spinning globe uses the Globe.gl library. The library is great for creating visualizations that show distribution throughout the world. On the globe I added “transfers” between the OSDF origins and caches. Each origin sends transfers to every cache in the visualization, though it’s all just animation; there is no data behind the transfers, and it’s only for visual effect. Also, on the globe, each cache location is labeled. The globe can be rotated and zoomed with your mouse.


    The number of bytes read and files read is gathered using the ElasticSearch client querying GRACC, the OSG’s accounting service. The OSG gathers statistics on every transfer a cache or origin performs. Additionally, we calculate the rate of data transfers and the rate of files being read using GRACC.


    One unique feature of the OSDF website is the resiliency of the bytes-read and files-read metrics. We wanted to make sure that the metrics would be shown even if a data component has failed. The metrics are gathered in three different ways for resiliency:

    1. If all components are working correctly, the metrics are downloaded from the OSG’s Elasticsearch instance.
    2. If OSG Elasticsearch has failed, the dashboard pulls saved metrics from NRP’s S3 storage. The metrics are saved every time they are successfully gathered from Elasticsearch, so they should be fairly recent.
    3. The metrics are gathered and saved on each website build. The metrics are static and immediately available upon website load. If all else fails, these saved static metrics are always available, even if they may be old.
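
    In Python pseudocode (the site implements this in JavaScript), the fallback is a try-in-order chain over the three tiers:

```python
def get_metrics(sources):
    """Try each metrics source in order; fall back to the next on failure."""
    for name, fetch in sources:
        try:
            return name, fetch()
        except Exception:
            continue  # this source failed; try the next tier
    raise RuntimeError("no metrics source available")

# Tiers: live Elasticsearch, the saved S3 copy, metrics baked in at build
# time. The fetch lambdas are placeholders for the real data calls.
source_chain = [
    ("elasticsearch", lambda: {"bytes_read": 1.2e15}),
    ("s3", lambda: {"bytes_read": 1.1e15}),
    ("static", lambda: {"bytes_read": 1.0e15}),
]
print(get_metrics(source_chain))
```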

    Technologies used: React, NextJS, Globe.gl


    Repo: OSDF Website


    [Screenshot: OSDF website]


    NRP Dashboard


    The National Research Platform dashboard is largely similar to the GP-ARGO dashboard. It uses the same basic framework and technologies. But the data acquisition is different.


    The metrics shown are the number of GPUs allocated, the number of pods running, and the number of active research groups. The metrics are gathered from the NRP’s Prometheus server on demand. The graph in the background of each metric is generated with D3.js.


    Technologies used: React, NextJS, D3.js, Prometheus, TailwindCSS


    Repo: NRP Map App


    [Screenshot: NRP dashboard]


    PNRP Website


    The Prototype National Research Platform (PNRP) is an NSF research platform. The dashboard is also in the prototype stage, as the PNRP hardware is not yet fully delivered and operational.


    The dashboard is my first experience with a large map from Mapbox. I used a React binding to interface with the Mapbox service. Also, when you click on a site, it zooms into the building where the PNRP hardware will be hosted.


    The transfer metrics come from the NRP’s Prometheus, which shows the bytes moving into and out of each node. The transfer metrics are for cache nodes near the sites, but once the PNRP hardware becomes operational, the transfer metrics will show the site’s own cache.


    Technologies Used: React, NextJS, Mapbox, TailwindCSS, Prometheus


    Repo: NRP Website


    [Screenshot: PNRP website]

    \ No newline at end of file diff --git a/_posts/gaborsamu/2013-10-9-hpc411_bridge.md b/_posts/gaborsamu/2013-10-9-hpc411_bridge.md deleted file mode 100644 index b07e8fc..0000000 --- a/_posts/gaborsamu/2013-10-9-hpc411_bridge.md +++ /dev/null @@ -1,190 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2013-10-09 19:12:15' -layout: post -original_url: https://www.gaborsamu.com/blog/hpc411_bridge/ -slug: ibm-platform-hpc-4-1-1-creating-a-network-bridge-on-compute-nodes -title: IBM Platform HPC 4.1.1- Creating a network bridge on compute nodes ---- - -

    Applies to

    - IBM Platform HPC V4.1.1
    - IBM Platform Cluster Manager V4.1.1

    Introduction


    IBM Platform HPC provides the ability to customise the network configuration of compute nodes via Network Profiles. Network Profiles support a custom NIC script for each defined interface.


    This provides the ability to configure network bonding and bridging. Here we provide a detailed example of how to configure a network bridge in a cluster managed by IBM Platform HPC.


    IBM Platform HPC includes xCAT technology for cluster provisioning. xCAT includes a script (/install/postscripts/xHRM) which may be used to configure network bridging. This script is leveraged as a custom network script in the example below.


    Example


    The configuration of the network provision may be viewed in the IBM Platform HPC Web console at: Resources > Node Provisioning > Networks.


    The configuration of network provision may also be viewed using the lsdef CLI.

    # lsdef -t network provision
    Object name: provision
        domain=private.dns.zone
        dynamicrange=192.0.2.201-192.0.2.254
        gateway=<xcatmaster>
        mask=255.255.255.0
        mgtifname=eth0
        net=192.0.2.0
        staticrange=192.0.2.15-192.0.2.49
        staticrangeincrement=1
        tftpserver=192.0.2.50

    The Network Profile default_network_profile, which includes the network provision, may be viewed in the IBM Platform HPC Web console at: Resources > Node Provisioning > Provisioning Templates > Network Profiles.


    {{< figure src="/images/network_profile_provision_wiki.png" >}}


    The Network Profile default_network_profile configuration may also be viewed using the lsdef CLI.

    # lsdef -t group __NetworkProfile_default_network_profile
    Object name: __NetworkProfile_default_network_profile
        grouptype=static
        installnic=eth0
        members=
        netboot=xnba
        nichostnamesuffixes.eth0=-eth0
        nichostnamesuffixes.bmc=-bmc
        nicnetworks.eth0=provision
        nicnetworks.bmc=provision
        nictypes.eth0=Ethernet
        nictypes.bmc=BMC
        primarynic=eth0

    Here, we configure a network bridge br0 against eth0 for compute nodes using a new Network Profile.

    1. Add a new Network Profile with the name default_network_profile_bridge via the IBM Platform HPC Web console. As an Administrator user, browse to Resources > Node Provisioning > Provisioning Templates > Network Profiles and select the button Add.

    A total of three devices are required to be added:

    - eth0
      - Type: Ethernet
      - Network: provision
    - bmc
      - Type: BMC
      - Network: provision
    - br0
      - Type: Customized
      - Network: provision
      - Configuration Command: xHRM bridgeprereq eth0:br0 (creates network bridge br0 against eth0)

    The new Network Profile default_network_profile_bridge is shown below.

    [Screenshot: Network Profile default_network_profile_bridge]
    2. Now we are ready to provision the nodes using the new Network Profile default_network_profile_bridge. To begin the process of adding nodes, navigate in the IBM Platform HPC Web console to Resources > Devices > Nodes and select the button Add. Within the Add Nodes window, optionally select Node Group compute, and select Specify Properties for the provisioning template. This will allow you to select the newly created network profile default_network_profile_bridge. Here the hardware profile IPMI and stateful provisioning are used.

    Nodes are added using Auto discovery by PXE boot. Nodes may also be added using a node information file.


    The nodes are powered on, detected by IBM Platform HPC, and provisioned. In this example, two nodes, compute000 and compute001, are detected and subsequently provisioned.

    3. Once the nodes have been provisioned and complete their initial boot, they appear in the IBM Platform HPC Web console (Resources > Devices > Nodes) with Status booted and Workload Agent OK.

    The network bridge is configured on the nodes as expected. We may see this via the IBM Platform HPC Web console by browsing to Resources > Devices > Nodes, selecting the Summary tab, and scrolling to Other Key Properties.


    Finally, using the CLI xdsh, we remotely execute ifconfig on node compute001 to check the configuration of interface br0.

    # xdsh compute001 ifconfig br0
    compute001: br0       Link encap:Ethernet  HWaddr 00:1E:67:49:CC:E5
    compute001:           inet addr:192.0.2.20  Bcast:192.0.2.255  Mask:255.255.255.0
    compute001:           inet6 addr: fe80::b03b:7cff:fe61:c1d4/64 Scope:Link
    compute001:           UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
    compute001:           RX packets:26273 errors:0 dropped:0 overruns:0 frame:0
    compute001:           TX packets:42490 errors:0 dropped:0 overruns:0 carrier:0
    compute001:           collisions:0 txqueuelen:0
    compute001:           RX bytes:11947435 (11.3 MiB)  TX bytes:7827365 (7.4 MiB)
    compute001:

    As expected, the compute nodes have been provisioned with a network bridge br0 configured.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2013-11-22-sc13.md b/_posts/gaborsamu/2013-11-22-sc13.md deleted file mode 100644 index 33ce3c2..0000000 --- a/_posts/gaborsamu/2013-11-22-sc13.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2013-11-22 16:44:50' -layout: post -original_url: https://www.gaborsamu.com/blog/sc13/ -slug: supercomputing-2013-sc13- -title: Supercomputing 2013 (SC13) ---- - -

    Super Computing 2013 has now come to a close. For those of you who were in Denver, we hope that you had the opportunity to visit the IBM booth. Among the many live demonstrations running at the IBM booth, there was a demo of IBM Platform HPC for System x.


    In addition to the demo running live on IBM NeXtScale, there was also a static IBM NeXtScale system on display for people to touch and see.


    The IBM Platform HPC demo featured IBM NeXtScale and the Weather Research and Forecasting Model (WRF) application.


    Even though SC13 has just wrapped up, I’m already looking forward to next year’s events.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2013-7-9-isc13_wrapup.md b/_posts/gaborsamu/2013-7-9-isc13_wrapup.md deleted file mode 100644 index fe24201..0000000 --- a/_posts/gaborsamu/2013-7-9-isc13_wrapup.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2013-07-09 17:58:32' -layout: post -original_url: https://www.gaborsamu.com/blog/isc13_wrapup/ -slug: isc-2013-wrapup-ibm-platform-hpc-and-intel-xeon-phi -title: ISC 2013 wrapup- IBM Platform HPC and Intel Xeon Phi ---- - -

    This past June at ISC 2013, the IBM booth featured a live demonstration of IBM Platform HPC V3.2 managing an IBM iDataplex cluster equipped with Intel Xeon Phi coprocessors.


    As part of the demonstration, the potential performance gains of running an application on Intel Xeon Phi coprocessors were shown by running the visually stunning Intel Embree crown rendering on Intel Xeon and Intel Xeon Phi simultaneously.


    IBM Platform HPC provides a unified web-based interface for deployment and management of the cluster. Additionally, it includes application submission templates to allow administrators the flexibility to create templates that greatly simplify the submission of jobs for their users. A number of templates for well-known ISV and open source applications are also included as standard. For ISC, a template was created to allow Intel Embree to be easily launched through the built-in workload manager for execution on Intel Xeon or Intel Xeon Phi coprocessors.


    Finally, when the processor-intensive Intel Embree application was running, the monitoring and reporting capabilities of IBM Platform HPC provided both real-time and historical reporting on the health of each node in the cluster, including metrics specific to the Intel Xeon Phi coprocessor such as temperature, power consumption and utilization, all through a consistent web-based interface.


    Enjoy the short video of the demo here.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2014-3-13-arm_days_of_old.md b/_posts/gaborsamu/2014-3-13-arm_days_of_old.md deleted file mode 100644 index 01b8e0c..0000000 --- a/_posts/gaborsamu/2014-3-13-arm_days_of_old.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2014-03-13 17:29:42' -layout: post -original_url: https://www.gaborsamu.com/blog/arm_days_of_old/ -slug: looking-forward-from-the-arm-days-of-old -title: Looking forward from the ARM days of old ---- - -

    These days we often hear about CPUs based upon ARM cores. They can be found in mobile phones, embedded systems, laptops and even servers. Indeed, projects such as Mont Blanc are investigating the use of ARM based systems for high performance computing (HPC).


    Back in the late 1980’s, I was a student in high school and a budding computer scientist. In those days, my view of the personal computer market was very North American centric. Until one day I read about a new desktop computer from the UK known as the Acorn Archimedes. This system was based upon a RISC CPU which was given the name ARM (Acorn RISC Machine). The writeup in the local Toronto Computes! newspaper indicated that Olivetti Canada was bringing the Acorn Archimedes range to North America. As luck would have it, Olivetti was just down the road from me. After a few phone calls, I was invited to their offices for some hands-on time with a top of the line Acorn Archimedes 440. This was the start of my journey with ARM based systems. The folks at Olivetti were kind enough to let me use the Archie over a number of days. During that time, I had a chance to try out a number of different software products, including games and productivity software. Overall, I was greatly impressed by the Archie, its operating system RISC OS, and its WIMP interface. One game in particular I remember quite well, called Zarch, showed off the 3D graphics capabilities of the system.


    The only catch for me was the list price of the system. As I recall, it was around $2,500 CAD, which for me at the time was prohibitive.


    Moving forward to 2014, I’ve recently been tinkering with the ARM-based mini PC UDOO Quad running Debian Wheezy EABI (hard-float). This happens to intersect with another area of interest, Technical Computing.


    I’ll share more of my experiences with Udoo Quad in the coming weeks.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2014-4-11-armed_ready_lsf.md b/_posts/gaborsamu/2014-4-11-armed_ready_lsf.md deleted file mode 100644 index 3ec9102..0000000 --- a/_posts/gaborsamu/2014-4-11-armed_ready_lsf.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2014-04-11 16:39:53' -layout: post -original_url: https://www.gaborsamu.com/blog/armed_ready_lsf/ -slug: armed-and-ready-with-ibm-platform-lsf -title: Armed and ready with IBM Platform LSF ---- - -

These days it's not uncommon to hear about CPUs based upon ARM cores. They can be found in mobile phones, embedded systems, laptops and even servers. Indeed, recently there have been a number of major announcements from vendors building processors based on ARM cores. This includes the AMD Opteron A1100, NVIDIA Tegra K1 and even the Apple A7, which is used in the iPhone 5s. What these all have in common is that they are 64-bit and based on the ARM v8 ISA. At the same time, the ARM-server chip startup Calxeda announced it was shutting down. Surging power requirements, as well as the arrival of 64-bit chips, have led to renewed interest in energy-efficient ARM-based processors for high performance computing.

    - -

When building out an infrastructure for Technical Computing, a workload manager is typically used to control access to the computing resources. As it turns out, the leading workload manager IBM Platform LSF (formerly from Platform Computing) has supported Linux on ARM for about 10 years. In fact, today there are IBM clients using Platform LSF on Linux ARM-based clusters as part of mobile device design and testing.

    - -

The current release of IBM Platform LSF 9.1.2 supports Linux on ARM v7, with upcoming support for ARM v8. Given that Platform LSF provides the ability to build out heterogeneous clusters, creating a compute cluster containing ARM, Power and x86 based nodes is a snap. Jobs may be targeted to a specific processor type (see the sketch below), and the optional portal IBM Platform Application Center provides an easy-to-use, highly configurable, application-centric web-based interface for job management.
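To make that concrete, here is a minimal sketch of targeting jobs by processor type or model using a resource requirement string - the type and model names below are the ones reported by lshosts later in this post:

    $ # run on any ARM node (host type LINUX_ARM)
    $ bsub -R "select[type==LINUX_ARM]" ./myapp
    $ # or pin the job to a particular model, e.g. the ARMv7 node
    $ bsub -R "select[model==ARM7l]" ./myapp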

    - -

    Hello. How do you “doo”?

    - -

I've recently had the opportunity to test IBM Platform LSF on a two-node, ARM-based cluster. The IBM Platform LSF master node was a Udoo Quad system running Debian Wheezy ARMv7 EABI hard-float. The second node was running Fedora on an ARM v8 simulator. Installation and operation of the software was identical to other platforms. Using the Platform LSF ELIM (External LIM) facility for adding external load indices, I was able to quickly create a script to report the processor temperature on the Udoo Quad system.
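An ELIM is simply an executable, placed in LSF_SERVERDIR, that periodically writes the number of external indices followed by name/value pairs to stdout. A minimal sketch of such a script follows; the sysfs thermal path is board-specific and is an assumption here:

    #!/bin/sh
    # Hypothetical ELIM sketch: report the CPU temperature as the
    # external load index "cputemp". LSF reads these lines from stdout.
    while true; do
        # the i.MX6 on the Udoo Quad exposes millidegrees Celsius here (assumption)
        temp=$(awk '{ printf "%.1f", $1 / 1000 }' /sys/class/thermal/thermal_zone0/temp)
        # format: <number-of-indices> <name1> <value1> ...
        echo "1 cputemp $temp"
        sleep 15
    done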

    - -

Now, putting Platform LSF through its paces, we see that the type, model and other physical characteristics of the nodes are detected.

    - -
$ lshosts -w
HOST_NAME  type       model  cpuf  ncpus  maxmem  maxswp  server  RESOURCES
udoo       LINUX_ARM  ARM7l  60.0      4    875M       -  Yes     (mg)
ma1arms4   LINUX_ARM  ARM8   60.0      1    1.8G    1.9G  Yes     ()
    - -

    Looking at the load information on the system, we see the built-in load -indices, in addition to the cputemp metric which I introduced to report the -CPU temperature (Celsius). At this point the system is essentially idle.

    - -
    $ lsload -l
HOST_NAME  status  r15s  r1m   r15m  ut   pg   io   ls  it  tmp    swp   mem   cputemp
udoo       ok      0.5   0.6   1.5    4%  0.0  311   1   0  1297M  0M    701M  45.0
ma1arms4   busy    3.6   *7.7  6.2   52%  0.0   50   3   0  954M   1.9G  1.6G  0.0
    - -

Next, we submit a job for execution to Platform LSF. Rather than the requisite sleep job, we submit something a bit more interesting: the HPC Challenge Benchmark (HPCC). Debian Wheezy happens to include a pre-compiled hpcc binary built against OpenMPI.

    - -

As the Udoo Quad is a 4-core system (as the name implies), hpcc is submitted requesting 4 cores.

    - -
    $ bsub -n 4 mpiexec -n 4 /usr/bin/hpcc
Job <2> is submitted to default queue <normal>.
    - -

    With HPCC running, we quickly see the utilization as well as the CPU -temperature increase to 60C.

    - -
    $ lsload -l
HOST_NAME  status  r15s  r1m   r15m  ut   pg   io   ls  it  tmp    swp   mem   cputemp
udoo       ok      5.1   5.1   2.4   94%  0.0   49   1   0  1376M  0M    497M  60.0
ma1arms4   ok      0.5   1.1   1.2   40%  0.0   50   3   0  954M   1.9G  1.6G  0.0
    - -

During the life of the job, the resource utilization may be easily viewed using the Platform LSF user commands. This includes details such as the PIDs that comprise the job.

    - -
$ bjobs -l

Job <2>, User <debian>, Project <default>, Status <RUN>, Queue <normal>,
                    Command <mpiexec -n 4 /usr/bin/hpcc>, Share group charged </debian>
Sun Feb 2 23:49:48: Submitted from host <udoo>, CWD </opt/ibm/lsf/conf>,
                    4 Processors Requested;
Sun Feb 2 23:49:48: Started on 4 Hosts/Processors <udoo> <udoo> <udoo> <udoo>,
                    Execution Home </home/debian>, Execution CWD </opt/ibm/lsf/conf>;
Sun Feb 2 23:51:05: Resource usage collected.
                    The CPU time used is 227 seconds.
                    MEM: 140 Mbytes; SWAP: 455 Mbytes; NTHREAD: 8
                    PGID: 15678; PIDs: 15678 15679 15681 15682 15683 15684 15685
....
....
    - -

    New Roads?

    - -

Here we could speak of GFLOPS and other such measures of performance, but that was not my objective. The key is that there is a growing interest in non-x86 solutions for Technical Computing. IBM Platform LSF software has supported, and continues to support, a wide variety of operating systems and processor architectures, from ARM to IBM Power to IBM System z.

    - -

As for ARM-based development boards such as the Udoo Quad, Parallella Board, etc., they are inexpensive as well as energy efficient. This makes them of interest to HPC scientists looking at possible approaches to energy efficiency for HPC workloads. Let us know your thoughts about the suitability of ARM for HPC workloads.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2014-6-28-isc14_leipzig.md b/_posts/gaborsamu/2014-6-28-isc14_leipzig.md deleted file mode 100644 index 17f7f9a..0000000 --- a/_posts/gaborsamu/2014-6-28-isc14_leipzig.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2014-06-28 16:24:51' -layout: post -original_url: https://www.gaborsamu.com/blog/isc14_leipzig/ -slug: isc-2014-auf-wiedersehen-leipzig -title: ISC 2014- Auf Wiedersehen Leipzig ---- - -

    I’ve just returned from International Supercomputing 2014, which took place in -Leipzig, Germany. As was the case in 2013, I greatly enjoyed my time at the -conference, and the hospitality in Leipzig. It’s a wonderful city to visit.

    - -

You will have read in my previous blogs about my experiences with ARM-based developer systems, and running IBM Platform LSF. For me, ISC 2014 was a very interesting event for one big reason - variety! Variety is the spice of life, as they say. And the variety in this case came from the displays at OpenPOWER Foundation members Mellanox and NVIDIA, as well as servers based on the newly unveiled Applied Micro X-Gene 64-bit ARM processors.

    - -

Although small in size, the Tyan POWER8 motherboard with an NVIDIA Tesla K40 installed made a strong statement. Though OpenPOWER was only founded in 2013, we are already seeing the benefits of the foundation - with a varied member base including education, interconnect, and accelerator vendors - all with an HPC pedigree. With a rich and growing membership, these look to be exciting times for the IBM POWER8 processor and the OpenPOWER Foundation.

    - -

    For those of you who did not attend, the IBM booth had a number of live demos -including the IBM Platform Computing Cloud Service, which is built on top of -IBM SoftLayer infrastructure. This service can provide both hybrid and -stand-alone clouds and is ideally suited for HPC workloads - as it’s -non-virtualized.

    - -

So we say Auf Wiedersehen to Leipzig for now and look forward to the spice that New Orleans will provide this autumn, where there will surely be more exciting things emerging from the OpenPOWER Foundation!

    - -
    -
    \ No newline at end of file diff --git a/_posts/gaborsamu/2014-9-5-docker_for_hpc.md b/_posts/gaborsamu/2014-9-5-docker_for_hpc.md deleted file mode 100644 index 315c4fc..0000000 --- a/_posts/gaborsamu/2014-9-5-docker_for_hpc.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2014-09-05 16:21:11' -layout: post -original_url: https://www.gaborsamu.com/blog/docker_for_hpc/ -slug: docker-for-hpc -title: Docker for HPC ---- - -

With the recent release of Docker 1.0 and the broad industry backing from organizations such as Red Hat and IBM, it's no wonder that interest in the use and application of this Linux container technology continues to grow. Docker is shipped with Red Hat Enterprise Linux 7 and there exists a growing registry of Docker images for a wide variety of applications.

    - -

For those who are unfamiliar with Docker, it's essentially a container technology for the Linux platform, which leverages existing and well-proven technologies such as control groups (cgroups) and Linux Containers (LXC). Docker brings these technologies together and provides ease of setup, ease of use and compelling efficiency.

    - -

The IBM Platform Computing team has recently announced the availability of the IBM Platform LSF and Docker integration, which is available as an open beta on Service Management Connect. Supplementing the release of the integration is a white paper focused on the suitability of Docker for high performance computing (HPC), which includes an easy-to-follow, real-world example of how to run a Docker image under Platform LSF.

    - -

    Happy tinkering!

    \ No newline at end of file diff --git a/_posts/gaborsamu/2015-1-30-ultrasparc_laptop.md b/_posts/gaborsamu/2015-1-30-ultrasparc_laptop.md deleted file mode 100644 index db19c49..0000000 --- a/_posts/gaborsamu/2015-1-30-ultrasparc_laptop.md +++ /dev/null @@ -1,73 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2015-01-30 15:07:57' -layout: post -original_url: https://www.gaborsamu.com/blog/ultrasparc_laptop/ -slug: ultrasparc-powered-laptop-circa-2001 -title: UltraSPARC powered laptop - circa 2001 ---- - -

It's been ages since my last blog. What better way to start off the new year than by looking at the past. In this case, let's wind the clock all the way back to 2001. This was the era of the Intel Pentium 4 processors. However, today we'll be looking at something far less pedestrian. Based on the Scalable Processor ARChitecture (commonly known as SPARC), the NatureTech 777 GenialStation is an UltraSPARC IIe laptop computer. Why do I have an UltraSPARC IIe based laptop computer? Why not? And it's oh so cool with its lovely blue and gray chassis, as opposed to boring old black. The NatureTech 777 laptop boasts the following specs:

    - -
- SUN UltraSPARC IIe @ 500 MHz w/256-KB L2 Cache
- 15.0" TFT SXGA LCD Panel
- 256MB ECC RAM
- 80GB IDE disk
- CD/DVD Combo drive
- 3.5" Floppy disk drive
- 5400mAh/11.1V Li-ion Smart Battery Pack (mine is dead)
- Built-in H/W Security Controller, 4-button input
- A honking noisy fan that always runs at full speed

What can you do with a NatureTech 777 laptop? Well, at this stage of its life, I don't use it for much apart from tinkering. Back in the day, being able to take SUN Solaris on the road in a portable package was quite impressive, and I understand that these systems also went for a premium price at the time.

    - -

    I was surprised to not find any NatureTech video on YouTube or other such sites. So, I’m pleased to present this beast of a laptop in all its glory booting up -Solaris 9 and running Linpack - of course compiled with the requisite SunPro -compilers (and SUN math libraries). No speed records broken here of course, -and with that fan running constantly in overdrive, I would not expect any -thermal issues either :)

    - -

Booting Solaris 9 [video]

Stressing the mighty UltraSPARC IIe with Linpack [video]

I'm lucky enough to have the fancy laptop bag from the manufacturer, which proudly proclaims that it's carrying a SPARC-based piece of equipment.

[photo]

As the SUN sets on this blog (pun intended), I reminisce about the days of variety in computing - different processors, different operating systems - when RISC was king. Hopefully, we are entering another such era with the rise of ARM, OpenPOWER, MIPS and the others that are out there.

    - -

    Varietas Delectat!

    \ No newline at end of file diff --git a/_posts/gaborsamu/2015-2-16-workpadz50_netbsd.md b/_posts/gaborsamu/2015-2-16-workpadz50_netbsd.md deleted file mode 100644 index 883c2e8..0000000 --- a/_posts/gaborsamu/2015-2-16-workpadz50_netbsd.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2015-02-16 14:51:20' -layout: post -original_url: https://www.gaborsamu.com/blog/workpadz50_netbsd/ -slug: ibm-workpad-z50-netbsd-an-interesting-combination -title: IBM Workpad z50 & NetBSD - an interesting combination ---- - -

This week we look at another RISC-powered notebook, this time from IBM. Although IBM did produce a line of PowerPC-based ThinkPad systems, this blog is focused on a little-known system called the IBM Workpad z50. This Microsoft Handheld PC form factor system was launched in March 1999 and ran Windows CE at the time. As we'll see below, with some ingenuity it is also able to run NetBSD, which makes it a much more interesting proposition (at least for me). Ironically, although this is a high performance computing (HPC) focused blog, the "HPC" in this case stands for "Handheld PC".

    - -

The Workpad z50 has a form factor smaller than a notebook, but has what I consider to be an excellent keyboard and, of course, the trademark ThinkPad TrackPoint! Looking more closely at the specifications:

    - -
- NEC VR4121 MIPS R4100 CPU @ 131 MHz
- 16 MB System RAM (expandable)
- 16 MB System ROM
- 8.4" LCD Display 640x480 (16-bit)
- External Monitor connector (SVGA)
- Serial port
- Infrared port
- CF slot
- PCMCIA slot

What prevents me from taking my pristine Workpad z50 to the local electronics recycling facility is NetBSD. With a little effort it is possible to install recent versions of NetBSD on the Workpad z50 and even run the X Window System. There are a number of sources of information on this topic, including some videos on YouTube which helped me a great deal:

[videos]

I won't run through the install procedure here, as that's been well covered already in the above series of videos. Rather, let's look at the boot-up sequence and, in keeping with the high performance computing theme, run a simple benchmark. Links to the videos follow below:

    - -

The requisite system bootup [video]

Starting the X Window System and running Linpack [video]

Using NetBSD pkgsrc, I have set up NetBSD on an x86-based system and have taken advantage of distcc to cross-compile binaries. This helps greatly to get packages quickly compiled for the system. Note that I ran into a lot of local compiles failing due to lack of RAM, so cross-compiling is almost a must.
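For the curious, a minimal sketch of the pkgsrc side of such a setup - the helper hostname is hypothetical, and the x86 box needs a cross-toolchain targeting the z50's mipsel CPU:

    # on the Workpad z50: point distcc at the x86 helper (hostname is an example)
    export DISTCC_HOSTS="buildhost"

    # in /etc/mk.conf: ask pkgsrc to wrap compiler invocations with distcc
    PKGSRC_COMPILER=    distcc gcc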

    - -

    Equipped with PCMCIA, I’m able to easily add to the Workpad z50 such -capabilities as Ethernet, Wireless networking and even SCSI. Below is my -collection of PCMCIA adaptors.

[photo]

Next steps? I'll be looking to move to the NetBSD 6.x series and compile a more compact kernel (with the drivers I don't require removed). And unlike the system in my previous blog, this one is silent :)

    \ No newline at end of file diff --git a/_posts/gaborsamu/2015-5-11-hpc_seeing_believing.md b/_posts/gaborsamu/2015-5-11-hpc_seeing_believing.md deleted file mode 100644 index 01d0e4a..0000000 --- a/_posts/gaborsamu/2015-5-11-hpc_seeing_believing.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2015-05-11 15:34:40' -layout: post -original_url: https://www.gaborsamu.com/blog/hpc_seeing_believing/ -slug: hpc-seeing-is-believing -title: HPC- Seeing is believing ---- - -

People who know me know that I like to tinker. Whether it's with cars, computers or other mechanical gizmos, I've always enjoyed dismantling and reassembling things - understanding what makes them tick. Maintaining classic computers is a passion of mine and, as you've seen in my previous blogs on that topic, I've always tried to add an element of high performance computing when tinkering with computers. Whether on a classic SPARC-based laptop, a MIPS smartbook or a modern ARM developer board, there is a sense of achievement in getting such systems installed in 2015 and successfully running a benchmark, for example. Even when running a simple home network, in this case with a wild mix of machines, the importance of monitoring is apparent.

    - -

For organizations that make a considerable investment in high performance computing infrastructure, monitoring this infrastructure and understanding how it's being used is of paramount importance. IBM Platform RTM is comprehensive monitoring, reporting and alerting software for HPC environments running IBM Platform LSF. It takes the guesswork out of HPC infrastructure monitoring by aggregating system, workload and license consumption information, all in a single tool.

    - -
    -
    - -

Whether you're a system admin or a line-of-business manager, this Technical Brief provides an in-depth look at the importance of comprehensive HPC infrastructure monitoring - which allows organizations to correlate workload, system and license consumption metrics in a single tool.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2015-5-13-sdi_openpower.md b/_posts/gaborsamu/2015-5-13-sdi_openpower.md deleted file mode 100644 index a7628fe..0000000 --- a/_posts/gaborsamu/2015-5-13-sdi_openpower.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2015-05-13 15:24:34' -layout: post -original_url: https://www.gaborsamu.com/blog/sdi_openpower/ -slug: ibm-software-defined-infrastructure-put-the-power-down-and-jump-the-chasm- -title: IBM Software Defined Infrastructure- Put the POWER down and jump the chasm! ---- - -

OpenPOWER continues to put the power down and accelerate strongly in 2015. Earlier this year, the First Annual OpenPOWER Summit took place, and more recently Cabot Partners published the paper Crossing the Performance Chasm with OpenPOWER, outlining the benefits of OpenPOWER for HPC. Reading through that paper, one important point that stuck out was the set of considerations when choosing an HPC system. It suggests that rather than using point benchmarks, one must consider the performance of workflows across the HPC Data Life Cycle. This seems a very sensible approach, actually. Would you choose a car strictly on its 0-100 km/h time? Well, when I was 16 years old, probably yes. What about braking, cornering, economy, safety? You need strong performance in all categories. The OpenPOWER Foundation achieves just this - by bringing together organizations with broad expertise, from accelerators to interconnects, around IBM POWER server technology.

    - -

IBM Software Defined Infrastructure helps to wield the sword of OpenPOWER for high performance computing workloads. Featuring broad OS/platform support, including Linux on POWER (Little Endian), IBM Platform Computing software products provide capabilities spanning application management, infrastructure management, job scheduling, as well as monitoring and reporting.

    - -

    Learn more about the IBM Software Defined Infrastructure for high performance -computing on OpenPOWER in this presentation from the OpenPOWER Summit. Put -the POWER down and jump the chasm!

    \ No newline at end of file diff --git a/_posts/gaborsamu/2015-5-29-lsf_docker_whale.md b/_posts/gaborsamu/2015-5-29-lsf_docker_whale.md deleted file mode 100644 index 89d9861..0000000 --- a/_posts/gaborsamu/2015-5-29-lsf_docker_whale.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2015-05-29 03:49:58' -layout: post -original_url: https://www.gaborsamu.com/blog/lsf_docker_whale/ -slug: ibm-platform-lsf-and-docker-a-whale-of-a-time- -title: IBM Platform LSF and Docker- A Whale of a time! ---- - -

Containers are useful. Whether you're shipping things across the blue seas or encapsulating applications on a computer system, they provide numerous benefits. HPC administrators will know that applications today can depend upon multiple packages, libraries and environments. Docker, a container technology for Linux based on well-proven technologies, brings ease of setup, ease of use and efficiency to application management. Leveraging Docker in High Performance Computing is one approach to address application "dependency hell", as well as easing the transition to the cloud.

    - -

Workload managers are commonly used in High Performance Computing environments to drive effective use of compute resources and ensure alignment of resources with business priorities. IBM Platform LSF, a leading workload management family of products, provides support for workloads to run within user-specified Docker containers by way of an integration package available as an open beta on Service Management Connect.

    - -

By leveraging the rich Platform LSF plugin framework, the Docker integration works seamlessly and allows users to specify a defined Docker image as a submission option. Thanks to the integration, all resource constraints and environment variables are automatically passed to the container, and Platform LSF job lifecycle management functions - including resource usage monitoring and control actions (i.e. suspend, resume and terminate) - are also supported for Docker containers.
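The exact syntax of the open beta isn't reproduced here, but to give a feel for the pattern, later GA releases of Platform LSF express container support as an application profile in lsb.applications plus an ordinary job submission - the profile name and image below are examples only, and real deployments typically set additional site-specific parameters:

    Begin Application
    NAME        = dockerapp
    DESCRIPTION = run jobs inside an Ubuntu container
    CONTAINER   = docker[image(ubuntu:14.04)]
    End Application

    $ bsub -app dockerapp ./myapp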

    - -

Ease the burden of administration and ensure consistency with IBM Platform LSF and Docker - and have a whale of a time!

    \ No newline at end of file diff --git a/_posts/gaborsamu/2015-6-7-clustermanager_eggs.md b/_posts/gaborsamu/2015-6-7-clustermanager_eggs.md deleted file mode 100644 index 2cf1385..0000000 --- a/_posts/gaborsamu/2015-6-7-clustermanager_eggs.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2015-06-07 03:42:03' -layout: post -original_url: https://www.gaborsamu.com/blog/clustermanager_eggs/ -slug: ibm-platform-cluster-manager-how-do-you-like-your-eggs- -title: IBM Platform Cluster Manager - how do you like your eggs? ---- - -

Whether your HPC center is in Lilliput or Blefuscu, you'll appreciate the importance of a flexible and easy-to-use cluster management solution to empower your populations. Administrators need software that will allow them to easily set up, manage, monitor and maintain their infrastructure and ensure consistency for repeatable performance. With the varied workloads we see in modern HPC centers, ranging from traditional HPC to Big Data and Analytics, organizations may also consider building out heterogeneous environments, where different hardware types are used for different workloads. The growing OpenPOWER Foundation stresses the overall importance of workflows across the HPC Data Life Cycle - it's clear that when it comes to solutions for technical computing, it's no longer a one-horse race.

    - -

    IBM Platform Cluster Manager is powerful, easy-to-use infrastructure management for today’s scale out computing needs. The latest release of Platform Cluster -Manager V4.2.1 now provides the ability to manage mixed computing environments - -so whether you’re running Linux on POWER Big-Endian or Little-Endian, the choice is yours. In fact, you can even deploy and seamlessly manage a mixed -infrastructure taking advantage of the latest IBM POWER8 and x86 systems.

    - -

Leveraging xCAT technology, Platform Cluster Manager can manage clusters ranging from 'Lilliputian' in size all the way up to 2500 nodes. Platform Cluster Manager Advanced Edition supports the automated creation of multiple clusters on a shared infrastructure - allowing you to easily satisfy the business requirements of Lilliputians and Blefuscans. For organizations with a single HPC cluster, Platform Cluster Manager Standard Edition provides the ability to quickly provision, run, manage and monitor a technical computing infrastructure with unprecedented ease.

    - -

For users taking advantage of IBM POWER8 systems, Platform Cluster Manager can now provision PowerNV nodes as well as PowerKVM hypervisors, which provides greater flexibility in infrastructure management and optimization. Further enhancements in this release geared towards administrator productivity include IBM POWER8 energy monitoring, PowerKVM monitoring and enhanced switch monitoring.

    - -

    So go ahead. With Platform Cluster Manager you can crack your eggs any way you -like.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2015-7-29-isc15_lookingback.md b/_posts/gaborsamu/2015-7-29-isc15_lookingback.md deleted file mode 100644 index d934b60..0000000 --- a/_posts/gaborsamu/2015-7-29-isc15_lookingback.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2015-07-29 03:08:23' -layout: post -original_url: https://www.gaborsamu.com/blog/isc15_lookingback/ -slug: looking-back-at-isc-high-performance-2015 -title: Looking back at ISC High-Performance 2015 ---- - -

    I’ve always enjoyed a good road trip. There’s just something fun about jumping -in the car, and heading to a far off location. As they say, half of the fun is -just getting to your destination. My latest road trip brought me to -Frankfurt for ISC High-Performance 2015.

    - -

    Crossing all of Austria as well as the southern part of Germany, this trip -proved to be no less exciting than the rest. Breaking down about 50 km from -Frankfurt due to a dead battery, I was fortunate enough to meet a local family -who helped to boost my car so that I could make it in time for the show. -Luckily I had some craft beer to reward them for their troubles. Of course, -part of the excitement this time was the fabled Autobahns of Germany. Here I -could get up to some decent speeds - legally :)

    - -

    Refreshments are always needed on long trips…

[photo]

    Frankfurt too had some interesting surprises in store - including the -interesting culinary treat Handkäse mit Musik, which is a sour milk cheese -served with onions. I’ll let you read what the Musik part is all about. There -too is the infamous Apfelsaftschorle which I constantly mistook for beer at -the ISC High-Performance venue. Such is life :)

    - -

For me, where the rubber hit the road was the ISC High-Performance event itself. The IBM booth (928) featured a refreshing bright yellow colour scheme, like the dawning of a new era of High Performance Computing built on Data Centric Systems and OpenPOWER. In terms of demos, the IBM booth featured a number of live and static demos including:

    - -
- OpenPOWER HPC Server and Cirrascale GPU Developer System
- IBM High Performance Services for HPC
- IBM Data Engine for Analytics
- IBM Watson tranSMART Translational Medicine Solution
- Pluto (astrophysics hydrodynamics/magneto-hydrodynamics) running live on POWER8 + GPU
- OpenFOAM (CFD)
- High Performance Storage System (HPSS)

The OpenPOWER hardware on the show floor attracted a lot of attention. Many people were impressed to behold the two POWER8 systems, which included technology from OpenPOWER members Mellanox and NVIDIA. You may have read about my interest in Power and ARM-based systems in some of my earlier blogs.

    - -
    -
    - -

Being part of the IBM Platform Computing marketing team, I could frequently be found at the IBM High Performance Services for HPC demo point. Here we demonstrated our turnkey cloud solution for HPC workloads, built on top of the IBM SoftLayer cloud and featuring both IBM Platform LSF and Platform Symphony workload management options. The demo leveraged the work done by MINES ParisTech and Transvalor to provide CFD services to French industry. You can read more about how MINES ParisTech and Transvalor leverage the IBM solutions for HPC here.

    - -
    -
    - -

ISC also offered us the opportunity to showcase an interactive conceptual demo of the IBM Platform LSF family of products to passersby. Here users could learn that the Platform LSF family is not simply about workload management. For example, Platform Process Manager and Platform Application Center, two add-on products for Platform LSF, help to boost user productivity through ease of use and simplification.

    - -
    -
    - -

    So what’s next? Toronto to Austin road trip for SC15? Yeah, that doesn’t -sound like a bad idea.

    - -

    See y’all in Texas!

    \ No newline at end of file diff --git a/_posts/gaborsamu/2016-11-11-sc16_stiritup.md b/_posts/gaborsamu/2016-11-11-sc16_stiritup.md deleted file mode 100644 index 6de74ff..0000000 --- a/_posts/gaborsamu/2016-11-11-sc16_stiritup.md +++ /dev/null @@ -1,58 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2016-11-11 00:54:36' -layout: post -original_url: https://www.gaborsamu.com/blog/sc16_stiritup/ -slug: sc16-stir-it-up- -title: SC16- Stir it up! ---- - -

    It’s been ages since I’ve posted to this blog. I’ve not forgotten about it - I’ve been -figuratively stirring the technical computing goulash pot over on the IBM Systems -In the Making blog.

    - -

Having recently moved house, I still have all of the old classic and newer ARM-based systems that I've written about previously mostly packed away. My hands have been more focused on home improvement than on tinkering. As those in HPC circles will know, the annual Supercomputing SC16 event starts this coming Sunday in Salt Lake City, UT. Interestingly, if my memory serves me well, the last time we were in Salt Lake City, for SC12, I was a newbie with IBM, having joined as a result of the acquisition of Platform Computing.

    - -

    The HPC landscape has changed quite a bit since then, including the divestiture of the IBM -x86 server business to Lenovo and the birth of the OpenPOWER Foundation. The OpenPOWER -Foundation has gone from baby steps to sprinting with a huge and diverse group of members -from accelerators, interconnects, research organizations and more - all united on a common -goal - to drive innovation and change in enterprise computing and HPC via the OpenPOWER -platform. It’s like somebody has taken a big wooden spoon and stirred the goulash in the -pot - because we all know that if things stand still for too long in the pot, it’s going to -burn.

    - -

As I've banged on about in previous blogs, I'm more pleased than ever to see this explosion of diversity in HPC, from A(RM) and P(OWER) to X(86). When you throw accelerators such as FPGAs and GPUs into the mix, what is needed more than ever to address the complexity of these diverse environments is a software-defined approach - one which hides the complexity from users and allows them to leverage the power of today's environments.

    - -

IBM Spectrum LSF (formerly Platform LSF) has been making this possible for over 20 years. A glance at the OS and platform support list illustrates the breadth and depth of OS and processor support. Not only does IBM Spectrum LSF make tying together heterogeneous resources easy, its proven technology allows organizations to share resources on a global scale. In fact, the latest IBM Spectrum LSF V10 release from June 2016 contained numerous enhancements, all focused on improving the productivity of HPC users and controlling costs. Read more in this top 10 cool things about IBM Spectrum LSF blog. And looking beyond HPC, the IBM Spectrum Computing family of products helps provide advanced resource management capabilities for diverse workloads including Hadoop and Spark.

    - -

Yours truly will be in Salt Lake City for SC16. Drop by booth 1018 to talk about how IBM software-defined computing can help your organization. IBM will be holding a number of user groups and seminars covering the broad spectrum of IBM solutions for HPC. And for IBM Spectrum LSF users, we'll be holding our annual user group, where you can hear how your peers are using IBM Spectrum LSF to get an advantage, and learn about the latest developments in IBM Spectrum LSF from our experts.

    - -

    Come on and stir it up! You’ll like it!

    \ No newline at end of file diff --git a/_posts/gaborsamu/2016-4-2-reminiscing_computingrenaissance.md b/_posts/gaborsamu/2016-4-2-reminiscing_computingrenaissance.md deleted file mode 100644 index 79e2d24..0000000 --- a/_posts/gaborsamu/2016-4-2-reminiscing_computingrenaissance.md +++ /dev/null @@ -1,47 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2016-04-02 02:44:49' -layout: post -original_url: https://www.gaborsamu.com/blog/reminiscing_computingrenaissance/ -slug: reminiscing-and-the-computing-renaissance -title: Reminiscing and the computing renaissance ---- - -

    Sifting through boxes of 3.5 inch floppy diskettes - some of questionable -provenance in a dusty basement. Gingerly packing up what I consider to be the -holy trinity of Commodore Amiga computers - A1000, A2000, A3000 - all in some -state of working condition. Of course, back in the day, only Amiga made it all -possible - awesome graphic demos, games, word processing, and ray tracing to -Amiga Unix - AMIX, which was one of the first ports of SVR4 to the MC68000 -series processor (yes I do have AMIX installed also).

    - -

    The frustration watching the Death Bed Vigil movie in which Dave Haynie of -Commodore Amiga fame gives us a tour through the Commodore engineering at -headquarters and of course the fire (bankruptcy) sale which happened at -Commodore Canada on Pharmacy Avenue in Toronto.


    Once upon a time, we all carried the respective flags of our favorite platforms - which were varied. It was this rivalry which I think led to these respective -user communities squeezing tremendous performance out of these systems in the -race to show which platform was best.

    - -

    Then it all seemed to change. Suddenly we were all forced to march to the same -clock rhythm - and boredom set in. With this course seemingly set in stone, -how are we to escape this computing Sturm and Drang?

    - -

    GAME ON!

    - -

    Well, for me this hope appeared in 2013 with the announcement of the OpenPOWER Consortium - an open technical community built around the IBM POWER architecture to grow solutions to serve the evolving computing needs of today and the future. -Next week the second annual OpenPOWER Summit takes place in San Jose, United States and if the first event was any indication, this should be a very exciting -event. So Power Up and strap on your accelerators as we’re in for a very -interesting ride!

    \ No newline at end of file diff --git a/_posts/gaborsamu/2017-1-9-woe_is_2016.md b/_posts/gaborsamu/2017-1-9-woe_is_2016.md deleted file mode 100644 index 01532a1..0000000 --- a/_posts/gaborsamu/2017-1-9-woe_is_2016.md +++ /dev/null @@ -1,57 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2017-01-09 21:17:58' -layout: post -original_url: https://www.gaborsamu.com/blog/woe_is_2016/ -slug: oh-woe-is-2016-not- -title: Oh Woe is 2016 - NOT! ---- - -

As we enter a new year, 2016 seems to have been tarnished in its closing months by events around the world. Far be it from me to talk about world events here; I'd like to focus on the good - at least from my perspective. 2016 was a great year for me. It was the year in which I managed to:

    - -
- Move house
- Upgrade from a late 1980s to a late 1990s German station wagon ("estate" for those who speak real English)
- Move from Blackberry 10 to Android - blech - but I'll admit my HTC 10 is a fantastic piece of hardware
- Decide that I no longer revere Apple products as I once did - before any harsh words, I am writing this on a Macbook Pro Retina…and I have a veritable museum of Apple kit at home
- Take my first steps to learn about machine learning frameworks like Caffe and TensorFlow - yes, I've been tinkering with Caffe on one of my ARM developer boards
- Stick with Linux for my work laptop, even with the tantalizing choice of a shiny new Macbook with OS X
- Entrust the security of my home internet to the Turris Omnia - because using a router that hasn't been patched in years is, well, silly, to put it politely
- Finally get myself an OpenPOWER t-shirt at ISC High-Performance - which I wear proudly because OpenPOWER rocks!
- Understand that getting future generations interested in technology is key - and do my part by giving an intro to High-Performance Computing talk at a local school
- Successfully launch IBM Spectrum LSF 10.1 with the help of my many great peers. And yes, it does run on Linux on ARM v7 & v8 and Linux on POWER8 Little Endian :)

    And that’s just what I can think of as I write this blog…so for me, 2016 has an aura rather -than a tarnish to it.

    - -

So as we enter the year of Canada's 150th birthday with a full head of steam, I'm looking forward to hitchin' my wagon to some of the cool things coming up, including:

    - -
- Exploring the wonderful national parks of Canada at no charge with my Parks Canada pass
- OpenPOWER and IBM POWER9
- Building up my home ARMy with a pre-ordered Armada 8040 Community Board, which should help to speed up the machine learning I've been tinkering with

    And that’s just for starters. What’s your plan?

    \ No newline at end of file diff --git a/_posts/gaborsamu/2017-9-15-benchmarking_macchiatobin.md b/_posts/gaborsamu/2017-9-15-benchmarking_macchiatobin.md deleted file mode 100644 index 79ab181..0000000 --- a/_posts/gaborsamu/2017-9-15-benchmarking_macchiatobin.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2017-09-15 17:37:03' -layout: post -original_url: https://www.gaborsamu.com/blog/benchmarking_macchiatobin/ -slug: cool-and-quiet-benchmarking-on-macchiatobin-armada-8040- -title: Cool and quiet benchmarking on MACCHIATObin (Armada 8040) ---- - -

I've recently taken delivery of a few new goodies to complement the MACCHIATObin ARM v8-powered board that I've written about recently on my blog.

    - - -

You'll recall that my efforts to do some rudimentary testing, including running HPL, were thwarted by overheating. So I decided to address the issue with some parts I've been meaning to pick up anyway for some other interesting projects I have in the pipeline (fingers crossed):

    - - -

And this is what it looks like now…

[photo]

Now, the red workbench and shiny heatsinks scream performance. So what about my run of HPL (Linpack)? Well, I decided to start over from scratch and build my own Linpack binary against ATLAS, which I also compiled from scratch (I let that run overnight).
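For anyone retracing the steps, here is a rough sketch of building HPL against ATLAS - the version, paths and arch name are illustrative, not a record of my exact build:

    # sketch: build HPL against an existing ATLAS install
    tar xf hpl-2.2.tar.gz && cd hpl-2.2
    cp setup/Make.Linux_PII_CBLAS Make.aarch64   # start from a CBLAS template
    # edit Make.aarch64 to point at ATLAS, e.g.:
    #   LAdir = /usr/local/atlas/lib
    #   LAlib = $(LAdir)/libcblas.a $(LAdir)/libatlas.a
    make arch=aarch64
    cd bin/aarch64 && mpirun -np 4 ./xhpl   # xhpl reads HPL.dat from the current directory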

    - -

    The result? I went from hitting the thermal limiter (and a non-result) to a successful Linpack run - with the CPU temperature never -really going much past 50C. As for my Linpack score, you can see that below.

[screenshot of the Linpack result]
    \ No newline at end of file diff --git a/_posts/gaborsamu/2018-10-2-spectrumlsf_gpu_usage.md b/_posts/gaborsamu/2018-10-2-spectrumlsf_gpu_usage.md deleted file mode 100644 index b388279..0000000 --- a/_posts/gaborsamu/2018-10-2-spectrumlsf_gpu_usage.md +++ /dev/null @@ -1,119 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2018-10-02 03:21:37' -layout: post -original_url: https://www.gaborsamu.com/blog/spectrumlsf_gpu_usage/ -slug: gpu-usage-information-for-jobs-in-ibm-spectrum-lsf -title: GPU usage information for jobs in IBM Spectrum LSF ---- - -

In my last blog, we ran through an example showing how IBM Spectrum LSF now detects the presence of NVIDIA GPUs on hosts in the cluster and performs the necessary scheduler configuration automatically.

    - -

    In this blog, we take a closer look at the integration between Spectrum LSF and -NVIDIA DCGM which provides GPU usage information for jobs submitted to the -system.

    - - - - -

    To enable the integration between Spectrum LSF and NVIDIA DCGM, we -need to specify the LSF_DCGM_PORT=<port number> parameter in -LSF_ENVDIR/lsf.conf

    - -
    root@kilenc:/etc/profile.d# cd $LSF_ENVDIR
root@kilenc:/opt/ibm/lsfsuite/lsf/conf# cat lsf.conf | grep -i DCGM
LSF_DCGM_PORT=5555
    - -

    You can find more details about the variable LSF_DCGM_PORT and what it -enables here.

    - -

    Before continuing, please ensure that the DCGM daemon is up and running. Below -we start DCGM on the default port and run a query command to confirm that it’s -up and running.

    - -
    root@kilenc:/opt/ibm/lsfsuite/lsf/conf# nv-hostengine
Started host engine version 1.4.6 using port number: 5555

root@kilenc:/opt/ibm/lsfsuite/lsf/conf# dcgmi discovery -l
1 GPU found.
+--------+-------------------------------------------------------------------+
| GPU ID | Device Information                                                |
+========+===================================================================+
| 0      | Name: Tesla V100-PCIE-32GB                                        |
|        | PCI Bus ID: 00000033:01:00.0                                      |
|        | Device UUID: GPU-3622f703-248a-df97-297e-df1f4bcd325c             |
+--------+-------------------------------------------------------------------+
    - -

    Next, let’s submit a GPU job to IBM Spectrum LSF to demonstrate the collection -of GPU accounting. Note that the GPU job must be submitted to Spectrum LSF -with the exclusive mode specified in order for the resource usage to be -collected. As was the case in my previous blog, we submit the gpu-burn test -job (formally known as Multi-GPU CUDA stress test).

    - -
    test@kilenc:~/gpu-burn$ bsub -gpu "num=1:mode=exclusive_process" ./gpu_burn 120
Job <54086> is submitted to default queue <normal>.
    - -

    Job 54086 runs to successful completion and we use the Spectrum LSF bjobs command with the -gpu option to display the GPU usage -information in the output below.

    - -
test@kilenc:~/gpu-burn$ bjobs -l -gpu 54086

Job <54086>, User <test>, Project <default>, Status <DONE>, Queue <normal>,
                     Command <./gpu_burn 120>, Share group charged </test>
Mon Oct  1 11:14:04: Submitted from host <kilenc>, CWD <$HOME/gpu-burn>,
                     Requested GPU <num=1:mode=exclusive_process>;
Mon Oct  1 11:14:05: Started 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot(s)
                     on Host(s) <kilenc>, Execution Home </home/test>,
                     Execution CWD </home/test/gpu-burn>;
Mon Oct  1 11:16:08: Done successfully. The CPU time used is 153.0 seconds.
                     HOST: kilenc; CPU_TIME: 153 seconds
                        GPU ID: 0
                            Total Execution Time: 122 seconds
                            Energy Consumed: 25733 Joules
                            SM Utilization (%): Avg 99, Max 100, Min 64
                            Memory Utilization (%): Avg 28, Max 39, Min 9
                            Max GPU Memory Used: 30714888192 bytes

GPU Energy Consumed: 25733.000000 Joules

 MEMORY USAGE:
 MAX MEM: 219 Mbytes;  AVG MEM: 208 Mbytes

 SCHEDULING PARAMETERS:
           r15s   r1m  r15m   ut      pg    io   ls    it    tmp    swp    mem
 loadSched   -     -     -     -       -     -    -     -     -      -      -
 loadStop    -     -     -     -       -     -    -     -     -      -      -

 EXTERNAL MESSAGES:
 MSG_ID FROM       POST_TIME      MESSAGE                             ATTACHMENT
 0      test       Oct  1 11:14   kilenc:gpus=0;                          N

 RESOURCE REQUIREMENT DETAILS:
 Combined: select[(ngpus>0) && (type == local)] order[gpu_maxfactor]
           rusage[ngpus_physical=1.00]
 Effective: select[((ngpus>0)) && (type == local)] order[gpu_maxfactor]
           rusage[ngpus_physical=1.00]

 GPU REQUIREMENT DETAILS:
 Combined: num=1:mode=exclusive_process:mps=no:j_exclusive=yes
 Effective: num=1:mode=exclusive_process:mps=no:j_exclusive=yes

 GPU_ALLOCATION:
 HOST             TASK ID  MODEL        MTOTAL  FACTOR MRSV    SOCKET NVLINK
 kilenc           0    0   TeslaV100_PC 31.7G   7.0    0M      8      -
    - -

    And to close, yours truly spoke at the HPC User Forum in April 2018 (Tucson, AZ) giving a -short update in the vendor panel about Spectrum LSF, focusing on GPU support.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2018-6-21-taming_gpu.md b/_posts/gaborsamu/2018-6-21-taming_gpu.md deleted file mode 100644 index aaec1a3..0000000 --- a/_posts/gaborsamu/2018-6-21-taming_gpu.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2018-06-21 03:21:37' -layout: post -original_url: https://www.gaborsamu.com/blog/taming_gpu/ -slug: the-taming-of-the-gpu -title: The Taming of the GPU ---- - -

The media has been alight with articles regarding the groundbreaking Summit supercomputer recently unveiled at Oak Ridge National Laboratory. It sports a mind-boggling 9,216 IBM POWER9 CPUs and 27,648 NVIDIA Tesla GPUs, underpinned by 250 petabytes of storage. This muscle will be put to good use running traditional HPC as well as AI workloads across a broad range of sciences.

    - -

    Looking at the landscape of systems being built for HPC and now AI, there is one commonality – many are hybrid CPU-GPU systems. Whether we’re considering systems at the pinnacle of computing such as Summit, or commercial HPC and AI systems, GPUs have become a defacto method for accelerating code and providing copious amounts of floating point performance.

    - -

The early days of clustered computing saw the advent of workload and resource managers, which tamed environments by orchestrating access to computing resources and bringing them to bear in a predictable manner - aligned with the needs of scientists and businesses alike. As environments have grown in scale to meet the growing thirst for HPC, GPUs and accelerated computing have stepped out on stage to take a bow.

    - -

    Software developers have and continue to port and optimize applications to benefit from the capabilities provided by GPUs. According to a recent report from November 2017, a high percentage of HPC applications now offer GPU support.

    - -
    -

    “According to the latest HPC User Site Census data and additional research, of the 50 most popular application packages mentioned by HPC users, 34 offer GPU support (including two under current development), including 9 of the top 10.”

    - -
    -

    Indeed, the recent Top 500 list (November 2017) includes no less than 87 hybrid CPU-GPU systems (and more counting other types of accelerators).

    - -

    So how do GPU-heavy systems impact the task of the workload and resource managers? Fundamentally, as GPUs are resources, workload schedulers have had to adapt too.

    - -

    A wild west land grab

    - -

It's not just large-scale supercomputers that face the challenge of compute supply versus user demand. Commercial HPC environments are also now increasingly hybrid CPU-GPU based, with potentially hundreds of users and millions of jobs per day in high-throughput computing use cases. These are complex environments and large investments, requiring workload management software with sophisticated capabilities to rein in all the resources - so that users end up with GPU workloads running on the right servers.

    - -

    Computing environments today can have some servers with GPUs, some without, varied GPU configurations including models and memory, and a different number of GPUs per node. Adding to this complexity, in a typical data center, servers can come and go so the configuration is not always static.

    - -

    In general, workload schedulers require the administrator to specify in the configuration whether a given server is equipped with GPUs, often requiring additional information such as the GPU model, etc. Without this crucial information, the workload scheduler cannot effectively route jobs to nodes – potentially leading to a Wild West grab for resources.

    - -

    Call in the Cavalry

    - -

IBM Spectrum LSF has been continuously innovating since 1992 to address the needs of increasingly complex HPC environments at scale. Support for NVIDIA GPUs was first introduced in IBM Spectrum LSF in 2007. Continuing this long tradition of enhancements to NVIDIA GPU support, IBM Spectrum LSF now includes a new capability designed to dramatically simplify the administration of GPU servers and enable users to be productive faster. With "zero config" for NVIDIA GPUs, IBM Spectrum LSF detects the presence of GPUs and automatically performs the necessary scheduler configuration - without any interaction from the administrator. IBM Spectrum LSF will help tame the GPU environment for you, allowing users with GPU-ready codes to be productive from the moment the environment is set up.
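To give a feel for how little is involved: in current releases, automatic GPU detection and configuration comes down to a single parameter in LSF_ENVDIR/lsf.conf - shown here as a sketch; check the documentation for the fix pack level that applies to your installation:

    # lsf.conf: let LSF detect NVIDIA GPUs and configure GPU resources itself
    LSF_GPU_AUTOCONFIG=Y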

    \ No newline at end of file diff --git a/_posts/gaborsamu/2019-12-19-intelligent_hpc.md b/_posts/gaborsamu/2019-12-19-intelligent_hpc.md deleted file mode 100644 index 22ca8cf..0000000 --- a/_posts/gaborsamu/2019-12-19-intelligent_hpc.md +++ /dev/null @@ -1,38 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2019-12-19 02:21:37' -layout: post -original_url: https://www.gaborsamu.com/blog/intelligent_hpc/ -slug: intelligent-hpc-keeping-hard-work-at-bay-es- -title: Intelligent HPC - Keeping Hard Work at Bay(es) ---- - -

Since the dawn of time, humans have looked for ways to make their lives easier. Over the centuries, human ingenuity has given us inventions such as the wheel and simple machines - which help greatly with tasks that would otherwise be extremely laborious. Over time, we've learned there are often alternatives to brute force ways of doing things. It's this human reasoning that has driven the advancement we find in our world today.

    - -

Fast forward to this century, where computer-driven simulation has been developed as the third branch of the scientific method, supplementing theory and experimentation. For decades, simulation and modelling have delivered unprecedented capabilities to drive innovation for the betterment of the world. The need to run more simulations faster has spurred the development of ever faster processors, networking and storage. The approach to speeding up simulations has been one of brute force: faster computing to deliver faster results. But the insatiable desire to perform simulations faster has very real implications in today's world - such as managing the power requirements of future supercomputers. It's time for high performance computing to revisit brute force approaches in order to achieve the next level of performance.

    - -

    Lessons from the past

    - -

We sometimes forget that it's important to look at lessons from the past in order to create a better future. HPC simulations today are computationally intensive - and as the fidelity of models increases, so does the number of calculations and the time to solution. Rethinking this laborious method for simulations, are there ways that we can cut down on the number of calculations performed? A calculation avoided is time saved. Our lesson takes us back to 1763, when Thomas Bayes' "An Essay towards solving a Problem in the Doctrine of Chances" was published, from which Bayes' theorem was developed.

    - -

In simple terms, Bayes' theorem can be used to predict the probability of an outcome based upon prior knowledge or information. What if Bayes' theorem could be applied to computational simulations to determine the likelihood that a given iteration of a simulation will provide a useful outcome, and to discard those iterations where a useful outcome is unlikely? A calculation avoided is time saved. As it turns out, applying Bayesian methods to HPC-driven design can dramatically reduce the time to an optimal product specification.
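For reference, the theorem itself is compact. The posterior probability of a hypothesis A given observed evidence B is:

    P(A \mid B) = \frac{P(B \mid A)\, P(A)}{P(B)}

In the simulation-steering setting sketched above, A plays the role of "this candidate configuration is worth simulating" and B is the set of results observed so far, so each new result refines the prior and unpromising candidates can be skipped.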

    - -

    Bayesian optimization at work

    - -

To put Bayesian methods to the test, the engineers of the IBM Systems High Speed Bus Signal Integrity (HSB-SI) Team used software based upon the principles of Bayesian statistics, called IBM Bayesian Optimization (IBO), developed by IBM Research. IBO was designed to accelerate computational workflows through the application of sophisticated algorithms. The HSB-SI team's challenge is to minimize the time needed for design validation simulation analysis of high-speed interfaces, for the purpose of choosing an optimal configuration point while maintaining or increasing the fidelity of the solution. In testing IBO, they wanted to reduce the number of simulations needed to reach the optimal configuration point for chip-to-chip communication.

    - -
    -

    “Our team is taking advantage of state-of-the-art machine learning to design computer systems of the future.” -Dale Becker, Ph.D., Chief Engineer Electrical Packaging Integration, IBM

    - -
    -

The results were dramatic. They achieved a 140x faster time to solution with higher accuracy than their legacy method. Using IBO, they used 99% fewer cores to arrive at a higher-confidence solution with less than a 1% error rate.

    - -

    With time to solution being a critical element of competitive advantage, the adoption of sophisticated statistical methods and machine learning to accelerate simulation workflows is destined to grow quickly. In our next article about innovations in HPC we will highlight multiple use cases where Bayesian optimized workflows are transforming HPC simulation-driven innovation.

    - -

    Originally published on HPCwire IBM Solution Channel on December 18, 2019

    \ No newline at end of file diff --git a/_posts/gaborsamu/2019-6-5-beyond_simulation_isc.md b/_posts/gaborsamu/2019-6-5-beyond_simulation_isc.md deleted file mode 100644 index 7be4c6c..0000000 --- a/_posts/gaborsamu/2019-6-5-beyond_simulation_isc.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2019-06-05 03:21:37' -layout: post -original_url: https://www.gaborsamu.com/blog/beyond_simulation_isc/ -slug: beyond-simulation-harnessing-ai-for-next-generation-hpc-at-isc -title: Beyond Simulation – Harnessing AI for Next-Generation HPC at ISC ---- - -

Computer simulation has become a staple technique in many disciplines – so much so that it is often described as the “third pillar” of the scientific method. Alongside theory and experimentation, simulation is used in everything from automotive design to computational chemistry to forecasting weather and market movements.

    - -

    Simulation helps us solve problems that are too difficult, time-consuming, or expensive to solve empirically – for example, what is the optimal design and material for an impeller in a centrifugal pump? Or what failure states might exist in a semiconductor design from which a device can’t recover?

    - -

By devising accurate mathematical models and approximating those numerically in software, we can predict the behavior of real-world systems based on various parameters and a set of initial conditions. The better the model, the higher the quality of the input data, and the more computing power that can be brought to bear, the better the prediction.

    - -

    Simulation vs. analytics

    - -

    High-performance data analytics (HPDA) and computer simulation are increasingly joined at the hip. Analytic techniques are sometimes used to improve simulation – providing better quality datasets to feed a simulation model, for example. Other times, simulation helps improve analytics – back-testing the performance of a financial or weather model over past data, for example, to gain confidence in a model’s predictive quality.

    - -

While simulation has served us well, it has limits. The quality of a predictive model is only as good as our ability to identify features useful in making accurate predictions. For some problems, such as structural mechanics, the features required to build a predictive model are relatively well known. For other problems, such as financial markets or weather models, the number of potential parameters is vast, and their effects are sometimes poorly understood, significantly affecting the quality of the result.

    - -

    A fourth pillar in the scientific method

    - -

AI is rapidly emerging as a “fourth pillar” in the scientific method, complementing theory, experimentation, and simulation techniques. Inference allows computers to make educated guesses about future results without the need to go through a full-blown simulation.

    - -

    In fact, the AI development process can be modeled as automation of the scientific method where the steps are:

    - -
1. Observe
2. Hypothesize
3. Test the hypothesis
4. (return to step 1)

    The power of “better guesses”

    - -

Humans often infer things intuitively, based on prior knowledge. For example, back to our impeller design: if a centrifugal pump needs to handle a viscous or corrosive liquid, the human engineer might know intuitively that a strong, non-reactive material like stainless steel is a good choice. By making educated guesses on materials and other parameters, the problem space to be simulated is reduced dramatically.

    - -

When dealing with complex problems, however, our human ability to make such inferences breaks down. Even for subject matter experts, problems like modeling chemical reactions or predicting how a semiconductor will behave are beyond our experience. The systems we need to model are too complex and involve too many parameters.

    - -

    Intelligent Simulation

    - -

Fortunately, computers are very good at sifting through vast amounts of data and detecting patterns not obvious to humans. The best way to boost simulation performance is often to avoid simulations that will be irrelevant and not useful. By applying machine learning and other AI techniques to make informed guesses about which parameters and simulations will be most useful in solving a problem, we can:

    - -
• Reduce the number of simulations required
• Provide higher resolution simulations and more trustworthy models
• Reduce costs and cycle times wherever computer simulation is used

    Intelligent simulation helps us more effectively explore a problem space by predicting what regions, data, and exploratory techniques are most likely to be useful and omitting the rest.

    - -

    Bayesian Optimization

    - -

    In probability theory, Bayes’ theorem describes the probability of an event, based on prior knowledge of conditions that might be related to the event. It turns out that Bayesian analysis is a particularly effective way to capture common sense information from data, to help make better predictions, thus reducing the amount of computer simulation required. IBM has developed a Bayesian optimization accelerator that can function as an HPC advisory engine.

    - -

Powered by Bayesian optimization libraries, the system helps scientists apply these state-of-the-art techniques to computer simulation in multiple industries without the need for deep AI expertise. Bayesian optimization has demonstrated that it can reduce simulation requirements by half with no disruption to the existing HPC infrastructure, dramatically improving HPC productivity.
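To make the idea concrete, here is a minimal sketch in Python of surrogate-driven simulation selection – emphatically a toy illustration of the general technique, not IBO itself, and every name in it is invented for the example. A production Bayesian optimizer would also model uncertainty and balance exploration against exploitation rather than greedily chasing the surrogate’s minimum.

```python
# Toy sketch: use the results of simulations already run to pick the single
# most promising candidate to simulate next, instead of sweeping them all.
import numpy as np

rng = np.random.default_rng(42)

def expensive_simulation(x):
    """Stand-in for a costly HPC simulation of one design parameter x."""
    return (x - 0.7) ** 2 + 0.05 * rng.normal()

# Seed the optimizer with a handful of simulations across the range.
xs = list(np.linspace(0.0, 1.0, 4))
ys = [expensive_simulation(x) for x in xs]

for _ in range(6):
    # Fit a cheap quadratic surrogate to everything observed so far.
    surrogate = np.poly1d(np.polyfit(xs, ys, deg=2))

    # Score many candidate configurations with the surrogate (cheap),
    # then spend real compute only on the most promising one.
    candidates = np.linspace(0.0, 1.0, 201)
    best = candidates[np.argmin(surrogate(candidates))]
    xs.append(best)
    ys.append(expensive_simulation(best))

i = int(np.argmin(ys))
print(f"best configuration: x={xs[i]:.3f} after only {len(xs)} simulations")
```

Ten real simulations in total stand in here for what a brute-force parameter sweep would otherwise spend on hundreds of runs.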

    - -

    Harnessing AI for Next-Generation HPC @ ISC 2019

    - -

    At this year’s ISC conference in Frankfurt, Germany, you can learn more about IBM solutions for AI and HPC –

    - -
• Learn how accelerating simulations with Bayesian optimization has the potential to help you perform simulations in half the time
• Learn how IBM Q researchers are putting machine learning on the path to quantum advantage
• Try out IBM RXN for Chemistry and learn how AI techniques are helping automate discovery for organic chemistry by predicting chemical reactions
• Finally, learn how a CPPM PCIe40 data acquisition adapter in an IBM POWER9 based system can help advance state-of-the-art research in high-energy physics and other applications

Stop by the IBM booth (D-1140 in the exhibit hall) to see demos spanning Power Systems and Spectrum Storage to Spectrum Computing and Watson Machine Learning Accelerator.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2020-1-16-power9_bootup.md b/_posts/gaborsamu/2020-1-16-power9_bootup.md deleted file mode 100644 index bccff91..0000000 --- a/_posts/gaborsamu/2020-1-16-power9_bootup.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2020-01-16 13:43:37' -layout: post -original_url: https://www.gaborsamu.com/blog/power9_bootup/ -slug: bootup-fun-dual-socket-power9 -title: Bootup fun- dual-socket POWER9 ---- - -

Well today it’s going to be a short one. For those of you out there who are like me and enjoy watching systems boot, I’ve recorded this brief (~3 minutes) bootup sequence of a dual-socket POWER9 based system. This was done through the CLI based OpenBMC console (obmc-console-client) and we see the system progressing through the bootup sequence to a running instance of CentOS 7.

    - - -
    - -
    - - -

And for something a bit more esoteric, another bootup video recorded a number of years back. This time it’s a MIPS-based IBM Workpad z50 booting NetBSD. Definitely not a room heater, but probably the best keyboard I’ve used on a small form factor laptop - ironically, the form factor is referred to as “hpc”, which in this case stands for “handheld pc”.

    - - -
    - -
    \ No newline at end of file diff --git a/_posts/gaborsamu/2020-4-1-flora_watch.md b/_posts/gaborsamu/2020-4-1-flora_watch.md deleted file mode 100644 index a3f4c0d..0000000 --- a/_posts/gaborsamu/2020-4-1-flora_watch.md +++ /dev/null @@ -1,35 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2020-04-01 13:04:28' -layout: post -original_url: https://www.gaborsamu.com/blog/flora_watch/ -slug: when-you-got-time-on-your-side-create-something -title: When you got time on your side - create something ---- - -

As we all settle down into the new norm of being housebound during this global pandemic, it’s given us the opportunity to work on projects which would have remained on the back burner for an indefinite period.

    - -

As the eternal tinkerer, I’ve lately turned my attention to the Arduino community and all of the very interesting projects and possibilities that exist. One wearable electronics project that caught my eye a number of months back was a wristwatch project which I spotted on the Adafruit site here. Of course, ordering the parts was the easy part. What I found in the meantime is that my soldering iron was also kaput and I could not for the life of me find any of the wires, solder and other electronics tools. So alongside the box full of electronics components, I ordered a shiny new soldering iron, essentials for soldering and a few different types of glue.

    - -

And the last important piece of this jigsaw puzzle was the watch band. I had been scouting around for some time for a suitable band - something high quality, yet fashionable. I managed to purchase a fantastic Kapital (Japan) indigo velcro band from Grailed.

    - -

As all of the pieces were finally in my hands, what was missing was time. This past weekend, I was able to devote some time to prototyping and ultimately soldering together all of the pieces with some younger helping hands. My soldering skills were definitely not what they used to be. But there was something special about sitting on my back porch in the spring sunshine, stripping wires and soldering. The most challenging part for me was not assembling the watch; rather, it was gluing the straps to the back of the watch face in order to be able to mount it to the watch band. I had to try a few different glues with a lot of patience. I wasn’t keen on using E6000 glue due to its toxicity…and rather opted to use a non-toxic glue from Aleene’s. Not sure how it will hold up in the long term though - time will tell (pun intended). Above is a photo of the watch connected to its USB “umbilical cord” for power and to load the sketch (code).

    - -

    And this is how it looks on my arm running off of a mini LiPo battery (also courtesy of Adafruit).

    - -
    -
    - -

    Tinkering is fun!

    \ No newline at end of file diff --git a/_posts/gaborsamu/2020-9-4-gpu_pac.md b/_posts/gaborsamu/2020-9-4-gpu_pac.md deleted file mode 100644 index b5c0501..0000000 --- a/_posts/gaborsamu/2020-9-4-gpu_pac.md +++ /dev/null @@ -1,99 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2020-09-04 17:30:44' -layout: post -original_url: https://www.gaborsamu.com/blog/gpu_pac/ -slug: extending-the-spectrum-lsf-gui-to-display-job-gpu-metrics -title: Extending the Spectrum LSF GUI to display job GPU metrics ---- - -

I’ve previously written about accounting for GPU workloads in Spectrum LSF using Nvidia DCGM to collect granular metrics including energy consumed, memory used, and overall GPU utilization. Spectrum LSF collects the information and it is made available through the familiar bhist and bacct commands.

    - -

How can one go about displaying this information in the web-based job management interface that is provided by Spectrum LSF Application Center or as part of the Spectrum LSF Suites? Here we will provide a simple example showing how:

    - -
• Administrators can customize the navigation in the Spectrum LSF web-based job management interface
• Display the same GPU accounting information in the Spectrum LSF web-based job management interface

The following assumes that DCGM support has been enabled in Spectrum LSF and that you are running an edition of the Spectrum LSF Suite or Spectrum LSF Application Center.

    - -

The Spectrum LSF web-based job management interface enables GUI administrators to create new tabs with a user-specified URL or command. Here we will create a new tab which runs a command (script) that invokes the Spectrum LSF bhist command to display the GPU metrics for a given job. The script must be able to distinguish between a GPU and a non-GPU job.

    - -

A. To begin, we’ll require a simple script to display the detailed historical data of a given jobID, including GPU metrics, using the Spectrum LSF bhist command. A simple example script is provided below, saved with the filename gpu_acct.sh.

    - -
#!/bin/bash
# Display detailed historical data for a job, including GPU metrics.
# Note: the here-string (<<<) requires bash, hence the shebang above.
if [ -z "$1" ]; then
    echo "Usage: $0 <jobID>"
else
    OUTPUT=$(bhist -a -l -gpu "$1")
    # Only GPU jobs report energy metrics; print the full record for those.
    if grep -q 'GPU Energy Consumed' <<< "$OUTPUT"; then
        echo "$OUTPUT"
    else
        echo "Not a GPU job."
    fi
fi
    - -

    As the Spectrum LSF administrator, create the above script in the $LSF_BINDIR directory with permissions 755.

    - -

B. Next, login to the Spectrum LSF web-based interface as a user with administrative privileges and navigate to Workload > Workload. Note that the user must have the Application Center Administrator privilege.

    - -
    -
    - -

C. It’s now necessary to select one of the jobs in the job list in order to display the job detail view. This is the page where we will be adding the GPU accounting tab.

    - -
    -
    - -

D. Click the edit (pencil) dropdown that can be found at the top right of the Spectrum LSF web-based interface and select Edit Page.

    - -
    -
    - -

    This will display the Create New Tab window which will be filled in during the next step.

    - -

    E. In the Create New Tab window, specify the following:

    - -
• Tab Label: GPU accounting
• Content From: Command and specify the command gpu_acct.sh %J

    Click the Apply button to complete the addition of the new tab on the job detail page.

    - -
    -
    - -

F. Finally, click the Edit Page dropdown on the top right corner of the interface and select Apply and exit Pages Editing to make the changes take effect. You will now see a new GPU accounting tab in the job detail view. Here I’ve selected a GPU job that has been run previously through Spectrum LSF. We see the full bhist output displayed here including the detailed GPU accounting.

    - -

    -
    - -
    -
    -

    - -

As a final note, for jobs that have not requested a GPU resource through Spectrum LSF, we will see the message “Not a GPU job” displayed when the GPU accounting tab is selected.

    - -
    -
    - -

    That concludes this simple example showing how the Spectrum LSF web-based interface can be customized.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2021-1-16-new_novena.md b/_posts/gaborsamu/2021-1-16-new_novena.md deleted file mode 100644 index c08f91d..0000000 --- a/_posts/gaborsamu/2021-1-16-new_novena.md +++ /dev/null @@ -1,77 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2021-01-16 21:16:11' -layout: post -original_url: https://www.gaborsamu.com/blog/new_novena/ -slug: late-to-the-party-and-a-few-bits-short -title: Late to the party and a few bits short ---- - -

I recently had the opportunity to purchase a pristine Novena desktop system. For those who aren’t aware, Novena is a Freescale i.MX6 based open-hardware computing platform which began shipping in 2015. It was available as a desktop, laptop, standalone board and a really neat heirloom version with a wooden case. The Novena had always been a curiosity for me since it was announced. But back in 2015, I missed my opportunity to purchase one – probably due to a bit of procrastination and the fact that I already had a Udoo Quad board, which is powered by the same processor. Because it’s based on a 32-bit processor, I purchased it with open eyes, knowing that it would not deliver M1 performance. Remarkably, although the creators of the Novena have declared it EOL, there are still some components available to purchase on Crowd Supply, including mainboards.

    - -

    Hackable? Yes, please

    - -

I’m a bit of a boomer when it comes to technology. I cut my teeth back in the day on highly expandable systems such as the IBM PC XT and Commodore Amiga 2000, and still to this day do my fair share of tinkering - for example, that super cool Flora watch project which I did back in 2020. That being said, I’ve also been one to appreciate leading-edge design from Apple and the super cool NeXT and SPARCstation systems designed by the renowned team at Frog Design. But cases are designed to house and protect what’s inside of a computer when it’s operating.

    - -

The Novena desktop and laptop versions eschew this for a design which features a pop-out screen, supported by a gas strut similar to what you’d see on a hatchback liftgate, exposing the mainboard in all its glory while the system is operating - so caution is always a good idea.

    - -

Of course, I could tell you about the time many moons ago that I fried a system by carelessly dropping a metallic object on the mainboard while the system was running. With that hard lesson learned, I’m being super cautious with Novena.

    - -

    Better late than never

    - -

My Novena desktop arrived from a far-off land and survived a transatlantic voyage unscathed, thanks to impeccable packaging. So although I’m very late to the Novena party, I managed to make it, circa 2021.

    - -

Before deciding on purchasing this previously loved Novena, one important factor I researched was OS support. OS support is often spotty for such non-standard systems. Luckily, an industrious person has kicked off the novena-next project, which aims to deliver OS support for the Novena for the foreseeable future. As always, your mileage may vary.

    - -

    Seeing is believing

    - -

Opening the package, I was like a kid at Christmas. The previous owner shipped me the whole shebang - Novena-RF SDR, extra green bezel, speakers, screws, clips, power adapter, etc. I connected the system to power and it immediately sprang to life and booted an older Debian version.

    - -

I’ve done a lot of tinkering since that first day. My Novena now has Debian 10 Buster installed (relying on support from novena-next), and boots from a SATA SSD. The speakers have been installed along with the Novena-RF SDR (which replaces the breakout board). In fact, I’m writing this blog on the Novena running LibreOffice, while listening to music from YouTube through Chromium, along with a bunch of terminals opened to some much more powerful systems humming along in my basement.

    - -
    -
    - -

Novena definitely won’t win any speed records and is a few bits short of 64. But it makes up for all of that in character. As I experiment with Novena, I plan a few more blogs along the way. Stay tuned for more. A computer with nine lives? It just may be the case with Novena.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2021-11-23-easy_hpc.md b/_posts/gaborsamu/2021-11-23-easy_hpc.md deleted file mode 100644 index be838b0..0000000 --- a/_posts/gaborsamu/2021-11-23-easy_hpc.md +++ /dev/null @@ -1,29 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2021-11-23 14:14:56' -layout: post -original_url: https://www.gaborsamu.com/blog/easy_hpc/ -slug: the-easy-hpc-button -title: The Easy HPC button ---- - -

    We live in a results-driven world. Whether it’s an aerodynamicist waiting on simulation results to determine the efficiency of their latest model, or a doctor waiting on genomic pipeline results to determine next steps for a patient, results make the world go round. And this of course goes beyond the sciences. As any thespian will tell you, stage productions are the result of the work of many individuals behind the scenes.

    - -

Much in the same way, the complex computational processes found in HPC rely upon many things behind the scenes being carried out. And although the devil may be in the details, consumers of HPC resources shouldn’t have to go through purgatory to get results. Organizations today rely on HPC to drive their core mission and deliver products to market faster. So it goes without saying that making HPC easy to use is crucial to productivity. And just as the technology of HPC has changed, so have the skills of the users. Modern HPC infrastructure relies upon a myriad of technologies including containerization, accelerators and cloud. And for users, gone are the expectations of learning a complex CLI, replaced by the need for easy-to-use interfaces.

    - -

Workload schedulers are a necessary component of any HPC cluster. Schedulers have been around for a very long time, and as they have become more sophisticated, they have come to support an ever-increasing number of CLI and configuration options. Although these options provide greater functionality, they can be complicated for end users. What if you could provide an HPC easy button for your users?

    - -

IBM Spectrum LSF is a workload management solution for HPC environments. Over the past 30 years, it’s evolved from being just a workload scheduler to an entire suite of capabilities covering the lifecycle of HPC jobs. Scheduling-wise, LSF has not only kept pace with the massive scale of commercial HPC environments today, but also provides capabilities which dramatically lower the bar to accessing HPC.

    - -

Ease of use starts with the users, and LSF provides a web-based job submission and management portal which greatly simplifies the use of your HPC cluster. Administrators define custom forms that hide the complexity, and they can even be customized to use application- and domain-specific language understood by your users. For users on the go, LSF has Android and iOS mobile clients so you can check on the state of your running jobs. And a RESTful API is also available to integrate LSF into your corporate infrastructure.
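As a purely hypothetical illustration (the host, endpoint path, payload fields and credentials below are invented for this sketch and are not the documented LSF API), driving a scheduler’s REST interface from Python might look something like this:

```python
# Hypothetical sketch of submitting a job via a scheduler's REST API.
# Host, path, payload fields and credentials are all invented for
# illustration; consult the LSF documentation for the real interface.
import requests

PORTAL = "https://lsf-portal.example.com"   # hypothetical portal host
session = requests.Session()
session.auth = ("hpcuser", "secret")        # site-specific authentication

job = {
    "command": "sim_run --model impeller_v3",  # what to run
    "queue": "normal",                         # target queue
    "num_cores": 32,                           # resource request
}

resp = session.post(f"{PORTAL}/api/jobs", json=job, timeout=30)
resp.raise_for_status()
print("submitted job id:", resp.json().get("jobId"))
```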

    - -

With users well taken care of, LSF features many capabilities which allow administrators to take advantage of technologies such as containerization, hybrid cloud and GPUs. Out-of-the-box support for various container technologies lets administrators control which containers can be used in the environment and hides the complex container startup commands from users. Support for dynamic hybrid cloud enables LSF to burst out to any of the supported cloud providers when needed and scale back the resources when no longer required. And intelligent data staging takes care of moving data to and from the cloud without blocking or making resources wait for transfers.

    - -

What does this all add up to? Well, you can think of it as an HPC easy button. Your users simply fill in a form and submit their job. LSF worries about the underlying complexities: where to place the job, moving data, CPU and GPU allocation. The user waits to get the job results back and is oblivious to everything that is going on behind the curtain.

    - -

    Learn more about easy HPC with IBM Spectrum LSF in this session: Simplifying HPC - Just push the button.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2021-6-15-risque_computing.md b/_posts/gaborsamu/2021-6-15-risque_computing.md deleted file mode 100644 index 5000a43..0000000 --- a/_posts/gaborsamu/2021-6-15-risque_computing.md +++ /dev/null @@ -1,98 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2021-06-16 00:57:31' -layout: post -original_url: https://www.gaborsamu.com/blog/risque_computing/ -slug: very-risqué-computing -title: Very risqué computing ---- - -

This spring, we’ve been blessed with fantastic and almost tropical weather here in Southern Ontario, Canada. Normally at this time, after a long winter, the last thing on my mind is indoor activities. However, on June 3rd I was greeted one morning by an email about an incoming delivery. It turns out it was one of the items I’ve been waiting patiently for from [Crowd Supply](https://www.crowdsupply.com/) in the hopes of keeping me busy during what I thought would be a cold spring season.

    - -

    Christmas in June

    - -

As I have multiple things from Crowd Supply on order (don’t ask!), I didn’t quite know which item was arriving. It turns out it was the long awaited SiFive HiFive Unmatched RISC-V powered board. Those who know me (and I’ve said this many times) understand that I don’t like mainstream anything. And that also applies to computers. My interest in Arm based systems dates from the 1990s with the venerable Acorn Archimedes computers. However, all of the news around the RISC-V community has really piqued my interest. I passed on the SiFive Unleashed primarily because it didn’t have a PCIe slot - although this was remedied with an optional, but costly, add-on board.

    - -

So when the SiFive Unmatched was announced with a competitive price and a bump to 16GB, I jumped at the opportunity to purchase one. And it turned out to be a great decision.

    - -

The HiFive Unmatched is based on the SiFive Freedom U740 SOC with four U74 cores and one S7 core, and features an all-important PCIe slot. With 16GB of onboard RAM and an M.2 Key M slot for an SSD, my goal was to get the Unmatched set up as a desktop. For those looking to learn more about RISC-V, I’d recommend starting with the RISC-V International foundation site. As per the RISC-V site, “RISC-V is a free and open ISA enabling a new era of processor innovation through open standard collaboration.” In simple terms, the ISA or instruction set architecture defines the set of instructions that are supported by the processor – so things like arithmetic, logic, and branch instructions, to name a few. It’s the way that programmers can issue commands to the processor to do “things”.

    - -

    First impressions

    - -

I’ve become accustomed to developer boards arriving in rather nondescript packaging. The first impression of the Unmatched board could not be further from this. The board was shipped in a lovely box and included an SD card with a bootable Freedom U SDK image, an I/O shield and a USB cable. So the first impression for me was quite positive.

    - -
    -
    - -

    Bootstrapping

    - -

I mounted the Unmatched board to my Streacom BC1 benchmark table and installed an XFX Radeon 2GB Heatsink edition in the PCIe slot. It’s an old GPU, but fanless – which I always appreciate. Plus, I’m not looking to do any serious gaming on the system.

    - -

The first boot of the system from the SD card was a success (albeit a bit slow). I monitored the boot over the serial console (minicom) from another system. The Unmatched sprang to life and eventually booted up to a fully working XFCE desktop. This was actually a lot smoother than what I anticipated. Once I confirmed that everything was working as expected, I installed a Samsung 780 NVME SSD in the M.2 Key M slot and turned my focus to Ubuntu 21.04. The SiFive Forums have proven an invaluable resource, helping me get Ubuntu up and running on the system and make sure the board was booting Ubuntu with a clock of 1.2 GHz. Of course, I followed the steps to install Ubuntu to the onboard NVME, so I/O performance is naturally much better now.

    - -

    Burning in

    - -

Does it run Linpack? Of course it does :) As with any new board I receive, running a High Performance Linpack benchmark is often one of the first things I do. It’s a well-known benchmark which provides data for the Top500 ranking of supercomputers.

    - -

I used the current HPL v2.3 and compiled it using the Ubuntu-supplied gcc, Open MPI and math libraries. A few runs of HPL yielded a result of 2 GFlops (see screenshots below). Although I’ve not looked closely at what the theoretical peak of the U740 SOC is, the result is roughly what I expected given what I’ve been reading up on the board. Ultimately, I was pleased that HPL compiled and ran to completion, and it was a great way to stress the board.
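As a back-of-the-envelope sanity check (assuming, purely as an assumption to be verified against SiFive’s documentation, that each U74 core can retire one double-precision fused multiply-add, i.e. 2 flops, per cycle), the theoretical peak would be roughly:

```latex
R_{peak} \approx 4~\text{cores} \times 1.2~\text{GHz} \times 2~\text{flops/cycle} = 9.6~\text{GFlops}
```

Under that assumption, the measured 2 GFlops would correspond to roughly 20% efficiency, which seems plausible for a first run with a generic BLAS and untuned problem sizes.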

    - -
    -
    - -

    Stay tuned to this channel for more risqué computing escapades…

    \ No newline at end of file diff --git a/_posts/gaborsamu/2021-8-8-riscv_bootup.md b/_posts/gaborsamu/2021-8-8-riscv_bootup.md deleted file mode 100644 index 05fa2a6..0000000 --- a/_posts/gaborsamu/2021-8-8-riscv_bootup.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2021-08-08 11:57:08' -layout: post -original_url: https://www.gaborsamu.com/blog/riscv_bootup/ -slug: booting-hifive-unmatched -title: Booting HiFive Unmatched ---- - -

For those of you who like system bootup videos, here is the latest addition to my collection. Here I’ve captured the bootup of Ubuntu 21.04 on a SiFive HiFive Unmatched developer board. This is a capture of the bootup messages over the serial console using minicom and the appropriate USB serial cable.

    - - -
    - -
    \ No newline at end of file diff --git a/_posts/gaborsamu/2021-9-26-ten64_nas.md b/_posts/gaborsamu/2021-9-26-ten64_nas.md deleted file mode 100644 index ccce557..0000000 --- a/_posts/gaborsamu/2021-9-26-ten64_nas.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2021-09-26 17:55:00' -layout: post -original_url: https://www.gaborsamu.com/blog/ten64_nas/ -slug: 10-4-to-the-ten64-with-rockstor -title: 10-4 to the Ten64 with Rockstor ---- - -

I love it when a plan comes together! And this time, I’m not referring to a daring rescue by the A-Team, but rather something just slightly more mundane - network attached storage (NAS).

    - -

I wrote back in March of this year about my experience setting up an Arm based NAS for home use running Rockstor on my venerable SolidRun macchiatoBIN board. Although the macchiatoBIN served in this role well, one limiting factor is its three onboard SATA ports. When used as a desktop this wasn’t an issue, but as a NAS it would limit things down the road in terms of adding storage. Yes, I know I could have purchased a PCIe SATA card to add additional ports, but I decided against going this route given the various foibles I encountered with PCIe support on the macchiatoBIN over the years.

    - -

My mind had been set a number of months earlier on purchasing a Traverse Ten64 network appliance and using it primarily as a NAS. The Ten64 was attractive to me because of its expandability, ECC RAM support, abundance of network ports and an interesting capability known as DPAA2, which comes courtesy of the NXP Layerscape LS1088A processor. A little bit more about DPAA2 later in the writeup. Furthermore, the Ten64 could stand in for home router duties should my (also venerable) Turris Omnia router decide to give up the ghost.

    - -

Through all of this, I heard the chants of QNAP and Synology from my friends, who all thought that I was making things overly complicated for myself. For me, it was a no-brainer. The Ten64 would provide unprecedented flexibility and would give me a handy appliance which could take on NAS duties as well as other tasks, such as tinkering with K8s (k3s) clusters. And who could deny the additional cool factor of the red PCB of the Ten64! Ultimately, I just love messing around with technology, and I’m always looking for unique and flexible solutions. Plug and play? No thanks!

    - -

Back in March, after assessing that an Arm based NAS was indeed a workable solution, I started to seek out the necessary bits and pieces in anticipation of the arrival of the Ten64 board. Of course, with COVID still in the air, I was quite worried about being able to get all of the bits I needed in time. Over the summer, I diligently got all of the following pieces ready:

    - -
• 1 x Kingston KSM268SE8/16ME 16GB DDR4 2666 MHz ECC SODIMM
• 1 x IOCrest IO-M2F585-5I M.2 B/M 5-port SATA controller
• 2 x Seagate Ironwolf 2 TB NAS drives
• 1 x Seagate Ironwolf 240 GB NAS SSD
• 1 x Fraktal Array R2 mini-ITX NAS case

And the plan was slowly coming together. At this stage only the Ten64 itself was missing. And then, one fateful day in August, the Ten64 arrived at long last! It was rock n' roll time. The Traverse Ten64 online documentation and forum turned out to be invaluable sources of information to help me get up and running. In fact, if you search the forum you’ll find my name there in a few threads, in particular around DPAA2, which was the thorniest issue to resolve. Full disclosure: DPAA2 support in Linux distros is a bit hit and miss.

    - -

The Ten64 shipped in its own small form factor case. I set up the Ten64 on my workbench, installed the RAM and M.2 SATA controller, and connected the 240GB SATA SSD. The end game was to get the system booting the OpenWrt based muvirt from the 240GB SATA SSD and to run Rockstor as a virtual machine under muvirt, with network interfaces managed by DPAA2.

    - -
    -
    - -

Once the software side of the house was figured out, it was time to install the Ten64 board into the Fraktal NAS case. This is what it looked like during the installation phase.

    - -
    -
    - -

There are tons of resources on NXP DPAA2 to be found on the Internet. The Ten64 online documentation includes a useful overview and details. It’s effectively a way that you can represent network objects on the NXP LS1088A processor of the Ten64 and pass those securely into the VM running on the system - which in my case was going to be Rockstor running on an OpenSUSE LEAP 15.3 VM. With DPAA2 I can avoid using virtualized networking for the VMs, for better performance. Again, I’m very far from being an authority on DPAA2, but it was definitely an important selling point for me, given my use case.

    - -

DPAA2 took some effort to get working, but I’m very pleased with the outcome. Ultimately, it required updated versions of muvirt, re-compiling the VM guest kernel to include the necessary DPAA2 patches, and flashing a new data path layout to the Ten64 board. You can find all of the nitty-gritty details about this in the following Ten64 forum thread.

    - -

Here is a view of the Rockstor dashboard showing activity on the system. I’m a fan of the dashboard as it gives important details at a glance about the state of the NAS.

    - -
    -
    - -

So what does the future hold? At the moment I’m migrating data to the Rockstor NAS. I’ve not done extensive performance tests, but suffice it to say that read/write performance to the NAS is what I would expect over Gigabit Ethernet. I’ve installed both the Jellyfin and Netdata rock-ons as well, to provide media server capabilities and detailed metrics on the system load. I anticipate that I’ll be looking more closely at k3s in the coming weeks.

    - -

So this is a bit of a pat-myself-on-the-back moment. I’m very pleased with the outcome and the capabilities of the Ten64, and with the room it will provide to grow in the future. And what also matters to me is that, in the end, I did it my way.

    \ No newline at end of file diff --git a/_posts/gaborsamu/2022-1-4-2022_hpc.md b/_posts/gaborsamu/2022-1-4-2022_hpc.md deleted file mode 100644 index cac28c3..0000000 --- a/_posts/gaborsamu/2022-1-4-2022_hpc.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -author: Ramblings of a supercomputing enthusiast. -author_tag: gaborsamu -blog_subtitle: Recent content in Blogs on Technical Computing Goulash -blog_title: Blogs on Technical Computing Goulash -blog_url: https://www.gaborsamu.com/blog/ -category: gaborsamu -date: '2022-01-04 20:27:54' -layout: post -original_url: https://www.gaborsamu.com/blog/2022_hpc/ -slug: new-year-s-resolution-for-hpc-using-resources-more-efficiently -title: New Year's Resolution for HPC- Using Resources More Efficiently ---- - -

A hearty happy new year to everyone. It’s that time of the year when we hear from folks about their New Year’s resolutions. But rather than talk about me purchasing a gym membership, I’d like to share my thoughts on a New Year’s resolution for HPC.

    - -

With the topsy-turvy weather that we’re seeing all over the planet, we’re all acutely aware of the changes that are happening to our climate and what they represent for humankind. HPC is a key engine for science, including efforts that are crucial to the battle against climate change. Climate and ocean modelling are some examples of the use of HPC that immediately come to mind in this respect. Modelling the environment is important for us to understand what is occurring around us and what is projected to occur. Additionally, materials science is important in order to help develop the technologies needed to more effectively store, transmit and generate energy from renewable sources. HPC is itself a consumer of energy, which brings me to the HPC resolution for this year – using computing resources more efficiently.

    - -

We’ve seen great strides in the efficiency of processors and systems. But at scale, large HPC centers consume large amounts of energy, both for powering the servers and storage systems and for cooling. And if you’re using cloud for HPC, then of course you’re not concerned with the energy and cooling, but rather the cost to you. In either case, making the most efficient use of your infrastructure should be a key consideration. Workload schedulers are the interface between users and jobs in any HPC environment. Users submit work, and it’s the task of the workload scheduler to find suitable compute resources to dispatch the work to. On the surface, this may seem like a trivial task. But with potentially large numbers of jobs, users, servers and priorities, workload and resource management is anything but trivial. The good news is that there are workload management solutions which bring decades of experience to the table.

    - -

IBM Spectrum LSF Suites provide a fully integrated workload management solution for HPC environments. LSF builds on almost 30 years of experience in workload and resource management and is used on some of the world’s largest supercomputers, including Summit at the Oak Ridge Leadership Computing Facility. At a high level, here are some critical areas where LSF can help drive better efficiency in your HPC infrastructure:

    - -
• Dynamic hybrid cloud – automatically flex cloud resources up and down according to policies, with support for all major cloud providers. Learn more here
• Dynamic multi-instance GPU support – right-size NVIDIA A100 multi-instance GPU slices according to incoming workload demands. Learn more here
• User productivity – a single unified UI for job submission and management which captures repeatable best practices. Learn more here

Start the year off right, with a focus on efficiency in your HPC environment with IBM Spectrum LSF. Learn more here.

    \ No newline at end of file diff --git a/_posts/glennklockwood/2014-11-5-tagbloggercom1999blog-4307061427721284246post-8823126637772820705.md b/_posts/glennklockwood/2014-11-5-tagbloggercom1999blog-4307061427721284246post-8823126637772820705.md deleted file mode 100644 index b6beaf1..0000000 --- a/_posts/glennklockwood/2014-11-5-tagbloggercom1999blog-4307061427721284246post-8823126637772820705.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2014-11-05 15:53:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2014/11/storage-utilization-in-long-tail-of.html -slug: storage-utilization-in-the-long-tail-of-science -title: Storage Utilization in the Long Tail of Science ---- - -

    Introduction

Since changing careers and moving up to the San Francisco Bay Area in July, I haven't had nearly as much time to post interesting things here on my blog—I guess that's the startup life. That isn't to say that my life in DNA sequencing has been without interesting observations to explore, though; the world of high-throughput sequencing is becoming increasingly dependent on high-performance computing, and many of the problems being solved in genomics and bioinformatics are stressing aspects of system architecture and cyberinfrastructure that haven't gotten a tremendous amount of exercise from the more traditional scientific domains in computational research.

    Take, for example, the biggest and baddest DNA sequencer on the market: over the course of a three-day run, it outputs around 670 GB of raw (but compressed) sequence data, and this data is spread out over 1,400,000 files. This would translate to an average file size of around 500 KB, but the reality is that the file sizes are a lot less uniform:

    Figure 1. File size distribution of a single flow cell output (~770 gigabases) on Illumina's highest-end sequencing platform

    After some basic processing (which involves opening and closing hundreds of these files repeatedly and concurrently), these data files are converted into very large files (tens or hundreds of gigabytes each) which then get reduced down to data that is more digestible over the course of hundreds of CPU hours. As one might imagine, this entire process is very good at taxing many aspects of file systems, and on the computational side, most of this IO-intensive processing is not distributed and performance benefits most from single-stream, single-client throughput.

    As a result of these data access and processing patterns, the storage landscape in the world of DNA sequencing and bioinformatics is quite different from conventional supercomputing. Some large sequencing centers do use the file systems we know and love (and hate) like GPFS at JGI and Lustre at Sanger, but it appears that most small- and mid-scale sequencing operations are relying heavily on network-attached storage (NAS) for both receiving raw sequencer data and being a storage substrate for all of the downstream data processing.

I say all of this because these data patterns—accessing large quantities of small files and large files with a high degree of random IO—are a common trait in many scientific applications used in the "long tail of science." The fact is, the sorts of IO for which parallel file systems like Lustre and GPFS are designed are tedious (if not difficult) to program, and for the majority of codes that don't require thousands of cores to make new discoveries, simply reading and writing data files in a naïve way is "good enough."

    The Long Tail

    This long tail of science is also using up a huge amount of the supercomputing resources made available to the national open science community; to illustrate, 98% of all jobs submitted to the XSEDE supercomputers in 2013 used 1024 or fewer CPU cores, and these modest-scale jobs represented over 50% of all the CPU time burned up on these machines.

    Figure 2. Cumulative job size distribution (weighted by job count and SUs consumed) for all jobs submitted to XSEDE compute resources in 2013

    The NSF has responded to this shift in user demand by awarding Comet, a 2 PF supercomputer designed to run these modest-scale jobs. The Comet architecture limits its full-bisection bandwidth interconnectivity to groups of 72 nodes, and these 72-node islands will actually have enough cores to satisfy 99% of all the jobs submitted to XSEDE clusters in 2013 (see above). By limiting the full-bisection connectivity to smaller islands and using less rich connectivity between islands, the cost savings in not having to buy so many mid-tier and core switches are then turned into additional CPU capacity.

    What the Comet architecture doesn't address, however, is the question of data patterns and IO stress being generated by this same long tail of science—the so-called 99%. If DNA sequencing is any indicator of the 99%, parallel file systems are actually a poor choice for high-capacity, mid-scale jobs because their performance degrades significantly when facing many small files. Now, the real question is, are the 99% of HPC jobs really generating and manipulating lots of small files in favor of the large striped files that Lustre and GPFS are designed to handle? That is, might the majority of jobs on today's HPC clusters actually be better served by file systems that are less scalable but handle small files and random IO more gracefully?

    Some colleagues and I set out to answer this question last spring, and a part of this quest involved looking at every single file on two of SDSC's Data Oasis file systems. This represented about 1.7 PB of real user data spread across two Lustre 2.4 file systems—one designed for temporary scratch data and the other for projects storage—and we wanted to know if users' data really consisted of the large files that Lustre loves or if, like job size, the 99% are really working with small files.  Since SDSC's two national resources, Gordon and Trestles, restrict the maximum core count for user jobs to modest-scale submissions, these file systems should contain files representative of long-tail users.
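For readers curious to run a similar census on their own systems, the gist of the approach can be sketched in a few lines of Python (a toy illustration only: the real survey walked Lustre metadata rather than stat()-ing 1.7 PB of files from a single client, and the path below is a placeholder):

```python
# Minimal sketch: bin every file under a tree into power-of-two size
# buckets, tracking file counts and total capacity per bucket.
import os
import math
from collections import Counter

counts = Counter()    # number of files per size bucket
capacity = Counter()  # total bytes per size bucket

for dirpath, _dirnames, filenames in os.walk("/scratch"):  # placeholder path
    for name in filenames:
        try:
            size = os.lstat(os.path.join(dirpath, name)).st_size
        except OSError:
            continue  # file vanished or permission denied; skip it
        bucket = 0 if size == 0 else int(math.log2(size)) + 1
        counts[bucket] += 1
        capacity[bucket] += size

for bucket in sorted(counts):
    print(f"< 2^{bucket:2d} bytes: {counts[bucket]:>12,} files, "
          f"{capacity[bucket] / 2**40:10.3f} TiB")
```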

    Scratch File Systems

At the roughest cut, files can be categorized based on whether their size is on the order of bytes and kilobytes (size < 1024*1024 bytes), megabytes (< 1024 MB), gigabytes (< 1024 GB), and terabytes (< 1024 TB). Although pie charts are generally a terrible way to show relative compositions, this is how the files on the 1.2 PB scratch file system broke down:

    Figure 3. Fraction of file count consumed by files of a given size on Data Oasis's scratch file system for Gordon


The above figure shows the number of files on the file system classified by their size, and there is clearly a preponderance of small files less than a gigabyte in size. This is not terribly surprising, as the data is biased towards smaller files; that is, you can fit a thousand one-megabyte files in the same space that a single one-gigabyte file would take up. Another way to show this data is by how much file system capacity is taken up by files of each size:

    Figure 4. File system capacity consumed by files of a given size on Data Oasis's scratch file system for Gordon


This makes it very apparent that the vast majority of the used space on this scratch file system—a total of 1.23 PB of data—is taken up by files on the order of gigabytes and megabytes. There were only seventeen files that were a terabyte or larger in size.

    Incidentally, I don't find it too surprising that there are so few terabyte-sized files; even in the realm of Hadoop, median job dataset sizes are on the order of a dozen gigabytes (e.g., Facebook has reported that 90% of its jobs read in under 100 GB of data). Examining file sizes with much finer granularity reveals that the research data on this file system isn't even of Facebook scale though:

    Figure 5. Number of files of a given size on Data Oasis's scratch file system for Gordon.  This data forms the basis for Figure 3 above


    While there are a large number of files on the order of a few gigabytes, it seems that files on the order of tens of gigabytes or larger are far more scarce. Turning this into relative terms,

    Figure 6. Cumulative distribution of files of a given size on Data Oasis's scratch file system for Gordon


    we can make more meaningful statements. In particular,

    • 90% of the files on this Lustre file system are 1 megabyte or smaller
    • 99% of files are 32 MB or less
    • 99.9% of files are 512 MB or less
    • and 99.99% of files are 4 GB or less

    The first statement is quite powerful when you consider the fact that the default stripe size in Lustre is 1 MB. The fact that 90% of files on the file system are smaller than this means that 90% of users' files really gain no advantages by living on Lustre. Furthermore, since this is a scratch file system that is meant to hold temporary files, it would appear that either user applications are generating a large amount of small files, or users are copying in large quantities of small files and improperly using it for cold storage. Given the quota policies for Data Oasis, I suspect there is a bit of truth to both.

    Circling back a bit though, I said earlier that comparing just the quantity of files can be a bit misleading since a thousand 1 KB files will take up the same space as a single 1 MB file. We can also look at how much total space is taken up by files of various sizes.

    Figure 7. File system capacity consumed by files of a given size on Data Oasis's scratch file system for Gordon.  This is just a more finely diced version of the data presented in Figure 4 above.

    The above chart is a bit data-dense so it takes some staring at to understand what's going on. First looking at the purple line, we can pull out some pretty interesting facts:

    • Half of the file system's used capacity (50%) is consumed by files that are 1 GB or less in size
    • Over 20% of the file system's used capacity is taken up by files smaller than 64 MB
    • About 10% of the capacity is used by files that are 64 GB or larger

    The blue boxes represent the derivative of that purple line—that is, how much space is taken up by files of only one specific size. The biggest chunk of the file system (141 TB) is taken up by 4 GB files, but it appears that there is a substantial range of file sizes that take up very similarly sized pieces of the pie. 512 MB files take up a total of 139 TB; 1 GB, 2 GB, and 8 GB files all take up over 100 TB of total space each as well. In fact, files ranging from 512 MB to 8 GB comprise 50% of the total file system capacity.

Why the sweet spot for space-consuming files is between 512 MB and 8 GB is unclear, but I suspect it's caused more by the human element in research. In my own research, I worked with files in this range simply because it was enough data to be statistically meaningful while still small enough to quickly re-analyze or transfer to a colleague. For file sizes above this range, the mass of the data made it difficult to manipulate using the "long-tail" cyberinfrastructure available to me. But perhaps as more national-scale systems come online to meet the needs of these sorts of workloads, this sweet spot will creep out to larger file sizes.

    Projects Storage

    The above discussion admittedly comes with a lot of caveats.  In particular, the scratch file system we examined was governed by no hard quotas which did lead some people to leave data resident for longer than they probably should have.  However, the other file system we analyzed was SDSC's Data Oasis projects storage which was architected for capacity over performance and featured substantially more disks per OSS.  This projects storage also came with 500 GB quotas by default, forcing users to be a little more mindful of what was worth keeping.

    Stepping back to the coarse-grained kilobyte/megabyte/gigabyte/terabyte pie charts, here is how projects storage utilization compared to scratch storage:

    Figure 8. Fraction of file count consumed by files of a given size on Data Oasis's projects file system (shared between Gordon and Trestles users)

    On the basis of file counts, it's a bit surprising that users seem to store more smaller (kilobyte-sized) files in their projects space than their scratch space.  This may imply that the beginning and end data bookending simulations aren't as large as the intermediate data generated during the calculation.  Alternately, it may be a reflection of user naïveté; I've found that newer users were often afraid to use the scratch space because of the perception that their data may vanish from there without advanced notice.  Either way, gigabyte-sized files comprised a few hundredths of a percent of files, and terabyte-sized files were more scarce still on both file systems.  The trend was uniformly towards smaller sizes on projects space.

    As far as space consumed by these files, the differences remain subtle.

    Figure 9. Fraction of file system capacity consumed by files of a given size on Data Oasis's projects file system

    There appears to be a trend towards users keeping larger files in their projects space, and the biggest change is the decrease in megabyte-sized files in favor of gigabyte-sized files.  However, this trend is very small and persists across a finer-grained examination of file size distributions:

    -
    Figure 10. File system capacity consumed by files of a given size on Data Oasis's projects file system

    Half of the above plot is the same data shown above, making this plot twice as busy and confusing.  However there's a lot of interesting data captured in it, so it's worth the confusing presentation.  In particular, the overall distribution of mass with respect to the various file sizes is remarkably consistent between scratch and projects storage.  We see the same general peak of file size preference in the 1 GB to 10 GB range, but there is a subtle bimodal divide in projects storage that reveals preference for 128MB-512MB and 4GB-8GB files which manifests in the integrals (red and purple lines) that show a visibly greater slope in these regions.

    The observant reader will also notice that the absolute values of the bars are smaller for projects storage than for scratch storage; this is because the projects file system is subject to quotas and, as a result, is not nearly as full of user data.  To complicate things further, the projects storage represents user data from two different machines (each with unique job size policies, to boot), whereas the scratch storage is only accessible from one of those machines.  Despite these differences, user data follows very similar distributions on both file systems.

    Corollaries

    It is probably unclear what to take away from these data, and that is with good reason.  There are fundamentally two aspects to quantifying storage utilization--raw capacity and file count--because they represent two logically separate things.  There is some degree of interchangeability (e.g., storing a whole genome in one file vs. storing each chromosome in its own file), and this is likely contributing to the broad peak in file size between 512 MB and 8 GB.  With that being said, it appears that the typical long-tail user stores a substantial number of decidedly "small" files on Lustre, as exemplified by the fact that 90% of the files resident on the file systems analyzed here are 1 MB or less in size.
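
    As an aside, both metrics can be tallied in a single pass over a POSIX file system.  The following one-liner is only a sketch (it assumes GNU find, and the mount point is made up); the study itself used purpose-built scanning tools, not this:

    # Bin every file into power-of-two size buckets, tracking both file
    # count and total capacity per bucket (zero-byte files are skipped)
    find /scratch -type f -printf '%s\n' | awk '
        $1 > 0 { b = int(log($1)/log(2)); count[b]++; bytes[b] += $1 }
        END    { for (b in count)
                     printf "2^%02d B: %12d files %10.1f GiB\n",
                            b, count[b], bytes[b]/2^30 }' | sort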

    This alone suggests that large parallel file systems may not actually be the most appropriate choice for HPC systems that are designed to support a large group of long-tail users.  While file systems like Lustre and GPFS certainly provide a unique capability--some types of medium-sized jobs absolutely require the IO capabilities of parallel file systems--a larger number of long-tail applications do single-threaded IO, and some of them perform IO so abusively (looking at you, quantum chemistry) that the sheer number of small files and random IO they generate makes them unfit to run on file systems like Lustre or GPFS at all.

    So if Lustre and GPFS aren't the unequivocal best choice for storage in long-tail HPC, what are the other options?

    Burst Buffers

    I would be remiss if I neglected to mention burst buffers here since they are designed, in part, to address the limitations of parallel file systems.  However, their actual usability remains unproven.  Anecdotally, long-tail users are generally not quick to alter the way they design their jobs to use cutting-edge technology, and my personal experiences with Gordon (and its 300 TB of flash) were that getting IO-nasty user applications to effectively utilize the flash was often a very manual process that introduced new complexities, pitfalls, and failure modes.  Gordon was a very experimental platform though, and Cray's new DataWarp burst buffer seems to be the first large-scale productization of this idea.  It will be interesting to see how well it works for real users when the technology starts hitting the floor for open science in mid-2016, if not sooner.

    High-Performance NAS

    An emerging trend in HPC storage is the use of high-performance NAS as a complementary file system technology in HPC platforms.  Traditionally, NAS has been a very poor choice for HPC applications because of the limited scalability of the typical NAS architecture: data resides on a traditional local file system, network service is provided by an additional software layer like NFS, and the ratio of storage capacity to network bandwidth out of the NAS is very high.

    The emergence of cheap RAM and enterprise SSDs has allowed some sophisticated file systems like ZFS and NetApp's WAFL to deliver very high performance--especially random read performance--by using both RAM and flash as buffers between the network and the spinning rust.  This allows certain smaller-scale jobs to enjoy substantially better performance when running on flash-backed NAS than on a parallel file system.  Consider the following IOPS/metadata benchmark run on a parallel file system and on a NAS head with SSDs for caching:

    Figure 11. File stat rate on flash-backed NAS vs. a parallel file system as measured by the mdtest benchmark

    A four-node job that relies on statting many small files (for example, an application that traverses a large directory structure such as the output of one of the Illumina sequencers I mentioned above) can achieve a much higher IO rate on a high-performance NAS than on a parallel file system.  Granted, there are a lot of qualifications to be made with this statement and benchmarking high-performance NAS is worth a post of its own, but the above data illustrate a case where NAS may be preferable over something like Lustre.
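
    For reference, a test of this sort can be driven with an mdtest invocation along the following lines; the rank count, item count, and target path here are illustrative, not the exact parameters behind Figure 11:

    # 64 MPI ranks each create, stat, and remove 1,000 files (-F: files
    # only), repeated over three iterations, in the target directory
    mpirun -np 64 mdtest -F -n 1000 -i 3 -d /mnt/nas/mdtest.out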

    Greater Context

    Parallel file systems like Lustre and GPFS will always play an essential role in HPC, and I don't want to make it sound like they can be universally replaced by high-performance NAS.  They are fundamentally architected to scale out so that increasing file system bandwidth does not require adding new partitions or using software to emulate a single namespace.  In fact, the single namespace of parallel file systems makes the management of the storage system, its users, and the underlying resources very flexible and straightforward.  No volume partitioning needs to be imposed, so scientific applications' and projects' data consumption does not have to align with physical hardware boundaries.

    However, there are cases where a single namespace is not necessary at all; for example, user home directories are naturally partitioned with fine granularity and can be mounted in a uniform location, while physically residing on different NAS heads, with a simple autofs map (sketched after the list below).  In this example, leaving user home directories on a pool of NAS filers offers two big benefits:

    1. Full independence of the underlying storage mitigates the impact of one bad user.  A large job dropping multiple files per MPI process will crush both Lustre and NFS, but in the case of Lustre, the MDS may become unresponsive and block IO across all users' home directories.
    2. Flash caches on NAS can provide higher performance on IOP-intensive workloads at long-tail job sizes.  In many ways, high-performance NAS systems have the built-in burst buffers that parallel file systems are only now beginning to incorporate.
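
    To make this concrete, here is a minimal sketch of such an autofs setup; the host names and export paths are hypothetical:

    # /etc/auto.master: delegate all of /home to an indirect map
    /home  /etc/auto.home  --timeout=600

    # /etc/auto.home: different users transparently land on different NAS heads
    alice  -rw,hard,intr  nas01.example.com:/export/home/alice
    bob    -rw,hard,intr  nas02.example.com:/export/home/bob
    *      -rw,hard,intr  nas03.example.com:/export/home/&
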
    Of course, these two wins come at a cost:
    1. Fully decentralized storage is more difficult to manage.  For example, balancing capacity across all NAS systems is tricky when users have very different data generation rates that they do not disclose ahead of time.
    2. Flash caches can only get you so far, and NFS will fall over when enough IO is thrown at it.  I mentioned that 98% of all jobs use 1024 cores or fewer (see Figure 1), but 1024 cores all performing heavy IO on a typical capacity-rich, bandwidth-poor NAS head will cause it to grind to a halt.
    Flash-backed high-performance NAS is not an end-all storage solution for long-tail computational science, but it also isn't something to be dismissed outright.  As with any technology in the HPC arena, its utility may or may not match up well with users' workloads, but when it does, it can deliver less pain and better performance than parallel file systems.

    Acknowledgments 

    As I mentioned above, the data I presented here was largely generated as a result of an internal project in which I participated while at SDSC.  I couldn't have cobbled this all together without the help of SDSC's HPC Systems group, and I'm really indebted to +Rick, +Haisong, and +Trevor for doing a lot of the heavy lifting in terms of generating the original data, getting systems configured to test, and figuring out what it all meant when the dust settled (even after I had left!).  SDSC's really a world-class group of individuals.
    \ No newline at end of file diff --git a/_posts/glennklockwood/2014-4-24-tagbloggercom1999blog-4307061427721284246post-5875436226821857964.md b/_posts/glennklockwood/2014-4-24-tagbloggercom1999blog-4307061427721284246post-5875436226821857964.md deleted file mode 100644 index 06b3d87..0000000 --- a/_posts/glennklockwood/2014-4-24-tagbloggercom1999blog-4307061427721284246post-5875436226821857964.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2014-04-25 00:29:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2014/04/parallelizing-r-on-supercomputers.html -slug: parallelizing-r-on-supercomputers -title: Parallelizing R on Supercomputers ---- - -
    Executive summary:  I've posted a tutorial on how to parallelize R codes on my website.  This post is a more personal reflection on how I got there.

    "Parallel Options for R" was the title of the first talk I ever presented on behalf of my employer, and despite the fact that I didn't (and still don't) know anything about the R language, statistics, or how to parallelize any of it, the shoe seemed to fit at the time.  The talk went over well, and I've been asked to give the talk in my capacity as the resident "parallel R guy" plenty of times since.

    Every once in a while I get asked how I came to become so involved in some of the weird topics about which I write and speak--after all, I really have no formal training in things like SR-IOV, Hadoop, and next-generation gene sequencing.  As much as I'd like to claim I just have some infinite sage-like knowledge, the reality is that I have to learn about these various technologies as a result of my day job--answering helpdesk tickets.  In the case of parallel R, I simply got a ticket in January 2013 that read,
    "I just ran an intensive R script through [the supercomputer].  Its not much faster than my own machine.  Could you point me to a tutorial for how I can make the process run in different processors in parallel?"
    I couldn't very well say "lol no idea" (which was the truth), but the fact is that there are only about three whole people in my group** who are tasked with solving every problem that comes in from the thousand unique users who run jobs on our system every year.  If I didn't know the answer, there was a good chance that nobody else knew either.  That doesn't change the fact that someone needs to answer the user's question though, and that fact is what got me into the parallel R business.

    In my quest for an answer to this user's helpdesk request, I further discovered that there were no good tutorials online that explain the process of parallelizing R codes.  Thus, I wound up having to buy a book to learn what I needed to know to answer the user's question.  So I did, and I learned the rough basics of how someone might go about parallelizing their R codes.  I gave the user a few starting pointers, some of the libraries that he might want to check out on CRAN, and tried to provide some boilerplate code that might help him parallelize his particular script.  We then went our separate ways.

    With all this reflection aside though, I never lost sight of the reality that I never did answer the user's question: what is a good tutorial on how to parallelize R codes?

    This question has actually come up a number of times from a number of users over the last year.  Rather than take the easy route and tell everyone to attend my next talk on the subject, I decided to turn my presentation on parallelizing R into a series of tutorials which I've put on my website.

    It's not comprehensive by any means; notably, I did not cover either the pbdR library out of UTK/Oak Ridge (an omission with no particularly good justification) or SPRINT from Edinburgh (it's a bit specialized in functionality).  I also haven't had the opportunity to convert my presentation on using R with Hadoop and Spark into the final component of this tutorial.  Those topics will come as time permits.  Regardless, I hope someone finds the write-up useful.

    ** I say "whole people" to reflect that our funding provides somewhere in the neighborhood of three full-time equivalent employees providing front-line user support.  That funding winds up getting distributed across more physical staff.
    \ No newline at end of file diff --git a/_posts/glennklockwood/2014-6-29-tagbloggercom1999blog-4307061427721284246post-4750722661841327889.md b/_posts/glennklockwood/2014-6-29-tagbloggercom1999blog-4307061427721284246post-4750722661841327889.md deleted file mode 100644 index f93ec48..0000000 --- a/_posts/glennklockwood/2014-6-29-tagbloggercom1999blog-4307061427721284246post-4750722661841327889.md +++ /dev/null @@ -1,18 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2014-06-29 21:31:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2014/06/exascale-in-perspective-rscs-12.html -slug: exascale-in-perspective-rsc-s-1-2-petaflop-rack -title: Exascale in perspective- RSC's 1.2 petaflop rack ---- - -
    Russian supercomputing manufacturer RSC generated some buzz at ISC'14 last week when they showed their 1.2 PF-per-rack Xeon Phi-based platform.  I was aware of this system from when they first announced it a few months prior, and I referenced it in a piece of a blog post I was writing about the scarier aspects of exascale computing.  Given my impending career change, though, it is unclear whether I will ever have the time to finish that post before it becomes outdated.  Since RSC is back in the spotlight, I thought I'd post the piece I wrote up to illustrate how wacky this 1.2 PF rack really is in terms of power consumption.  Power consumption, of course, is the limiting factor standing between today and the era of exascale computing.

    So, to put a 400 kW, 1.2 PF rack into perspective, here is that piece:


    The Importance of Energy Efficiency

    Up through the petascale era in which we currently live, the raw performance of high-performance components--processors, RAM, and interconnect--was what limited the ultimate performance of a given high-end machine.  The first petaflop machine, Los Alamos' Roadrunner, derived most of its FLOPs from high-speed PowerXCell 8i processors pushing 3.2 GHz per core.  Similarly, the first 10 PF supercomputer, RIKEN's K computer, derived its performance from its sheer size of 864 cabinets.  Although I don't mean to diminish the work done by the engineers that actually got these systems to deliver this performance, the petascale era really was made possible by making really big systems out of really fast processors.

    By contrast, Exascale represents the first milestone where the limitation does not lie in making these high-performance components faster; rather, performance is limited by the amount of electricity that can be physically delivered to a processor and the amount of heat that can be extracted from it.  This limitation is what has given rise to these massively parallel processors that eschew a few fast cores for a larger number of low-powered ones.  By keeping clock speeds low and densely packing many (dozens or hundreds) of compute cores on a single silicon die, these massively parallel processors are now realizing power efficiencies (flops per watt) that are an order of magnitude higher than what traditional CPUs can deliver.

    The closest technology on the market that will probably resemble the future's exaflop machines are based on accelerators--either NVIDIA GPUs or Intel's MICs.  The goal will be to jam as many of these massively parallel processors into as small a space and with as tight of an integration as possible.  Recognizing this trend, NERSC has opted to build what I would call the first "pre-exascale" machine in its NERSC-8 procurement which will feature a homogeneous system of manycore processors.

    However, such pre-exascale hardware doesn't actually exist yet, and NERSC-8 won't appear until 2016.  What does exist, though, is a product by Russia's RSC Group called PetaStream: a rack packed with 1024 current-generation Xeon Phi (Knights Corner) coprocessors that has a peak performance of 1.2 PF/rack.  While this sounds impressive, it also highlights the principal challenge of exascale computing: power consumption.  One rack of RSC PetaStream is rated for 400 kW, delivering 3 GFLOPS/watt peak.  Let's put this into perspective.

    Kilowatts, megawatts, and gigawatts in perspective

    During a recent upgrade to our data center infrastructure, three MQ DCA220SS-series diesel generators were brought in for the critical systems.  Each is capable of producing 220 kVA according to the spec sheets.
    Three 220 kVA diesel generators plugged in during a PM at SDSC
    It would take three of these diesel generators to power a single rack of RSC's PetaStream.  Of course, these backup diesel generators aren't a very efficient way of generating commercial power, so this example is a bit skewed.

    Let's look at something that is used to generate large quantities of commercial power instead.  A GE 1.5-77 wind turbine, which is GE's most popular model, is advertised as delivering 1.5 megawatts at wind speeds above 15 miles per hour.

    GE 1.5 MW wind turbine.   Source: NREL
    Doing the math, this means that the above pictured turbine would be able to power only three racks of RSC PetaStream on a breezy day.
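
    The rest of the numbers in this post fall out of the same back-of-envelope arithmetic.  Here is a quick sketch of it in Python, using only the peak ratings quoted above (everything is approximate):

    # back-of-envelope arithmetic for the comparisons in this post
    rack_pflops, rack_kw = 1.2, 400.0    # one PetaStream rack, peak
    turbine_kw = 1500.0                  # one GE 1.5-77 wind turbine
    print(turbine_kw / rack_kw)          # ~3.75 racks per turbine
    racks = 1000.0 / rack_pflops         # ~834 racks per peak exaflop
    print(racks * rack_kw / 1000.0)      # ~333 MW to power those racks
    print(1.2e6 / (racks * rack_kw))     # ~3.6 EF from a 1.2 GW reactor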

    To create a supercomputer with a peak capability of an exaflop using RSC's platform, you'd need over 800 racks of PetaStream and over 300 MW of power to turn it all on.  That's over 200 of the above GE wind turbines and enough electricity to power about 290,000 homes in the U.S.  Wind farms of this size do exist; for example,

    300 MW Stateline Wind Farm.  Source: Wikimedia Commons
    the Stateline Wind Farm, which was built on the border between Oregon and Washington, has a capacity of about 300 MW.  Of course, wind farms of this capacity cannot be built in any old place.

    Commercial nuclear power plants can be built in a variety of places though, and they typically generate on the order of 1 gigawatt (GW) of power per reactor.  In my home state of New Jersey, the Hope Creek Nuclear Generating Station has a single reactor that was built to deliver about 1.2 GW of power:

    1.2 GW Hope Creek nuclear power station.  The actual reactor is housed in the concrete cylinder to the bottom left.  Courtesy of the Nuclear Regulatory Commission.

    This is enough to power almost 4 exaflops of PetaStream.  Of course, building a nuclear reactor for every exaflop supercomputer would be extremely costly, given the multi-billion dollar cost of building reactors like this.  Clearly, the energy efficiency (flops/watt) of computing technology needs to improve substantially before we can arrive at the exascale era.
    \ No newline at end of file diff --git a/_posts/glennklockwood/2014-6-8-tagbloggercom1999blog-4307061427721284246post-6242128228381347032.md b/_posts/glennklockwood/2014-6-8-tagbloggercom1999blog-4307061427721284246post-6242128228381347032.md deleted file mode 100644 index 37b880f..0000000 --- a/_posts/glennklockwood/2014-6-8-tagbloggercom1999blog-4307061427721284246post-6242128228381347032.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2014-06-08 03:34:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2014/06/spark-on-supercomputers-few-notes.html -slug: spark-on-supercomputers-a-few-notes -title: Spark on Supercomputers- A Few Notes ---- -I've been working with Apache Spark quite a bit lately in an effort to bring it into the fold as a viable tool for solving some of the data-intensive problems encountered in supercomputing.  I've already added support for provisioning Spark clusters to a branch of the myHadoop framework I maintain so that Slurm, Torque, and SGE users can begin playing with it, and as a result of these efforts, I've discovered a number of interesting issues with Spark running on traditional supercomputers.

    At this point in time, Spark is very rough around the edges.  The core implementation of resilient distributed datasets is all there and works wonderfully, but I've found that it doesn't take long to start discovering bugs and half-implemented features that can get very confusing very quickly.  Perhaps half of the problems I've faced are the result of the fact that I have been trying to run Spark in non-traditional ways (for example, over hosts' TCP-over-InfiniBand interfaces and with non-default config directories), and although the documentation claims to support all of the features necessary to make this possible, the reality is a bit different.

    What follows are just some incoherent notes I've taken while porting Spark to the myHadoop framework.  Spark is developing rapidly and constantly improving, so I hope this post becomes outdated as the Spark developers make the framework more robust.

    Control Script Problems

    Hadoop and Spark both ship with "control scripts" or "cluster launch scripts" that facilitate the starting and stopping of the entire cluster of daemons.  At the highest level, this includes start-all.sh and stop-all.sh, which make calls to start-dfs.sh and start-yarn.sh (in Hadoop) and start-master.sh and start-slaves.sh (in Spark).  In Hadoop, these scripts work wonderfully, but Spark's implementation of these control scripts is still quite immature because they carry implicit assumptions about users' Spark configurations.

    Like Hadoop, Spark supports a spark-env.sh file (located in $SPARK_CONF_DIR) which defines environment variables for all of the remote Spark workers that are spawned across the cluster.  This file is an ideal place to put the following environment variable definitions (a minimal sketch of such a file follows the list below):
    • SPARK_MASTER_IP - the default value for this is `hostname` which is generally not a great default on most clusters.  On Rocks, we append ".ibnet" to the hostname to get Spark to operate over the InfiniBand fabric.
    • SPARK_LOCAL_IP - again, ensure that this is set up to use the correct interface on the cluster.  We append .ibnet on Rocks.
    • SPARK_HOME, SPARK_PREFIX, and SPARK_CONF_DIR should also be defined here since spark-env.sh will usually override the variables defined by spark-config.sh (see below)
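
    For illustration, a minimal spark-env.sh along these lines might look like the following; the paths and the .ibnet suffix are placeholders specific to our Rocks/myHadoop environment, not general defaults:

    # spark-env.sh -- a minimal sketch; adjust the interface suffix and paths per site
    export SPARK_MASTER_IP=$(hostname).ibnet   # run the master over InfiniBand
    export SPARK_LOCAL_IP=$(hostname).ibnet    # bind each worker to the same fabric
    export SPARK_HOME=/home/glock/apps/spark-0.9.0
    export SPARK_PREFIX=$SPARK_HOME
    export SPARK_CONF_DIR=$HOME/spark-conf     # a non-default config location
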
    $SPARK_HOME/sbin/spark-config.sh is where much of the Spark control scripts' "intelligence" comes from as far as defining the environment variables that Spark needs to launch.  In particular, spark-config.sh defines the following variables before reading spark-env.sh:
    • SPARK_PREFIX
    • SPARK_HOME
    • SPARK_CONF_DIR
    The problem is that spark-config.sh will stomp all over anything the user defines for the above variables, and since spark-config.sh is called from within all of the Spark control scripts (both those invoked by the user and those invoked by sub-processes on remote hosts during the daemon spawning process), trying to get Spark to use non-default values for SPARK_CONF_DIR (e.g., exactly what myHadoop does) gets to be tedious.

    The Spark developers tried to work around this by having the control scripts call spark-env.sh after spark-config.sh, meaning you should be able to define your own SPARK_CONF_DIR in spark-env.sh.  Unfortunately, this mechanism of calling spark-env.sh after spark-config.sh appears as

    . "$sbin/spark-config.sh"

    if [ -f "${SPARK_CONF_DIR}/spark-env.sh" ]; then
        . "${SPARK_CONF_DIR}/spark-env.sh"
    fi

    That is, spark-config.sh will stomp all over any user-specified SPARK_CONF_DIR, and then use the SPARK_CONF_DIR from spark-config.sh to look for spark-env.sh.  Thus, there is no actual way to get the Spark control scripts (as of version 0.9) to honor the user-specified SPARK_CONF_DIR.  It looks like the latest commits to Spark have started to address this, but a cursory glance over the newest control scripts suggests that this remains broken.

    Anyway, as a result of this, myHadoop's Spark integration eschews the Spark control scripts and handles spawning the daemons more directly using the manual method of spawning slaves (sketched after the list below).  Doing this averts the following issues:
    1. start-slaves.sh can't find any slaves because it always looks for $SPARK_HOME/etc/slaves.  This can be worked around by passing SPARK_SLAVES=$SPARK_CONF_DIR/slaves to start-slaves.sh for a non-default SPARK_CONF_DIR.
    2. stop-master.sh doesn't do anything useful because you still need to kill -9 the master process by hand.  Not sure why this is the case.
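
    For reference, the manual method boils down to launching the daemons via spark-class yourself, which is essentially what myHadoop does.  A rough sketch, with a placeholder master hostname:

    # on the master host: launch the standalone master daemon directly
    $SPARK_HOME/bin/spark-class org.apache.spark.deploy.master.Master &

    # on each slave host (e.g., via ssh): point a worker at the master
    $SPARK_HOME/bin/spark-class org.apache.spark.deploy.worker.Worker \
        spark://master.ibnet:7077 &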


    Deciphering Spark Errors

    Here are various cryptic stack traces I've encountered while working on Spark.  I kept these mostly for myself, but I've started meeting people that hit the same problems and thought it might be worthwhile to share the diagnoses I've found.

    In general, Spark seems to work best when used conservatively, but when you start doing things that do not strictly fall within the anticipated use case, things break in strange ways.  For example, if you try to write an RDD with an empty element (e.g., a text file with empty lines), you would get this really crazy error that does not actually say anything meaningful:

    14/04/30 16:23:07 ERROR Executor: Exception in task ID 19
    scala.MatchError: 0 (of class java.lang.Integer)
         at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:110)
         at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:153)
         at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:96)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:109)
         at org.apache.spark.scheduler.Task.run(Task.scala:53)
         at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:213)
         at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:49)
         at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:178)
         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
         at java.lang.Thread.run(Thread.java:722)

    I filed a bug report about this particular problem and the issue has been fixed, but it's just one of those edge cases where Spark will fail catastrophically (I had to look at the source code to figure out what "scala.MatchError" meant).  Usually you wouldn't be operating on empty data sets, but I discovered this error when I was trying to quickly determine if my Spark slaves were communicating with my master correctly by issuing

    file = sc.textFile('hdfs://master.ibnet0/user/glock/input.txt')
    file.saveAsTextFile('hdfs://master.ibnet0/user/glock/output')

    That is, simply reading in a file and writing it back out with pyspark would cause catastrophic failure.  This is what I meant when I say Spark's still rough around the edges.

    Here are a few more errors I've encountered.  They're not problems with Spark, but the stack traces and exceptions thrown can be a little mysterious.  I'm pasting it all here for the sake of googlers who may run into these same problems.

    If you try to use Spark built against Hadoop 2 with a Hadoop 1 HDFS, you'll get this IPC error:

    >>> file.saveAsTextFile('hdfs://s12ib:54310/user/glock/gutenberg.out')
    Traceback (most recent call last):
      File "", line 1, in
      File "/home/glock/apps/spark-0.9.0/python/pyspark/rdd.py", line 682, in saveAsTextFile
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
      File "/home/glock/apps/spark-0.9.0/python/lib/py4j-0.8.1-src.zip/py4j/java_gateway.py", line 537, in __call__
      File "/home/glock/apps/spark-0.9.0/python/lib/py4j-0.8.1-src.zip/py4j/protocol.py", line 300, in get_return_value
    py4j.protocol.Py4JJavaError: An error occurred while calling o23.saveAsTextFile.
    : org.apache.hadoop.ipc.RemoteException: Server IPC version 9 cannot communicate with client version 4
         at org.apache.hadoop.ipc.Client.call(Client.java:1070)
         at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:225)
         at $Proxy7.getProtocolVersion(Unknown Source)
         at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:396)
         at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:379)


    If your Pythons aren't all the same version across the nodes when Spark workers are instantiated, you might get a cryptic error like this when trying to call the count() method on an RDD:

    14/04/30 16:15:11 ERROR Executor: Exception in task ID 12
    org.apache.spark.api.python.PythonException: Traceback (most recent call last):
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/worker.py", line 77, in main
        serializer.dump_stream(func(split_index, iterator), outfile)
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/serializers.py", line 182, in dump_stream
        self.serializer.dump_stream(self._batched(iterator), stream)
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/serializers.py", line 117, in dump_stream
        for obj in iterator:
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/serializers.py", line 171, in _batched
        for item in iterator:
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/rdd.py", line 493, in func
        if acc is None:
    TypeError: an integer is required

         at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:131)
         at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:153)
         at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:96)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:109)
         at org.apache.spark.scheduler.Task.run(Task.scala:53)
         at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:213)
         at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:49)
         at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:178)
         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
         at java.lang.Thread.run(Thread.java:722)


    If you try to write an RDD to a file with mismatched Python versions, or if you were using anything earlier than Python 2.7 (e.g., 2.6) with any Spark version earlier than 1.0.0, you'd see this:

    14/04/30 17:53:20 WARN scheduler.TaskSetManager: Loss was due to org.apache.spark.api.python.PythonException
    org.apache.spark.api.python.PythonException: Traceback (most recent call last):
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/worker.py", line 77, in main
        serializer.dump_stream(func(split_index, iterator), outfile)
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/serializers.py", line 117, in dump_stream
        for obj in iterator:
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/rdd.py", line 677, in func
        if not isinstance(x, basestring):
    SystemError: unknown opcode

         at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:131)
         at org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:153)
         at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:96)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)
         at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:109)
         at org.apache.spark.scheduler.Task.run(Task.scala:53)
         at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:213)
         at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:49)
         at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:178)
         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
         at java.lang.Thread.run(Thread.java:722)


    If your HDFS URI is wrong, the error message actually makes sense.  It is buried quite deeply though.

    Traceback (most recent call last):
      File "", line 1, in
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/rdd.py", line 682, in saveAsTextFile
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/java_gateway.py", line 537, in __call__
      File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/protocol.py", line 300, in get_return_value
    py4j.protocol.Py4JJavaError: An error occurred while calling o23.saveAsTextFile.
    : java.lang.IllegalArgumentException: java.net.UnknownHostException: s12ib.ibnet0
         at org.apache.hadoop.security.SecurityUtil.buildTokenService(SecurityUtil.java:418)
         at org.apache.hadoop.hdfs.NameNodeProxies.createNonHAProxy(NameNodeProxies.java:231)
         at org.apache.hadoop.hdfs.NameNodeProxies.createProxy(NameNodeProxies.java:139)
         at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:510)
         at org.apache.hadoop.hdfs.DFSClient.<init>(DFSClient.java:453)
         at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:136)
         at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2433)
         at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88)
         at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467)
         at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449)
         at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367)
         at org.apache.hadoop.fs.Path.getFileSystem(Path.java:287)
         at org.apache.hadoop.mapred.SparkHadoopWriter$.createPathFromString(SparkHadoopWriter.scala:193)
         at org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(PairRDDFunctions.scala:685)
         at org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(PairRDDFunctions.scala:572)
         at org.apache.spark.rdd.RDD.saveAsTextFile(RDD.scala:894)
         at org.apache.spark.api.java.JavaRDDLike$class.saveAsTextFile(JavaRDDLike.scala:355)
         at org.apache.spark.api.java.JavaRDD.saveAsTextFile(JavaRDD.scala:27)
         at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
         at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)
         at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)
         at java.lang.reflect.Method.invoke(Method.java:597)
         at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)
         at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:379)
         at py4j.Gateway.invoke(Gateway.java:259)
         at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
         at py4j.commands.CallCommand.execute(CallCommand.java:79)
         at py4j.GatewayConnection.run(GatewayConnection.java:207)
         at java.lang.Thread.run(Thread.java:619)
    Caused by: java.net.UnknownHostException: s12ib.ibnet0
         ... 29 more
    \ No newline at end of file diff --git a/_posts/glennklockwood/2015-1-29-tagbloggercom1999blog-4307061427721284246post-8310251418902836284.md b/_posts/glennklockwood/2015-1-29-tagbloggercom1999blog-4307061427721284246post-8310251418902836284.md deleted file mode 100644 index 0443628..0000000 --- a/_posts/glennklockwood/2015-1-29-tagbloggercom1999blog-4307061427721284246post-8310251418902836284.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2015-01-29 07:53:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2015/01/thoughts-on-nsf-future-directions.html -slug: thoughts-on-the-nsf-future-directions-interim-report -title: Thoughts on the NSF Future Directions Interim Report ---- - -The National Academies recently released an interim report entitled Future Directions for NSF Advanced Computing Infrastructure to Support U.S. Science and Engineering in 2017-2020 as a part of a $723,000 award commissioned to take a hard look at where the NSF's supercomputing program is going.  Since releasing the interim report, the committee has been soliciting feedback and input from the research community to consider as they draft their final report, and I felt compelled to put some of my thoughts into a response.

    NSF's HPC programs are something I hold near and dear since I got my start in the industry by supporting two NSF-owned supercomputers.  I put a huge amount of myself into Trestles and Gordon, and I still maintain that job encompassed the most engaging and rewarding work I've ever done.  However, the NSF's lack of a future roadmap for its HPC program made my future feel perpetually uncertain, and this factored heavily in my decision to eventually pursue other opportunities.

    Now that I am no longer affiliated with NSF, I wanted to delineate some of the problems I observed during my time on the inside with the hope that someone more important than me really thinks about how they can be addressed.  The report requested feedback in nine principal areas, so I've done my best to contextualize my thoughts with the committee's findings.

    With that being said, I wrote this all up pretty hastily.  Some of it may be worded strongly, and although I don't mean to offend anybody, I stand by what I say.  That doesn't mean that my understanding of everything is correct though, so it's probably best to assume that I have no idea what I'm talking about here.

    Finally, a glossary of terms may make this more understandable:

    • XD is the NSF program that funds XSEDE; it finances infrastructure and people, but it does not fund supercomputer procurements or operations
    • Track 1 is the program that funded Blue Waters, the NSF's leadership-class HPC resource
    • Track 2 is the program that funds most of the XSEDE supercomputers.  It funded systems like Ranger, Keeneland, Gordon, and Stampede



    1. How to create advanced computing infrastructure that enables integrated discovery involving experiments, observations, analysis, theory, and simulation.

    Answering this question involves a few key points:
    1. Stop treating NSF's cyberinfrastructure as a computer science research project and start treating it like research infrastructure operation.  Office of Cyberinfrastructure (OCI) does not belong in Computer & Information Science & Engineering (CISE).
    2. Stop funding cyberinfrastructure solely through capital acquisition solicitations and restore reliable core funding to NSF HPC centers.  This will restore a community that is conducive to retaining expert staff.
    3. Focus OCI/ACI and raise the bar for accountability and transparency.   Stop funding projects and centers that have no proven understanding of operational (rather than theoretical) HPC.
    4. Either put up or give up.  The present trends in funding lie on a road to death by attrition.  
    5. Don't waste time and funding by presuming that outsourcing responsibility and resources to commercial cloud or other federal agencies will effectively serve the needs of the NSF research community.
    I elaborate on these points below.

    2. Technical challenges to building future, more capable advanced computing systems and how NSF might best respond to them.

    "Today’s approach of federating distributed compute- and data-intensive resources to meet the increasing demand for combined computing and data capabilities is technically challenging and expensive."
    This is true.
    "New approaches that co-locate computational and data resources might reduce costs and improve performance. Recent advances in cloud data center design may provide a viable integrated solution for a significant fraction of (but not all) data- and compute-intensive and combined workloads."
    This strong statement is markedly unqualified and unsubstantiated.  If it is really recommending that the NSF start investing in the cloud, consider the following:
    • Cloud computing resources are designed for burst capabilities and are only economical when workloads are similarly uneven.  In stark contrast, most well-managed HPC systems see constant, high utilization, which is exactly where the cloud becomes economically intractable.
    • The suggestion that cloud solutions can "improve performance" is unfounded.  At a purely technological level, the cloud will never perform as well as unvirtualized HPC resources, period.  Data-intensive workloads and calculations that require modest inter-node communication will suffer substantially.

    In fact, if any cost reduction or performance improvement can be gained by moving to the cloud, I can almost guarantee that incrementally more can be gained by simply addressing the non-technological aspects of the current approach of operating federated HPC.  Namely, the NSF must
    1. Stop propping up failing NSF centers who have been unable to demonstrate the ability to effectively design and operate supercomputers. 
    2. Stop spending money on purely experimental systems that domain scientists cannot or will not use.

    The NSF needs to re-focus its priorities and stop treating the XD program like a research project and start treating it like a business.  Its principal function should be to deliver a product (computing resources) to customers (the research community).  Any component that is not helping domain scientists accelerate discovery should be strongly scrutinized.  Who are these investments truly satisfying?
    "New knowledge and skills will be needed to effectively use these new advanced computing technologies."
    This is a critical component of XD that is extremely undervalued and underfunded.  Nobody is born with the ability to know how to use HPC resources, and optimization should be performed on users in addition to code.  There is huge untapped potential in collaborative training between U.S. federal agencies (DOE, DOD) and European organizations (PRACE).  If there is bureaucratic red tape in the way, it needs to be dealt with at an official level or circumvented at the grassroots level.

    3. The computing needs of individual research areas.

    XDMoD shows this.  The principal workloads across XSEDE are from traditional domains like physics and chemistry, and the NSF needs to recognize that this is not going to change substantially over the lifetime of a program like XD.

    Straight from XDMoD for 2014.  MPS = math and physical sciences, BIO = biological sciences, GEO = geosciences.  NSF directorate is not a perfect alignment; for example, I found many projects in BIO were actually chemistry and materials science.


    While I wholeheartedly agree that new communities should be engaged by lowering the barriers to entry, these activities cannot be done at a great expense of undercutting the resources required by the majority of XD users.

    The cost per CPU cycle should not be deviating wildly between Track 2 awards because the ROI on very expensive cycles will be extremely poor.  If the NSF wants to fund experimental systems, it needs to do that as an activity that is separate from the production resources.  Alternatively, only a small fraction of each award should be earmarked for new technologies that represent a high risk; the Stampede award was a fantastic model of how a conservative fraction of the award (10%) can fund an innovative and high-risk technology.

    4. How to balance resources and demand for the full spectrum of systems, for both compute- and data-intensive applications, and the impacts on the research community if NSF can no longer provide state-of-the-art computing for its research community.

    "But it is unclear, given their likely cost, whether NSF will be able to invest in future highest-tier systems in the same class as those being pursued by the Department of Energy, Department of Defense, and other federal mission agencies and overseas."
    The NSF does not have the budget to support leadership computing.  This is clear even from a bird's eye view: DOE ASCR's budget for FY2012 was $428 million and, by comparison, NSF ACI's budget was only $211 million.  Worse yet, despite having half the funding of its DOE counterpart, the NSF owned HPC resources at seven universities in FY2012 compared to ASCR's three centers.

    Even if given the proper funding, the NSF's practice of spreading Track 2 awards across many universities to operate its HPC assets is not conducive to operating leadership computing.  The unpredictable nature of Track 2 awards has resulted in very uneven funding for NSF centers which, quite frankly, is a terrible way to attract and retain the highly knowledgeable world-class staff that is necessary to operate world-class supercomputers.

    5. The role of private industry and other federal agencies in providing advanced computing infrastructure.

    The report makes some very troubling statements in reference to this question.
    "Options for providing highest-tier capabilities that merit further exploration include purchasing computing services from federal agencies…"
    This sounds dirty.  Aren't there regulations in place that restrict the way in which money can flow between the NSF and DOE?  I'm also a little put off by the fact that this option is being put forth in a report that is crafted by a number of US DOE folks whose DOE affiliations are masked by university affiliations in the introductory material.
    "…or by making arrangements with commercial services (rather than more expensive purchases by individual researchers)."
    Providing advanced cyberinfrastructure for the open science community is not a profitable venture.  There is no money in HPC operations.  I do not see any "leadership" commercial cloud providers offering the NSF a deal on spare cycles, and the going rate for commercial cloud time is known to be far more expensive than deploying HPC resources in-house at the national scale.

    6. The challenges facing researchers in obtaining allocations of advanced computing resources and suggestions for improving the allocation and review processes.

    "Given the “double jeopardy” that arises when researchers must clear two hurdles—first, to obtain funding for their research proposal and, second, to be allocated the necessary computing resources—the chances that a researcher with a good idea can carry out the proposed work under such conditions is diminished."
    XD needs to be more tightly integrated with other award processes to mitigate the double jeopardy issue.  I have a difficult time envisioning the form which this integration would take, but the NSF GRF's approach of prominently featuring NSF HPC resources as a part of the award might be a good start.  As an adaptive proposal reviewer within XSEDE and a front-line interface with first-time users, I found that having the NSF GRF bundle XSEDE time greatly reduced the entry barrier for new users and made it easier for us reviewers to stratify the proposals.  Another idea may be to invite NSF center staff to NSF contractors' meetings (if such things exist; I know they do for DOE BES) to show a greater amount of integration across NSF divisions.

    In addition, the current XSEDE allocation proposal process is extremely onerous.  The document that describes the process is ridiculously long and contains obscure requirements that serve absolutely no purpose.  For example, all XSEDE proposals require a separate document detailing the scaling performance of their scientific software.  Demonstrating an awareness of the true costs of performing certain calculations has its merits, but a detailed analysis of scaling is not even relevant for the majority of users who run modest-scale jobs or use off-the-shelf black-box software like Gaussian.  The only thing these obscure requirements do is prevent new users, who are generally less familiar with all of the scaling requirements nonsense, from getting any time.  If massive scalability is truly required by an application, the PI needs to be moved over to the Track 1 system (Blue Waters) or referred to INCITE.

    As a personal anecdote, many of us center staff found ourselves simply short-circuiting the aforementioned allocations guide and providing potential new users with a guide to the guide.  It was often sufficient to provide a checklist of minutiae whose absence would result in an immediate proposal rejection and allow the PIs to do what they do best—write scientific proposals for their work.  Quite frankly, the fact that we had to provide a guide to understanding the guide to the allocations process suggests that the allocations process itself is grossly over-engineered.

    7. Whether wider and more frequent collection of requirements for advanced computing could be used to inform strategic planning and resource allocation; how these requirements might be used; and how they might best be collected and analyzed.

    The XD program has already established a solid foundation for reporting the popularity and usability of NSF HPC resources in XDMoD.  The requirements of the majority are evolving more slowly than computer scientists would have everyone believe.

    Having been personally invested in two Track 2 proposals, I have gotten the impression that the review panels who select the destiny of the NSF's future HPC portfolio are more impressed by cutting-edge, albeit untested and under-demanded, proposals.  Consequently, taking a "functional rather than a technology-focused or structural approach" to future planning will result in further loss of focus.  Instead of delivering conservatively designed architectures that will enjoy guaranteed high utilization, functional approaches will give way to computer scientists on review panels dictating what resources domain scientists should be using to solve their problems.  The cart will be before the horse.

    Instead, it would be far more valuable to include more operational staff in strategic planning.  The people on the ground know how users interact with systems and what will and won't work.  As with the case of leadership computing, the NSF does not have the financial commitment to be leading the design of novel computing architectures at large scales.  Exotic and high-risk technologies should be simply left out of the NSF's Track 2 program, incorporated peripherally but funded through other means (e.g., MRIs), or incorporated in the form of a small fraction of a larger, lower-risk resource investment.

    A perspective of the greater context of this has been eloquently written by Dr. Steven Gottlieb.  Given his description of the OCI conversion to ACI, it seems like taking away the Office of Cyberinfrastructure's (OCI's) autonomy and placing it under Computer & Information Science & Engineering (CISE) exemplifies an ongoing and significant loss of focus within NSF.  This change reflected the misconception that architecting and operating HPC resources for domain sciences is a computer science discipline.

    This is wrong.

    Computer scientists have a nasty habit of creating tools that are intellectually interesting but impractical for domain scientists.  These tools get "thrown over the wall," never to be picked up, and represent an overall waste of effort in the context of operating HPC services for non-computer scientists.  Rather, operating HPC resources for the research community requires experienced technical engineers with a pragmatic approach to HPC.  Such people are most often not computer scientists, but former domain scientists who know what does and doesn't work for their respective communities.

    8. The tension between the benefits of competition and the need for continuity as well as alternative models that might more clearly delineate the distinction between performance review and accountability and organizational continuity and service capabilities.

    "Although NSF’s use of frequent open competitions has stimulated intellectual competition and increased NSF’s financial leverage, it has also impeded collaboration among frequent competitors, made it more difficult to recruit and retain talented staff, and inhibited longer-term planning."
    Speaking from firsthand experience, I can say that working for an NSF center is a life of a perpetually uncertain future and dicing up FTEs into frustratingly tiny pieces.  While some people are driven by competition and fundraising (I am one of them), an entire organization built up to support multi-million dollar cyberinfrastructure cannot be sustained this way.

    At the time I left my job at an NSF center, my salary was covered by six different funding sources at levels ranging from 0.05 to 0.30 FTEs.  Although this officially meant that I was only 30% committed to directly supporting the operation of one of our NSF supercomputers, the reality was that I (and many of my colleagues) simply had to put in more than 100% of my time into the job.  This is a very high-risk way to operate because committed individuals get noticed and almost invariably receive offers of stable salaries elsewhere.  Retaining talent is extremely difficult when you have the least to offer, and the current NSF funding structure makes it very difficult for centers to do much more than continually hire entry-level people to replace the rising stars who find greener pastures.

    Restoring reliable, core funding to the NSF centers would allow them to re-establish a strong foundation that can be an anchor point for other sites wishing to participate in XD.  This will effectively cut off some of the current sites operating Track 2 machines, but frankly, the NSF has spread its HPC resources over too many sites at present and is diluting its investments in people and infrastructure.  The basis for issuing this core funding could follow a pattern similar to that of XD where long-term (10-year) funding is provisioned with a critical 5-year review.

    If the NSF cannot find a way to re-establish reliable funding, it needs to accept defeat and stop trying to provide advanced cyberinfrastructure.  The current method of only funding centers indirectly through HPC acquisitions and associated operations costs is unsustainable for two reasons:
    • The length of these Track 2 awards (typically 3 years of operations) makes future planning impossible.  Thus, this current approach forces centers to follow high-risk and inadequately planned roadmaps.
    • All of the costs associated with maintaining world-class expertise and facilities have to come from someone else's coffers.  Competitive proposals for HPC acquisitions simply cannot afford to request budgets that include strong education, training, and outreach programs, so these efforts wind up suffering.


    9. How NSF might best set overall strategy for advanced computing-related activities and investments as well as the relative merits of both formal, top-down coordination and enhanced, bottom-up process.

    Regarding the top-down coordination, the NSF should drop the Track 2 program's current solicitation model where proposers must have a vendor partner to get in the door.  This is unnecessarily restrictive and fosters an unhealthy ecosystem where vendors and NSF centers are both scrambling to pair up, resulting in high-risk proposals.  Consider the implications:
    1. Vendors are forced to make promises that they may not be able to fulfill (e.g., Track 2C and Blue Waters).  Given that two of the nine solicitations resulted in substantial wastes of time and money (an over-20% vendor failure rate!), I find it shocking that the NSF continues to operate this way.
    2. NSF centers are only capable of choosing the subset of vendors who are willing to play ball with them, resulting in a high risk of sub-optimal pricing and configurations for the end users of the system.

    I would recommend a model, similar to many European nations', where a solicitation is issued for a vendor-neutral proposal to deploy and support a program that is built around a resource.  A winning proposal is selected based not only on the system's features, its architecture, and the science it will support, but also on the plan for training, education, collaboration, and outreach.  Following this award, the bidding process for a specific hardware solution begins.

    This addresses the two high-risk processes mentioned above and simultaneously eliminates the current qualification in Track 2 solicitations that no external funding can be included in the proposal.  By leaving the capital expenses out of the selection process, the NSF stands to get the best deal from all vendors and other external entities independent of the winning institution.

    Bottom-up coordination is much more labor-intensive because it requires highly motivated people at the grassroots to participate.  Given the NSF's current inability to provide stable funding for highly qualified technical staff, I cannot envision how this would actually come together.

    \ No newline at end of file diff --git a/_posts/glennklockwood/2015-4-29-tagbloggercom1999blog-4307061427721284246post-8244135124065934589.md b/_posts/glennklockwood/2015-4-29-tagbloggercom1999blog-4307061427721284246post-8244135124065934589.md deleted file mode 100644 index 7bcbb37..0000000 --- a/_posts/glennklockwood/2015-4-29-tagbloggercom1999blog-4307061427721284246post-8244135124065934589.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2015-04-29 17:33:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2015/04/more-conjecture-on-knls-near-memory.html -slug: more-conjecture-on-knl-s-near-memory -title: More Conjecture on KNL's Near Memory ---- - -The Platform ran an interesting collection of conjectures on how KNL's on-package MCDRAM might be used this morning, and I recommend reading through it if you're following the race to exascale.  I was originally going to write this commentary as a Google+ post, but it got a little long, so pardon the lack of a proper lead-in here.

    I appreciated Mr. Funk's detailed description of how processor caches interact with DRAM, and how this might translate into KNL's caching mode.  However, in his discussion of how MCDRAM may act as an L3 cache, he underplays exactly why MCDRAM (and the GDDR on KNC) exists on these manycore architectures.  On-package memory is not simply another way to get better performance out of the manycore processor; rather, it is a hard requirement for keeping all 60+ cores (and their 120+ 512-bit vector registers, 1.8+ MB of L1 data cache, etc.) loaded.  Without MCDRAM, it would be physically impossible for these KNL processors to achieve their peak performance due to memory starvation.  By extension, Mr. Funk's assumption that this MCDRAM will come with substantially lower latency than DRAM might not be true.

    As a matter of fact, the massive parallelism game is not about latency at all; it came about as a result of latencies hitting a physical floor.  So, rather than drive clocks up to reduce latency and increase performance, the industry has been throwing more, but slower, cores at a given problem to mask the latencies of data access for any given worker.  While one thread may be stalled due to a cache miss on a Xeon Phi core, the other three threads are keeping the FPU busy to achieve the high efficiency required for performance.  This is at the core of the Xeon Phi architecture (as well as every other massively parallel architecture, including GPUs and Blue Gene), so it is unlikely that Intel has sacrificed its power envelope to actually give MCDRAM lower latency than the off-package DRAM on KNL nodes.

    At an architectural level, accesses to MCDRAM still need to go through memory controllers, just like accesses to off-package DRAM.  Intel hasn't been marketing the MCDRAM controllers as "cache controllers," so the latencies of MCDRAM access are likely on par with those of the off-package memory controllers.  There are simply more of these parallel MCDRAM controllers (eight) than off-package DRAM controllers (two), again suggesting that bandwidth, not latency, is MCDRAM's primary capability.

    Judging by current trends in GPGPU and KNC programming, I think it is far more likely that this caching mode acts at a much higher level, and Intel is providing it as a convenience for (1) algorithmically simple workloads with highly predictable memory access patterns, and (2) problems that will fit entirely within MCDRAM.  As with OpenACC, I'm sure there will be some problems where explicit on-/off-package memory management (analogous to OpenACC's copyin, copyout, etc.) isn't necessary and cache mode will be fine.  Intel will also likely provide all of the necessary optimizations in their compiler collection and MKL to make many common operations (BLAS, FFTs, etc.) work well in cache mode, as they did for KNC's offload mode.
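
    If explicit management does prove necessary, the natural tool is the memkind library's hbwmalloc interface.  As a minimal sketch (the hot/cold allocation split below is a hypothetical illustration, not tuned KNL code), flat-mode MCDRAM management might look like this:

    #include <hbwmalloc.h>   /* memkind's high-bandwidth memory interface */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        size_t n = 1 << 20;

        /* Bail out gracefully on nodes without any high-bandwidth memory */
        if (hbw_check_available() != 0) {
            fprintf(stderr, "no high-bandwidth memory available\n");
            return 1;
        }

        double *hot  = hbw_malloc(n * sizeof *hot);  /* bandwidth-critical data in MCDRAM */
        double *cold = malloc(n * sizeof *cold);     /* everything else stays in DDR */
        if (!hot || !cold)
            return 1;

        for (size_t i = 0; i < n; i++)
            hot[i] = cold[i] = (double)i;

        hbw_free(hot);
        free(cold);
        return 0;
    }

    The point is that the programmer, not a transparent cache, decides which arrays deserve the high-bandwidth tier--exactly the kind of deliberate data placement that OpenACC-style directives forced onto GPU programmers.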

    However, to answer Mr. Funk's question of "Can pre-knowledge of our application’s data use--and, perhaps, even reorganization of that data--allow our application to run still faster if we instead use Flat Model mode," the answer is almost unequivocally "YES!"  Programming massively parallel architectures has never been easy, and magically transparent caches rarely deliver reliable, high performance.  Even the L1 and L2 caches do not work well without very deliberate application design to accommodate wide vectors; cache alignment and access patterns are at the core of why, in practice, it's difficult to get OpenMP codes working with high efficiency on current KNC processors.  As much as I'd like to believe otherwise, the caching mode on KNL will likely be even harder to effectively utilize, and explicitly managing the MCDRAM will be an absolute requirement for the majority of applications. \ No newline at end of file diff --git a/_posts/glennklockwood/2016-6-21-tagbloggercom1999blog-4307061427721284246post-6567557412815023149.md b/_posts/glennklockwood/2016-6-21-tagbloggercom1999blog-4307061427721284246post-6567557412815023149.md deleted file mode 100644 index e33068e..0000000 --- a/_posts/glennklockwood/2016-6-21-tagbloggercom1999blog-4307061427721284246post-6567557412815023149.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2016-06-21 07:36:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2016/06/an-uninformed-perspective-on.html -slug: an-uninformed-perspective-on-taihulight-s-design -title: An uninformed perspective on TaihuLight's design ---- - -
    Note: What follows are my own personal thoughts, opinions, and analyses.  I am not a computer scientist and I don't really know anything about processor design or application performance, so it is safe to assume I don't know what I'm talking about.  None of this represents the views of my employer, the U.S. government, or anyone except me.

    China's new 93 PF TaihuLight system is impressive given its indigenous processor design and its substantial increase in HPL score over the #2 system, Tianhe-2.  The popular media has started covering this new system and the increasing presence of Chinese systems on Top500, suggesting that China's string of #1 systems may be a sign of shifting tides.  And maybe it is.  China is undeniably committed to investing in supercomputing and positioning itself as a leader in extreme-scale computing.

    That being said, the TaihuLight system isn't quite the technological marvel and threat to the HPC hegemony that it may seem at first glance.  The system features some critically limiting design choices that make it smell like a supercomputer that was designed to be #1 on Top500, not to solve scientific problems.  This probably sounds like sour grapes at this point, so let's take a look at some of the details.

    Back-of-the-envelope math

    Consider the fact that each TaihuLight node turns in 3,062 GFLOPS (that's 3 TFLOPS) and has 136.51 GB/sec of memory bandwidth. This means that in the time it takes for the processor to load two 64-bit floats from memory, it could theoretically perform over 350 floating point operations. But it won't, because it can only load the two operands for one single FLOP.

    Of course, this is an oversimplification of how CPUs work.  Caches exist to feed the extremely high operation rate of modern processors, and where there are so many cores that their caches can't be fed fast enough, we see technologies like GDDR DRAM and HBM (on accelerators) and on-package MCDRAM (on KNL) appearing so that dozens or hundreds of cores can all retrieve enough floating-point operands from memory to sustain high rates of floating point calculations.

    However, the ShenWei SW26010 chips in the TaihuLight machine have neither GDDR nor MCDRAM; they rely on four DDR3 controllers running at 136 GB/sec to keep all 256 compute elements fed with data.  Dongarra's report on the TaihuLight design briefly mentions this high skew:

    "The ratio of floating point operations per byte of data from memory on the SW26010 is 22.4 Flops(DP)/Byte transfer, which shows an imbalance or an overcapacity of floating point operations per data transfer from memory. By comparison the Intel Knights Landing processor with 7.2 Flops(DP)/Byte transfer."

    This measure of "Flops(DP)/Byte transfer" is called arithmetic intensity, and it is a critical optimization parameter when writing applications for manycore architectures.  Highly optimized GPU codes can show arithmetic intensities of around 10 FLOPS/byte, but such applications are often the exception; there are classes of problems that simply do not have high arithmetic intensities.  This diagram, which I stole from the Performance and Algorithms Research group at Berkeley Lab, illustrates the spectrum:

    To put this into perspective in the context of hardware, let's look at the #3 supercomputer, the Titan system at Oak Ridge National Lab.  The GPUs on which it is built (NVIDIA's K20X) each have a GDDR5-based memory subsystem that can feed the 1.3 TFLOP GPUs at 250 GB/sec.  This means that Titan's FLOPS/byte ratio is around 5.3, or over 4x lower (more balanced) than the 22 FLOPS/byte of TaihuLight's SW26010 chips.

    This huge gap means that an application that is perfectly balanced to run on a Titan GPU--that is, an application with an arithmetic intensity of 5.3--will run 4x slower on one of TaihuLight's SW26010 processors than on a Titan GPU.  Put simply, despite being theoretically capable of doing 3 TFLOPS of computing, TaihuLight's processors would only be able to deliver 1/4th of that, or 0.75 TFLOPS, to this application.  Because of the severely limited per-node memory bandwidth, this 93 PFLOP system would perform like a 23 PFLOP system on an application that, given an arithmetic intensity of 5.3, would be considered highly optimized by most standards.
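
    For those who want to check the arithmetic, this is just the roofline model's bandwidth ceiling: attainable performance is the lesser of peak FLOPS and arithmetic intensity times memory bandwidth.  A quick sketch using the figures quoted above (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        double peak_gflops = 3062.0;   /* SW26010 node peak */
        double bw_gbs      = 136.51;   /* SW26010 node memory bandwidth */
        double ai          = 5.3;      /* arithmetic intensity, FLOPS/byte */

        /* Roofline bound: min(peak, AI x bandwidth) */
        double attainable = ai * bw_gbs;
        if (attainable > peak_gflops)
            attainable = peak_gflops;

        /* Prints about 724 GFLOPS--roughly the quarter of peak cited above */
        printf("attainable: %.0f of %.0f GFLOPS\n", attainable, peak_gflops);
        return 0;
    }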

    Of course, the indigenous architecture also means that application developers will have to rely on indigenous implementations or ports of performance runtimes like OpenMP and OpenACC, libraries like BLAS, and ISA-specific vector intrinsics.  The maturity of this software stack for the ShenWei-64 architecture remains unknown.

    What is interesting

    This all isn't to say that the TaihuLight system isn't a notable achievement; it is the first massive-scale deployment of a CPU-based manycore processor, it is the first massive-scale deployment of EDR InfiniBand, and its CPU design is extremely interesting in a number of ways.

    The CPU block diagrams included in Dongarra's report are a bit like a Rorschach test; my esteemed colleagues at The Next Platform astutely pointed out its similarities to KNL, but my first reaction was to compare it with IBM's Cell processor:

    IBM Cell BE vs. ShenWei SW26010.  Cell diagram stolen from NAS; SW26010 diagram stolen from the Dongarra report.

    The Cell processor was ahead of its time in many ways and arguably the first manycore chip targeted at HPC.  It had
    • a single controller core (the PPE) with L1 and L2 caches
    • eight simpler cores (the SPEs) on an on-chip network with no L2 cache, but an embedded SRAM scratchpad
    and by comparison, the SW26010 has
    • a single controller core (the MPE) with L1 and L2 caches
    • sixty-four simpler cores (the CPEs) on an on-chip network with no L2 cache, but an embedded SRAM scratchpad
    Of course, the similarities are largely superficial and there are vast differences between the two architectures, but the incorporation of heterogeneous (albeit very similar) cores on a single package is quite bold and is a design point that may play a role in exascale processor designs:

    What an exascale processor might look like, as stolen from Kathy Yelick

    which may feature a combination of many lightweight cores (not unlike the CPE arrays on the TaihuLight processor) accompanied by a few capable cores (not unlike the MPE cores).

    The scratchpad SRAM present on all of the CPE cores is also quite intriguing, as it is a marked departure from the cache-oriented design of on-package SRAM that has dominated CPU architectures for decades.  The Dongarra report doesn't detail how the scratchpad SRAM is used by applications, but it may offer a unique new way to perform byte-granular loads and stores that do not necessarily waste a full cache line's worth of memory bandwidth if the application knows that memory access is to be unaligned.

    This is a rather forward-looking design decision that makes the CPU look a little more like a GPU.  Some experimental processor designs targeting exascale have proposed eschewing deep cache hierarchies in favor of similar scratchpads:

    The Traleika Glacier processor design, featuring separate control and execution blocks and scratchpad SRAM.  Adapted from the Traleika Glacier wiki page.

    Whether or not we ever hear about how successful or unsuccessful these processor features are remains to be seen, but there may be valuable lessons to be learned ahead of the first generation of exascale processors from architectures like those in the TaihuLight system.

    Outlook

    At a glance, it is easy to call out the irony in the U.S. government's decision to ban the sale of Intel's KNL processors to the Chinese now that the TaihuLight system is public.  It is clear that China is in a position to begin building extreme-scale supercomputers without the help of Intel, and it is very likely that the U.S. embargo accelerated this effort.  As pondered by a notable pundit in the HPC community,


    And this may have been the case.  However, despite the TaihuLight system's #1 position and very noteworthy Linpack performance and efficiency, it is not the massive disruptor that puts the U.S. in the back seat.  Underneath TaihuLight's shiny, 93-petaflop veneer are some cut corners that substantially lower its ability to reliably deliver the performance and scientific impact commensurate with its Linpack score.  As pointed out by a colleague wiser than me, Intel's impending KNL chip is the product of years of effort, and it is likely that it will be years before ShenWei's chip designs and fabs will be able to really deliver a fully balanced, competitive, HPC-oriented microarchitecture.

    With that being said, TaihuLight is still a massive system, and even if its peak Linpack score is not representative of its actual achievable performance in solving real scientific problems, it is undeniably a leadership system.  Even if applications can only realize a small fraction of its Linpack performance, there is a lot of discovery to be made in petascale computing.

    Further, the SW26010 processor itself features some bold design points, and being able to test a heterogeneous processor with scratchpad SRAM at extreme scale may give China a leg up in the exascale architecture design space.  Only time will tell if these opportunities are pursued, or if TaihuLight follows its predecessors into an existence of disuse in a moldy datacenter caused by a high electric bill, poor system design, and lack of software. \ No newline at end of file diff --git a/_posts/glennklockwood/2016-7-22-tagbloggercom1999blog-4307061427721284246post-5095302032595554238.md b/_posts/glennklockwood/2016-7-22-tagbloggercom1999blog-4307061427721284246post-5095302032595554238.md deleted file mode 100644 index 09dd2a8..0000000 --- a/_posts/glennklockwood/2016-7-22-tagbloggercom1999blog-4307061427721284246post-5095302032595554238.md +++ /dev/null @@ -1,40 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2016-07-22 07:07:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2016/07/basics-of-io-benchmarking.html -slug: basics-of-i-o-benchmarking -title: Basics of I/O Benchmarking ---- -Most people in the supercomputing business are familiar with using FLOPS as a proxy for how fast or capable a supercomputer is.  This measurement, as observed using the High-Performance Linpack (HPL) benchmark, is the basis for the Top500 list.  However, I/O performance is becoming increasingly important as data-intensive computing becomes a driving force in the HPC community, and even though there is no Top500 list for I/O subsystems, the IOR benchmark has become the de facto standard way to measure the I/O capability for clusters and supercomputers.

    Unfortunately, I/O performance tends to be trickier to measure using synthetic benchmarks because of the complexity of the I/O stack that lies between where data is generated (the CPU) and where it will ultimately be stored (a spinning disk or SSD on a network file system).  In the interests of clarifying some of the confusion that can arise when trying to determine how capable an I/O subsystem really is, let's take a look at some of the specifics of running IOR.

    Getting Started with IOR

    IOR writes data sequentially with the following parameters:
    • blockSize (-b)
    • transferSize (-t)
    • segmentCount (-s)
    • numTasks (-n)
    which are best illustrated with a diagram:
    [Diagram: the layout of an IOR file--each of the numTasks processes writes one blockSize-sized block per segment in transferSize-sized transfers, repeated segmentCount times]
    These four parameters are all you need to get started with IOR.  However, naively running IOR usually gives disappointing results.  For example, if we run a four-node IOR test that writes a total of 16 GiB:

    $ mpirun -n 64 ./ior -t 1m -b 16m -s 16
    ...
    access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
    ------ --------- ---------- --------- -------- -------- -------- -------- ----
    write 427.36 16384 1024.00 0.107961 38.34 32.48 38.34 2
    read 239.08 16384 1024.00 0.005789 68.53 65.53 68.53 2
    remove - - - - - - 0.534400 2

    we can only get a couple hundred megabytes per second out of a Lustre file system that should be capable of a lot more.

    Switching from writing to a single-shared file to one file per process using the -F (filePerProcess=1) option changes the performance dramatically:

    $ mpirun -n 64 ./ior -t 1m -b 16m -s 16 -F
    ...
    access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
    ------ --------- ---------- --------- -------- -------- -------- -------- ----
    write 33645 16384 1024.00 0.007693 0.486249 0.195494 0.486972 1
    read 149473 16384 1024.00 0.004936 0.108627 0.016479 0.109612 1
    remove - - - - - - 6.08 1

    This is in large part because letting each MPI process work on its own file cuts out any contention that would arise because of file locking.

    However, the performance difference between our naive test and the file-per-process test is a bit extreme.  In fact, the only way that a 146 GB/sec read rate could be achievable on Lustre is if each of the four compute nodes had over 45 GB/sec of network bandwidth to Lustre--that is, a 400 Gbit link on every compute and storage node.


    Effect of Page Cache on Benchmarking

    What's really happening is that the data being read by IOR isn't actually coming from Lustre; rather, the files' contents are already cached, and IOR is able to read them directly out of each compute node's DRAM.  The data wound up getting cached during the write phase of IOR as a result of Linux (and Lustre) using a write-back cache to buffer I/O, so that instead of IOR writing and reading data directly to Lustre, it's actually mostly talking to the memory on each compute node.

    To be more specific, although each IOR process thinks it is writing to a file on Lustre and then reading back the contents of that file from Lustre, it is actually
    1. writing data to a copy of the file that is cached in memory.  If there is no copy of the file cached in memory before this write, the parts being modified are loaded into memory first.
    2. marking those parts of the file in memory (called "pages") that are now different from what's on Lustre as "dirty"
    3. returning from the write() call so IOR continues on, even though the written data still hasn't been committed to Lustre
    4. leaving it to the OS kernel, independently of IOR, to continually scan the file cache for pages that have been updated in memory but not on Lustre ("dirty pages") and commit the cached modifications to Lustre
    5. declaring the dirty pages non-dirty once they are in sync with what's on disk, although they remain in memory
    Then when the read phase of IOR follows the write phase, IOR is able to just retrieve the file's contents from memory instead of having to communicate with Lustre over the network.

    There are a couple of ways to measure the read performance of the underlying Lustre file system.  The crudest way is to simply write more data than will fit into the total page cache so that by the time the write phase has completed, the beginning of the file has already been evicted from cache.  For example, increasing the number of segments (-s) to write more data reveals the point at which the nodes' page cache on my test system runs over very clearly:

    [Graph: measured IOR bandwidth versus amount of data written, showing performance falling to disk rates once the write volume exceeds the nodes' page cache]

    However, this can make running IOR on systems with a lot of on-node memory take forever.

    A better option would be to get the MPI processes on each node to only read data that they didn't write.  For example, on a four-process-per-node test, shifting the mapping of MPI processes to blocks by four makes each node N read the data written by node N-1.

    Since page cache is not shared between compute nodes, shifting tasks this way ensures that each MPI process is reading data it did not write.

    IOR provides the -C option (reorderTasks) to do this, and it forces each MPI process to read the data written by its neighboring node.  Running IOR with this option gives much more credible read performance:

    $ mpirun -n 64 ./ior -t 1m -b 16m -s 16 -F -C
    ...
    access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
    ------ --------- ---------- --------- -------- -------- -------- -------- ----
    write 41326 16384 1024.00 0.005756 0.395859 0.095360 0.396453 0
    read 3310.00 16384 1024.00 0.011786 4.95 4.20 4.95 1
    remove - - - - - - 0.237291 1

    But now it should seem obvious that the write performance is also ridiculously high. And again, this is due to the page cache, which signals to IOR that writes are complete when they have been committed to memory rather than the underlying Lustre file system.

    To work around the effects of the page cache on write performance, we can issue an fsync() call immediately after all of the write()s return to force the dirty pages we just wrote to flush out to Lustre. Including the time it takes for fsync() to finish gives us a measure of how long it takes for our data to write to the page cache and for the page cache to write back to Lustre.

    IOR provides another convenient option, -e (fsync), to do just this. And, once again, using this option changes our performance measurement quite a bit:

    $ mpirun -n 64 ./ior -t 1m -b 16m -s 16 -F -C -e
    ...
    access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter
    ------ --------- ---------- --------- -------- -------- -------- -------- ----
    write 2937.89 16384 1024.00 0.011841 5.56 4.93 5.58 0
    read 2712.55 16384 1024.00 0.005214 6.04 5.08 6.04 3
    remove - - - - - - 0.037706 0

    and we finally have a believable bandwidth measurement for our file system.

    Defeating Page Cache

    Since IOR is specifically designed to benchmark I/O, it provides these options to make it as easy as possible to ensure that you are actually measuring the performance of your file system and not your compute nodes' memory.  That being said, the I/O patterns it generates are designed to demonstrate peak performance, not reflect what a real application might be trying to do, and as a result, there are plenty of cases where IOR is not the best choice for measuring I/O performance.  There are several ways in which we can get clever and defeat page cache in a more general sense to get meaningful performance numbers.

    When measuring write performance, bypassing page cache is actually quite simple; opening a file with the O_DIRECT flag causes reads and writes to bypass page cache and go directly to the storage device.  In addition, the fsync() call can be inserted into applications, as is done with IOR's -e option.
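
    As a sketch of what this looks like in practice (illustrative code, not IOR's implementation), note that O_DIRECT requires the I/O buffer, file offset, and transfer size to all be aligned to the underlying device's block size, which is why posix_memalign() shows up:

    #define _GNU_SOURCE            /* O_DIRECT is a GNU extension on Linux */
    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const size_t xfer = 1 << 20;          /* 1 MiB transfer */
        void *buf;

        /* 4 KiB alignment satisfies most devices and file systems */
        if (posix_memalign(&buf, 4096, xfer))
            return 1;
        memset(buf, 0xAA, xfer);

        int fd = open("testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
        if (fd < 0)
            return 1;

        ssize_t rc = write(fd, buf, xfer);    /* bypasses page cache entirely */
        fsync(fd);                            /* belt-and-suspenders flush */
        close(fd);
        free(buf);
        return rc == (ssize_t)xfer ? 0 : 1;
    }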

    Measuring read performance is a lot trickier.  If you are fortunate enough to have root access on a test system, you can force the Linux kernel to empty out its page cache by doing
    # echo 1 > /proc/sys/vm/drop_caches
    and in fact, this is often good practice before running any benchmark (e.g., Linpack) because it ensures that you aren't losing performance to the kernel trying to evict pages as your benchmark application starts allocating memory for its own use.

    Unfortunately, many of us do not have root on our systems, so we have to get even more clever.  As it turns out, there is a way to pass a hint to the kernel that a file is no longer needed in page cache.


    The effect of passing POSIX_FADV_DONTNEED using posix_fadvise() is usually that all pages belonging to that file are evicted from page cache in Linux.  However, this is just a hint--not a guarantee--and the kernel evicts these pages asynchronously, so it may take a second or two for pages to actually leave page cache.  Fortunately, Linux also provides a way to probe pages in a file to see if they are resident in memory.
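
    A sketch combining the two calls (the file name handling and page counting here are my own illustration, not any particular tool's code):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        if (argc < 2)
            return 1;
        int fd = open(argv[1], O_RDONLY);
        if (fd < 0)
            return 1;

        struct stat st;
        fstat(fd, &st);

        /* Ask (politely) for this file's pages to be evicted */
        posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);

        /* Then count how many of its pages are actually still resident */
        long psize = sysconf(_SC_PAGESIZE);
        size_t pages = (st.st_size + psize - 1) / psize;
        unsigned char *vec = malloc(pages);

        void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
        mincore(map, st.st_size, vec);        /* bit 0 of each byte = resident */

        size_t resident = 0;
        for (size_t i = 0; i < pages; i++)
            resident += vec[i] & 1;
        printf("%zu of %zu pages still in page cache\n", resident, pages);

        munmap(map, st.st_size);
        free(vec);
        close(fd);
        return 0;
    }

    Because the eviction is asynchronous, probing a second or two after the fadvise call and seeing the resident count fall toward zero is a handy sanity check before timing any read benchmark.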

    Finally, it's often easiest to just limit the amount of memory available for page cache.  Because application memory always takes precedence over cache memory, simply allocating most of the memory on a node will force most of the cached pages to be evicted.  Newer versions of IOR provide the memoryPerNode option that does just that, and the effects are what one would expect:

    The above diagram shows the measured bandwidth from a single node with 128 GiB of total DRAM.  The first percent on each x-label is the amount of this 128 GiB that was reserved by the benchmark as application memory, and the second percent is the total write volume.  For example, the "50%/150%" data points correspond to 50% of the node memory (64 GiB) being allocated for the application, and a total of 192 GiB of data being read.

    This benchmark was run on a single spinning disk which is not capable of more than 130 MB/sec, so the conditions that showed performance higher than this were benefiting from some pages being served from cache.  And this makes perfect sense given that the anomalously high performance measurements were obtained when there was plenty of memory to cache relative to the amount of data being read.
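
    The same squeeze can be approximated outside of IOR with nothing more than a big, touched allocation (a bare-bones sketch; the 16 GiB figure below is arbitrary and would be sized to the node under test):

    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t squeeze = 16UL << 30;   /* hypothetical: most of the node's DRAM */
        char *p = malloc(squeeze);
        if (!p)
            return 1;

        /* Touching every page is what actually forces cached file pages
           out; an untouched malloc() reserves nothing physical. */
        memset(p, 1, squeeze);

        /* ... launch or time the read workload here ... */

        free(p);
        return 0;
    }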

    Corollary 

    Measuring I/O performance is a bit trickier than CPU performance in large part due to the effects of page caching.  That being said, page cache exists for a reason, and there are many cases where an application's I/O performance really is best represented by a benchmark that heavily utilizes cache.

    For example, the BLAST bioinformatics application re-reads all of its input data twice; the first time initializes data structures, and the second time fills them up.  Because the first read caches each page and allows the second read to come out of cache rather than the file system, running this I/O pattern with page cache disabled causes it to be about 2x slower:

    Thus, letting the page cache do its thing is often the most realistic way to benchmark with realistic application I/O patterns.  Once you know how page cache might be affecting your measurements, you stand a good chance of being able to reason about what the most meaningful performance metrics are. \ No newline at end of file diff --git a/_posts/glennklockwood/2018-11-24-tagbloggercom1999blog-4307061427721284246post-489145380970011565.md b/_posts/glennklockwood/2018-11-24-tagbloggercom1999blog-4307061427721284246post-489145380970011565.md deleted file mode 100644 index 130380d..0000000 --- a/_posts/glennklockwood/2018-11-24-tagbloggercom1999blog-4307061427721284246post-489145380970011565.md +++ /dev/null @@ -1,91 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2018-11-24 01:44:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html -slug: a-week-in-the-life-of-an-sc-attendee -title: A week in the life of an SC attendee ---- - -Last week was the annual Supercomputing conference, held this year in Dallas, and it was as busy as they always are.  Every year I take plenty of photos and post plenty of tweets throughout the week, but this year I thought it might be fun to share some of those photos (and the related things I learned) now that the dust has settled.  Since some people might also be interested in how someone might approach the conference from a technical and philosophical perspective, I figured I'd write a more general piece documenting my entire SC experience this year.

    This post wound up being a massive, meandering, chronological documentary of a week in my life that includes both technical and non-technical commentary.  For anyone who is only interested in the technical insights I gained during SC, check out the items prefixed with (tech) in this table of contents:

    Everything that's not labeled (tech) is part diary and part career development perspective.  Hopefully someone will find something in here that's of some value.

    Finally, disclosures:
    • I omitted some names in the interests of respecting the privacy of the folks who took the time to talk to me one-on-one.  If you're part of this story and don't mind having your name out there, I'd be happy to include it.
    • Everything I paraphrase here is public information or conjecture on my part.  Nothing in this post is either confidential or sensitive.  That said, check your references before citing anything here.  I don't know what I'm talking about.
    • Everything here is my personal opinion and does not necessarily reflect the viewpoint of my employer or its funding agency.  I attended the conference as a part of the regular course of business in which I am employed.  However, I took all photos for personal purposes, and the entirety of this post was written on my own personal time.

    Before the conference

    Everyone's SC experience is different because it draws such a diverse range of professionals.  There are plenty of activities for everyone ranging from students and early-career staff to senior management and leadership, and people on different career tracks (e.g., facilities staff, computer science researchers, program managers, product sales) are likely to be drawn to very different parts of the conference agenda.  My priorities during the week of SC are definitely shaped by where I am in my career, so when filling out my calendar a few weeks ahead of the conference, I considered the following:

    My job is half research and half facilities staff.  50% of my time is funded by grant money to do applied research in characterizing parallel I/O systems.  The other half of my time is spent staying current on emerging technologies in computing and storage.  These two responsibilities mean that my SC is usually a mix of attending technical program sessions (to see what my peers in research are doing and see what research ideas might turn up in future technologies) and engaging with vendors.

    I work in advanced technologies.  This means I am generally not in the trenches directly feeling the pains of operating HPC systems today; instead, my job is to identify technologies that will cause fewer problems tomorrow.  This also means that I don't have purchasing authority, and I am less likely to be involved with anything that's going to hit the floor in the next year.  As such, I generally don't do vendor sales meetings or briefings at SC because they are generally focused on nearer-term products and sales.

    I did not get to where I am by myself.  I first heard about SC in 2010 when I was a graduate student, and it sounded almost infinitely more exciting than the materials science conferences I was attending.  I had no experience in HPC at the time, but it made me realize what I really wanted to pursue as a career.  I relied heavily on the good will of the online HPC community to learn enough to get my first HPC job at SDSC, and after that, the faith of a great many more to get me to where I am now.  SC is often the only time I get to see people who have helped me out in my early career, and I always make time to connect with them.

    The net result of these goals was a pretty full schedule this year:

    My SC'18 schedule.  Note that the time zone is PST, or two hours behind Dallas time.


    I mark everything that I must attend (usually because I'm a speaker) in red so that I know my immovable obligations. Blue items are things I will attend unless an emergency comes up, and grey items are events I should attend because they sound interesting.

    White space is very important to me too; between 10am and 6pm, white spaces are when I can walk the expo floor.  A lot of people write off the expo as a waste of time, but I actually feel that it's one of the most valuable parts of SC.  Since my job is to understand emerging technology (and the market trends that drive them), accosting a pre-sales engineer or product manager at a strategically important technology provider can yield an invaluable peek into the markets they're serving.  White space in the evenings is equally important for engagements of opportunity or working on slides that have to be presented the next day.


    Saturday, November 10

    I always fly to SC on the Saturday before the conference starts.  I have historically opted to do workshops on both Sunday and Monday, as I really enjoy attending both PMBS and PDSW-DISCS.  I bring a suitcase that has extra room for conference swag, and doing so this year was critically important because I opted to bring along a pair of cowboy boots that I knew I would not want to wear on the flight home.

    My brown kicks.  Also Harriet the cat.

    On just about every work flight I'm on, I've got PowerPoint slides to review; this trip was no different, and I spent the 3.5-hour flight time reviewing the slides I had to present the next day. Once in Dallas and at my hotel, I carried out my usual work-travel night-of-arrival ritual: order the specialty pizza from a local pizza joint, text home saying I arrived safely, and iron my clothes while watching Forensic Files.

    Sunday, November 11

    This year I had the honor of presenting one part of the famed Parallel I/O in Practice tutorial at SC along with Rob Ross, Brent Welch, and Rob Latham.  This tutorial has been running for over fifteen years now, and at some point over those years, it picked up the curious ritual of being kicked off with some juggling:

    Brent leading up to the tutorial start time with some juggling.  He brought the pins with him.

    The tutorial itself is really comprehensive and includes everything from device-level performance behavior to parallel file systems architecture and I/O middleware.  Even though I can proudly say that I knew 95% of the material being presented throughout the day (as I probably should since I was a presenter!), I found this particular slide that Rob Latham presented particularly insightful:

    The ease and portability of using I/O middleware comes without sacrificing performance!  Sorry for the odd angle; this is the screen as we presenters were able to view it.

    It makes the case that there is no significant performance penalty for using higher-level I/O libraries (like PnetCDF or parallel HDF5) despite how much easier they are to use than raw MPI-IO.  One of the biggest take-home messages of the entire tutorial is to use I/O middleware wherever possible; doing so means that understanding parallel file system architecture isn't a prerequisite to getting good I/O performance.
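
    To illustrate how little of the underlying machinery such middleware exposes, here is a minimal sketch of opening a file collectively through parallel HDF5 (assuming an MPI-enabled HDF5 build; error checking omitted, and "demo.h5" is just an example name):

    #include <hdf5.h>
    #include <mpi.h>

    int main(int argc, char **argv)
    {
        MPI_Init(&argc, &argv);

        /* Tell HDF5 to drive MPI-IO underneath */
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);

        /* Every rank participates in this collective create */
        hid_t file = H5Fcreate("demo.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

        H5Fclose(file);
        H5Pclose(fapl);
        MPI_Finalize();
        return 0;
    }

    All of the collective buffering, aggregation, and file-system-specific quirks live below this interface, which is exactly the point of the tutorial's advice.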

    Monday, November 12

    Monday was the official first day of SC.  Workshops and tutorials went on throughout the day, and the opening keynote and exhibition hall opening gala started in the evening.

    PDSW-DISCS 2018

    The 3rd Joint International Workshop on Parallel Data Storage & Data Intensive Scalable Computing Systems (PDSW-DISCS) was on Monday, and I had the honor of being asked to serve as its Publicity Chair this year.

    The PDSW-DISCS full-day workshop agenda

    It's a really great workshop for people working in I/O, storage, and data and always draws a large crowd:

    For researchers, it's a great venue for short papers that IEEE or ACM publishes, and it also has a really nice Work-in-Progress track where a page-long abstract gives you a seven-minute spot to pitch your work.  For attendees, it's always chock full of good talks that range from pure research to applied development.

    This year's keynote speaker was Rangan Sukumar, Cray's analytics guru.  His talk was interesting in that it approached the oft-mentioned convergence between HPC and AI (which has become an over-used trope by itself) from the perspective of a system architect (which is where the rubber meets the road):

    As many great keynote speakers are, Rangan used hyperbole at times to contrast HPC and "Big Data" workloads, and this stimulated some discussion online.  Although the slides alone tell only part of the story, you can download them from the PDSW-DISCS'18 website.

    Later in the morning, Margaret Lawson (University of Illinois, Sandia Labs) presented a follow-on to the EMPRESS metadata system she presented last year:

    Last year, EMPRESS seemed a little too researchy for me (as a facilities person) to sink my teeth into.  This year though, the picture seems a lot more complete and I quite like the architectural framework.  Although EMPRESS may not ever be a household name, the concept of separating data streams and metadata streams underneath some sort of I/O middleware is really solid.  I think that storing data and metadata in different, architecturally distinct storage systems that map to the unique access patterns of data and metadata is ultimately the right way to approach large-scale data and metadata management in HPC, and I expect to see this design pattern proliferate as scientific data analysis becomes a bigger part of large-scale HPC workloads.

    In the afternoon, researchers from OSU offered a rare peek into Alibaba through a high-level analysis of SSD failure data provided by the Chinese hyperscaler:


    The most alarming finding to me was that 20% of SSD failures were caused by humans yanking the wrong SSD.  This immediately made me wonder who Alibaba is hiring to do routine operational support at their data centers; if people are causing a significant fraction of storage faults, either they aren't hiring with the same standards as their US counterparts, or their data centers are a mess.  The speaker's proposed remedy was to use a different SSD form factor for each logical use case so that operators could visually distinguish an SSD reserved for metadata from one reserved for data.  I personally think a label maker, a barcode scanner, and a decent salary are an easier, standards-based solution.

    Other highlights included
    • Characterizing Deep-Learning I/O Workloads in TensorFlow, presented by Stefano Markidis of KTH.  The first time I've seen an I/O-centric evaluation of how deep learning workflows will affect storage requirements of future systems.  I learned a lot.
    • Toward Understanding I/O Behavior in HPC Workflows, presented by Jakob Lüttgau of DKRZ/ANL.  Rather than analyze the I/O pattern of a single MPI job, this paper began examining the I/O patterns of related jobs that all work towards a single scientific objective.  Again, one of the first research papers I've seen that takes a critical look at end-to-end workflows from an I/O perspective.
    • Methodology for the Rapid Development of Scalable HPC Data Services, presented by Matthieu Dorier of ANL.  I think this paper is intended to be the canonical reference for the Mochi project, which I was glad to finally see.  The idea of enabling quickly composable, purpose-built I/O services that are optimized for next-generation media and interconnects is a brilliant one, and I am a huge believer that this approach will be what demonstrates the earliest scientific successes that rely on storage-class memory at scale.

    There were a number of really promising ideas presented at the WIP sessions as well, and recapping the entirety of the workshop is a blog post in and of itself.  Fortunately, all the papers and slides are openly available on the PDSW-DISCS website.

    SC Opening Keynote and Gala

    I've actually stopped going to the SC keynotes over the last year since they're increasingly focused on the societal impacts enabled by HPC rather than HPC itself.  While I'm definitely not knocking that theme--it's a great way to inspire early-career individuals, big-picture program management types, and disenchanted technical folks in the trenches--it's just not why I attend SC.  Instead, I make use of my exhibitor badge and head into the expo floor before it opens to the public; this is the only time during the conference where I seem to be able to reliably find the people I want to meet at their booths.

    This year I visited a few small businesses with whom I've fostered good will over the last few years to say hello, then dropped in on the SDSC booth to catch up with the latest news from my former coworkers.  They also happen to have free beer on the opening night.

    Once the expo floor opens to the public following the opening keynote, booth activity goes from zero to eleven really quickly.  Every booth has a big splash during the gala which makes it hard to choose just one, but my decision this year was made easier by Cray choosing to unveil its new exascale HPC platform, Shasta, and celebrate its first sale of a Shasta system to NERSC.

    Cray CEO Pete Ungaro at the Shasta unveiling ceremony

    This new system, named Perlmutter, will be delivered in 2020 and has a bunch of really slick new technologies incorporated into it.

    After Cray CEO Pete Ungaro unveiled the prototype Shasta blades, there was a celebratory toast and both NERSC and Cray staff donned their "ASK ME ABOUT SAUL" pins:

    NERSC and Cray staff got these VIP pins to promote NERSC's next system, named after astrophysicist, Nobel laureate, and Berkeley Lab scientist Saul Perlmutter.

    I stuck around to shake hands with my colleagues at Cray (including the CEO himself!  Haven't washed my hand since) and catch up with some of my counterparts in storage R&D there.

    The Beowulf Bash

    The gala shut down at 9 PM, at which time I headed over to the Beowulf Bash to try to find some colleagues who said they would be there.  I generally don't prioritize parties at SC for a couple reasons:
    1. Shouting over music all night is a great way to burn out one's voice.  This is not good when I have to present something the next day.
    2. The crowds and lines often undercut my enjoyment of catching up with old colleagues (and meeting new ones).
    3. I almost always have slides that need to be finished by the end of the night.
    I make an exception for the Bash because I personally value many of the people behind organizing and sponsoring it, and it captures the scrappier side of the HPC community which helped me get my foot in the door of the industry.  This year I specifically went to catch up with my colleagues at The Next Platform; Nicole and Tim are uncommonly insightful and talented writers and editors, and they always have wacky anecdotes to share about some of the more public figures in our industry.

    More generally and self-servingly though, maintaining a good relationship with members of the HPC trade press at large has tremendous value over time regardless of your affiliation or job title.  Behind every interesting HPC news article is an editor with incomparable access to a broad network of people in the industry.  Despite this, they are still subject to the same haters as anyone else who puts something out in the spotlight, so I have to imagine that putting in a kind word in person is always worth it.

    At around midnight, only the die-hards were still around.

    Late night Beowulf Bash at Eddie Deen's Ranch.

    Regrettably, I barely had any time to catch up with my colleagues from the FreeNode HPC community at the Bash (or at all).  Maybe at ISC.

    After getting back to the hotel, I realized I hadn't eaten anything since lunch.  I also learned that absolutely nothing that delivers food in the downtown Dallas area is open after midnight.  After waiting an hour for a food delivery that wound up going to a restaurant that wasn't even open, I had to settle for a hearty dinner of Hot Pockets from the hotel lobby.

    I hadn't eaten a Hot Pocket since graduate school.  Still taste the same.

    Fortunately my Tuesday was relatively light on hard obligations.

    Tuesday, November 13

    Tuesday was the first day in which the SC technical program and expo were both in full swing.  I split the day between paper talks, meetings, and the expo floor.


    Technical Program, Part 1 - Data and Storage

    My Tuesday morning began at 10:30 AM with the Data and Storage paper presentation session in the technical program.  Of note, the first two papers presented were about cloud-centric storage paradigms, and only the third one was clearly focused on scientific HPC workloads.

    • SP-Cache: Load-Balanced, Redundancy-Free Cluster Caching with Selective Partition by Yu et al was a paper squarely aimed at reducing tail latency of reads.  Very important if you want to load an old GMail message without waiting more than a few seconds for it to load.  Less useful for most scientific HPC workloads.
    • BESPOKV: Application Tailored Scale-Out Key-Value Stores by Anwar et al was a paper presenting a framework that is uncannily similar to the Mochi paper presented at PDSW on the day before.  The premise was to allow people to compose their own Cassandra-like KV store with specific consistency and durability balance without having to reinvent the basic building blocks.
    • Scaling Embedded In Situ Indexing with DeltaFS by Zheng et al was the talk I really wanted to hear but I had to miss on account of a conflicting meeting.  The DeltaFS work being done by CMU and LANL is a really innovative way to deal with the scalability challenges of parallel file system metadata, and I think it's going to ultimately be where many of the nascent software-defined storage technologies aimed at HPC will converge.
    Unfortunately I had to cut out of the session early to meet with a vendor partner at a nearby hotel.

    Interlude of Meetings

    The first of my two vendor meetings at this year's SC was less a sales call and more about continuing a long-running discussion about technology futures in the five-to-ten year timeframe.  No sane vendor will commit to any roadmap that far out, especially given the uncertainty surrounding post-Moore's Law technologies, but they are receptive to input from customers who are formulating their own strategic directions for the same time period.  Maintaining these sorts of ongoing conversations is a major part of what falls under my job title in "advanced technologies."

    Unfortunately that vendor meeting overlapped with the Lustre BOF, but other staff from my institution were able to attend and ensure that our interests were represented.  I was also able to attend the Lustre Lunch that followed the BOF, which was very fruitful; in addition to simply being present to remind the Lustre community that I (and the institution I represent) am a part of it, I happened to connect in person with someone I've known for a few years via Twitter and make a valuable connection.  Unfortunately I had to leave the Lustre Lunch early to make another meeting, unrelated to SC, that allowed a geographically distributed committee to meet face-to-face.

    After that committee meeting, I seized the free hour I had to visit the show room floor.

    Expo Floor, Part 1

    The first photo-worthy tech I saw was the Shasta blade at the Cray booth.  Because the booth was mobbed with people during the previous night's gala, this was actually my first time seeing Shasta hardware up close.  Here's the compute blade:

    Part of a Cray Shasta compute blade up-close

    Unlike the Cray XC blade of today's systems which uses a combination of forced-air convection and heat exchangers to enable liquid cooling, these Shasta blades have direct liquid cooling which is rapidly becoming a de facto minimum requirement for an exascale-capable rack and node design.  I had some questions, so I struck up a conversation with a Cray employee at the booth and learned some neat things about the Shasta packaging.

    For the sake of clarity, here is a hand-drawn, annotated version of the same photo:

    Part of a Cray Shasta compute blade up-close with my annotations

    What stood out to me immediately was the interesting way in which the DIMMs were direct-liquid cooled.  Unlike IBM's attempt at this with the POWER 775 system (the PERCS system of Blue Waters infamy) where cold plates were attached to every DIMM, Cray has opted to use what looks like a heat-conductive foam that wraps copper cooling lines.  To service the DIMMs, the entire copper cooling complex that runs between the two rows of two DIMMs unfastens and lifts up.  There's enough slack in the liquid cooling lines (highlighted in purple) so that DIMMs (and presumably every other field-replaceable part in the blade) can be serviced without draining the coolant from the blade.

    The NIC is also pretty interesting; it is a commercial high-end data center Ethernet NIC that's manufactured in a custom form factor to fit this blade.  It looks like a second CPU is housed underneath the NIC, so it may be that the NIC and one of the CPUs share a common cooling block.  The NIC is also positioned perpendicular to the long edge of the blade, meaning that there are probably some pretty long cable runs going from the front-most NIC all the way to the rear of the blade.  Finally, because the NIC is on a discrete mezzanine card, the networking technology is no longer soldered to the compute as it is with Aries on today's XC.

    The network switch (which I did not photograph, but others did) is another blade that slots into the rear of the Shasta cabinet and mates perpendicularly with a row of compute blades such that a single switch blade can service a fully populated compute chassis.  The engineer with whom I spoke said that these Shasta cabinets have no actual midplane; the compute blades connect directly to the switch blades through a bunch of holes cut out of the sheet metal that separates the front of the cabinet from the rear.  Without a midplane there is presumably one less single point of failure; at the same time though, it wasn't clear to me how out-of-band management works without a centralized controller somewhere in the chassis.

    At this point I should point out that all of the above information is what I learned by talking to a Cray booth employee at SC without any special privilege; although I'm sure that more details are available under non-disclosure, I frankly don't remember any of it because I don't work on the compute side of the system.

    My next big stop on the show room floor was at the Fujitsu booth, where they had their post-K prototype hardware on display.  Of particular note was their A64FX engineering sample:


    If you look very carefully, you can see the four stacks of high-bandwidth memory (HBM) on-die along with the ARM cores, which is fantastically historic in that this is the first general-purpose CPU (of which I am aware) with integrated HBM2.  What's not present is any indication of how the on-chip Tofu NIC is broken out; I guess I was expecting something like Intel's -F series KNLs with on-package OmniPath.

    A sample node of the post-K system was also on display:

    Seeing as how both this post-K system and Cray Shasta are exascale-capable system architectures, it's interesting to compare and contrast them.  Both have direct liquid cooling, but the post-K compute blade does not appear to have any field-replaceable units.  Instead, the entire board seems to be a single FRU, so CPUs must be serviced in pairs.  I think the A64FX lacks any cache coherence bus, meaning that two CPUs correspond to two nodes per FRU.

    That all said, the post-K design does not appear to have any DDR DRAM, and the NIC is integrated directly into the CPU.  With those two components out of the picture, the rate of a single component failure is probably a lot lower in post-K than it would be in Shasta.  Hopefully the post-K HBM has ECC though!

    In chatting with a Fujitsu engineer about the post-K node architecture at their booth, I also met a Fujitsu engineer who just happened to be developing LLIO, the post-K system's burst buffer service:

    LLIO burst buffer slide shown at the Fujitsu booth

It sounds a lot like DataWarp in terms of features, and given that Fujitsu is also developing a new Lustre-based file system (FEFS 2.0?) for post-K, we may see tighter integration between the LLIO burst buffer layer and the FEFS back-end disk storage.  This technology wasn't on my radar before SC, but it is definitely worth keeping an eye on as 2021 approaches.

    As I was racing between a few other booths, I also happened upon my boss (and NERSC-9 chief architect) presenting the Perlmutter system architecture at the NVIDIA booth:

    NERSC's Nick Wright, chief architect of the Perlmutter system, describing its architecture at the NVIDIA booth


    The talk drew a crowd--I'm glad to see people as jazzed about the new system as I am.

    Analyzing Parallel I/O BOF

    The Analyzing Parallel I/O BOF is a must-attend event for anyone in the parallel I/O business, and this year's BOF was especially good.  Andreas Dilger (of Lustre fame; now CTO of Whamcloud) gave a brief but insightful retrospective on understanding I/O performance:

Andreas Dilger's retrospective on understanding I/O performance

Unfortunately I did not take a picture of Andreas' second slide (available on the Analyzing Parallel I/O BOF's website), a "what is needed?" slide that largely revolves around better integration between storage system software (like Lustre) and user applications.  I/O middleware was at the center of most of the bullets calling for increased development, which bodes well for the scientific application developers who attended the Parallel I/O in Practice tutorial on Sunday--recall that this was my key takeaway.  It's good to know that the lead of Lustre development agrees with this vision of the future, and I hope Whamcloud moves Lustre in this direction so that users and middleware developers can meet the storage system software somewhere in the middle.

    The BOF took a darker turn after this, starting with a presentation from Si Liu of TACC about the Optimal Overloaded IO Protection System, or OOOPS.  It's a library that wraps the standard POSIX I/O calls:

    OOOPS operates by hijacking standard I/O calls and lagging them.


    But in addition to passively monitoring how an application performs I/O, it purposely injects latency to throttle the rate at which I/O operations get issued by an application.  That is, it purposely slows down I/O from clients to reduce server-side load and, by extension, the effects of a single bad actor on the I/O performance of all the other users.
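To make the mechanism concrete, here is a minimal sketch of my own (not OOOPS' actual code) showing how an LD_PRELOAD library can inject latency ahead of every write(); the one-millisecond delay is an arbitrary choice for illustration:

```c
/* throttle.c - a minimal LD_PRELOAD sketch that injects latency into
 * write(2); illustrative only, not OOOPS' actual implementation.
 * Build: gcc -shared -fPIC throttle.c -o throttle.so -ldl
 * Run:   LD_PRELOAD=./throttle.so ./my_io_heavy_app                  */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <time.h>
#include <sys/types.h>

ssize_t write(int fd, const void *buf, size_t count)
{
    static ssize_t (*real_write)(int, const void *, size_t);
    if (!real_write)
        real_write = (ssize_t (*)(int, const void *, size_t))
                     dlsym(RTLD_NEXT, "write");

    /* throttle: sleep 1 ms before passing the call through */
    struct timespec delay = { 0, 1000000 };
    nanosleep(&delay, NULL);

    return real_write(fd, buf, count);
}
```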

Ideologically, I have a lot of problems with an HPC facility inserting itself into users' workflows and reducing the efficiency with which they can accomplish their science relative to the peak capability of the HPC resource.  If a storage system allows a single user to accidentally deny service to other users in pursuit of peak performance, that is a problem with the storage system, and it should be addressed at the system level.  And as Andreas pointed out in the BOF, tools exist to allow storage systems to accomplish fair sharing, which is distinctly different from explicitly penalizing users.  Granted, TACC is also the facility where one of its staff went on record as saying that the R language should not be used by anyone since it is a waste of energy.  Perhaps they have an institutionally different relationship with their user community.

    Fortunately, anything that relies on LD_PRELOAD can be circumvented by users, so OOOPS is unlikely to be used to enforce any kind of resource usage policy as it was pitched during the BOF.  I do see a lot of value in using it to fence data analysis workflows that may hit a pathological condition as a result of their inputs, and being able to trigger changes in application behavior by tracking I/O rates is a technique that could be useful in auto-tuning I/O middleware.

Rosemary Francis, CEO of Ellexus, also spoke at the BOF and made the case for making I/O performance analysis more accessible to end users.  I was quite delighted by the visualizations she presented (presumably from her company's Breeze product), which used both color and human-readable "bad" I/O patterns to create a pie graph that quickly shows how much time an application spent doing I/O in various good, bad, and neutral ways.  Darshan, the tried-and-true open source I/O profiling library, operates at a slightly lower level and assumes a slightly higher level of user sophistication by comparison.

    The discussion half of the BOF was packed with engagement from the audience--so much so that I didn't find any moments of silence to seize the opportunity to stump for my own view of the world.  The combination of OOOPS and Rosemary's I/O war stories did steer the discussion towards ways to punish bad users though.  I can appreciate HPC operators' frustration in novice users causing system-wide problems, but I don't think shaming users who do bad I/O is a great solution.  Rather, something between OOOPS' automatic identification of bad I/O at runtime and Ellexus' user-centric reporting and feedback, combined with storage systems capable of enforcing QOS, is where we need to go.

    The Cray Celebration

    I wrote earlier that I normally don't do the SC vendor party circuit, but the Cray party this year was another exception for two reasons: (1) we had just announced Perlmutter along with Cray's Shasta unveiling which is worth celebrating, and (2) there were specific Cray staff with whom I wanted to confer sometime during the week.  So after the Parallel I/O BOF, I headed over to the event venue:

The venue for the Cray celebration

    The event was quite nice in that it was not held at a loud bar (which made conversation much easier), it had plenty of food (no need for 2 AM Hot Pockets), and the format was conducive to moving around and meeting a lot of different people.  The event was awash with representatives from all the major Cray customers including the DOE labs, the big oil & gas companies, and the regional leadership computing centers in EMEA including CSCS and KAUST, as well as alumni of all those employers and Cray itself.  I've only worked at a Cray customer site for three years now, but I couldn't walk ten feet without running into someone I knew; in that sense, it felt a little like an event at the annual Cray User Group meeting but with a broader range of attendees.

    I don't know what this event would've been like if I was a student or otherwise didn't already know many of the regular faces within the Cray user community and instead had to start conversations cold.  That said, I was busy the entire evening getting to know the people behind all the conference calls I'm on; I find that getting to know my industry counterparts as people rather than just vendor reps really pays dividends when surprises happen and conflicts need to be resolved.  Events like this at SC are invaluable for building and maintaining these sorts of relationships.

    Wednesday, November 14

    My Wednesday began bright and early with a quick run-around of the expo floor to figure out who I needed to visit before the end of the week.

    The expo floor was awkwardly laid out this year, so I really needed to do this to make sure I didn't spin my tires trying to find certain booths once the crowd showed up.  Incidentally, I did witness a sales person violate the unwritten rule of keeping everything friendly until the expo floor opened to the public--a sales rep selling "the world's fastest storage system" tried to stir up cold sales leads at my employer's booth at 8 AM while we were all still drinking our coffee and catching up on e-mail.  If you do this, shame on you!  Respect the exhibitor access and don't put your game face on until the public is allowed in.

    SC Student Career Fair and Booth Talk

    My first meeting was a chat over coffee with VAST Data, a storage technology company that has some really innovative and exciting ideas in the pipeline, to keep up to date with the latest news as they approach public launch.

    My second obligation was volunteering at my employer's booth at the SC Career Fair.  I generally enjoy booth duty and talking to students, and this year I was doubly motivated by my desire to fill some career and student job openings related to my responsibilities.  A diverse cross section of students dropped by our booth looking for both summer internships and full-time jobs; many seemed very well rehearsed in their cold pitch, while some others were a little more casual or cautious.  Although I'm not particularly qualified to give career advice, I will say that knowing how to sell yourself cold can be a valuable skill in your early career.  If you are seeking employment, be prepared to respond to a request to "tell me about yourself" in a way that makes you stand out.

    After the Career Fair, I wound up hunkering down at the SDSC booth to have lunch with my former coworkers and review the slides I volunteered to present at the adjacent DDN booth.

    At 2 PM I took the stage (booth?) and one of my colleagues was not only kind enough to sit in on this booth talk, but also share this photo he took right before I started:

    Beginning of my talk at the DDN booth.  Photo credit goes to Suhaib Khan via Twitter.

I continue to be humbled that anyone would go out of their way to come hear what I have to say, especially when my talk is as unvetted as booth talks tend to be.  Talking at booths rarely goes well for me; the audio is always a wildcard, the audience is often unwitting, and auditory and visual distractions are literally everywhere.  The DDN booth was my sole booth talk of this year and it went about as well as I would have expected.  On the up side, quite a few attendees seemed genuinely interested to hear what I had to say about the variety of ways one can deploy flash in an HPC system.  Unfortunately, I ran a few minutes long and got derailed by external distractions several times during the presentation.  Flubbing presentations happens, though, and none of the audience members seemed to mind.

    Shortly after the booth talk, I had to find a quiet spot to jump on a telecon.  This was no easy task; since cell phones killed the public phone booth, there are very few places to take a call on the expo floor.

    Expo Floor, Part 2

    The afternoon afforded me two more hours to race around the expo floor.  Despite my planning earlier in the morning, I wound up spinning my tires looking for a few key vendors who simply didn't show up to SC this year, including

    • Samsung and SK Hynix, two of the top three DRAM vendors and the sole manufacturers of HBM2
    • Seagate, one of two hard disk drive manufacturers
    • Broadcom/Avago, the company manufacturing most of the serdes used in the upcoming 200G and 400G network devices
    • Juniper, one of the major players in the 400 GbE space
    • AdvancedHPC, one of the few US integrators selling BeeGFS

    I'm not really sure why so many vendors didn't show up this year, but it made getting a holistic view of the storage and networking technologies markets impossible.  That said, I still saw a few noteworthy things.

One of the big open questions in high-performance storage revolves around the battle between the NF1 (formerly NGSFF, promoted by Samsung) and EDSFF (promoted by Intel) form factors for NVMe.  It's clear that these long-and-skinny NVMe designs are going to have to replace the thermally inefficient 2.5" U.2 and unserviceable HHHL PCIe form factors, but the dust is far from settled.  On the one hand, Samsung leads flash storage sales worldwide, but their NF1 form factor caps the power consumption (and therefore performance) of its devices to levels that are squarely aimed at cheaper data center flash.  On the other, the EDSFF form factor being pushed by Intel has a short version (competing directly with NF1) and a longer version that allows higher power.

    The Supermicro booth had actual EDSFF drives on display, and this was the first time I could actually see one up-close:

    A long-type EDSFF NVMe drive at the Supermicro booth.  The aluminum casing is actually required to meet the thermals.


    What I didn't realize is that the higher thermal specification enabled by the long-version EDSFF drives requires that the entire SSD circuit board be enclosed in the aluminum casing shown to enable better heat dissipation.  This has the nasty side effect of reducing density; while a standard 19" 1U chassis can fit up to 36 NF1 SSDs, the aluminum casing on long EDSFFs reduces the equivalent density to 32 SSDs.  Although long EDSFF drives can compensate for this by packing more NAND dies on the physically longer EDSFF board, supporting these longer SSDs requires more engineering on the chassis design to fit the same amount of compute into a smaller area.

    Similarly but differently, the Lenovo booth was showcasing their D3284 JBOD which packs 84x 3.5" HDDs into a double-decker 5U chassis.  I had naively assumed that all of these super-dense 84-drive enclosures were top-loading such that each drive mates to a backplane that is mounted to the floor of the chassis, but it turns out that's not the case:

    Lenovo's 5U84 JBOD

Instead, each 3.5" drive goes into its 2.5U shelf on its side, and each drive attaches to a carrier that has to be slid slightly toward the front of the JBOD to release the drive, then slid towards the back of the JBOD to secure it.  This seems a little harder to service than a simple top-load JBOD, but I assume there are thermal efficiencies to be gained by this layout.

    The Western Digital booth had a pretty broad portfolio of data center products on display.  Their newest gadget seems to be a planar NAND-based U.2 device that can present itself as DRAM through a custom hypervisor.  This sounds like a direct competitor to Intel's Memory Drive offering which uses ScaleMP's hypervisor to expose flash as DRAM to a guest VM.  The combination of exposing flash as very slow memory and relying on software virtualization to do this lends this to being a technology not really meant for HPC, and the engineer with whom I spoke confirmed as much.  Virtualized big-and-slow memory is much more appealing to in-memory databases such as SAP HANA.

Perhaps more interesting was the lack of any mention of Western Digital's investment in storage-class memory and microwave-assisted magnetic recording (MAMR) disk drives.  When I prodded about the state of MAMR, I was assured that the technology will work because there is no future of hard drives without some form of energy-assisted magnetic recording.  However, product announcements are still 18-24 months away, and the capacity for these drives will enter the market at the rather underwhelming range of ~20 TB.  Conveniently, this matches Seagate's recent cry of wolf that they will launch HAMR drives in 2020 at a 20 TB capacity point.  Western Digital also made no mention of multi-actuator drives, and asking about it only got me a sly grin; this suggests that Western Digital is either playing slow and steady so as not to over-promise, or Seagate has a slight technological lead.

My last substantive stop of the afternoon was at the IBM booth, where they had one of their new TS4500 tape libraries operating in demo mode.  The window was too reflective to take a video of the robotics, but I will say that there was a perceptible difference between the robotics in IBM's enterprise tape library and the robotics in another vendor's LTO tape library.  The IBM enterprise robotics are downright savage in how forcefully they slam tapes around, and I now fully believe IBM's claims that their enterprise cartridges are constructed to be more physically durable than standard LTO.  I'm sure there's some latency benefit to being able to ram tapes into drives and library slots at full speed, but it's unnerving to watch.

    IBM also had this cheeky infographic on display that was worth a photo:

IBM's cheeky tape infographic

    If I built a tape drive that was still operating after forty years in outer space, I'd want to brag about it too.  But there are a couple of factual issues with this marketing material that probably made every physical scientist who saw it roll their eyes.

    Over at the compute side of the IBM booth, I learned that the Summit and Sierra systems sitting at the #1 and #2 positions on Top500 are built using node architectures that IBM is selling commercially.  There are 2 CPU + 6 GPU nodes (which is what Summit at OLCF has) which require liquid cooling, and 2 CPU + 4 GPU nodes (which is what Sierra at LLNL has) which can be air- or liquid-cooled.  I asked an IBM technologist which configuration is more commercially popular, and the Sierra configuration is currently leading sales due to the relative lack of infrastructure to support direct liquid cooling in commercial data centers.

This has interesting implications for the exascale technologies I looked at on Tuesday; given that the exascale-capable system designs presented by both Fujitsu and Cray rely on direct liquid cooling, the gap between achieving exascale-level performance and delivering a commercially viable product is pretty wide from a facilities perspective.  Fortunately, the Fujitsu A64FX chip usually runs below 200 W and can feasibly be air-cooled with lower-density packaging, and Cray's Shasta will support standard air-cooled 19" racks via lower-density nodes.

    The IO-500/VI4IO BOF

The second must-attend BOF for people working in I/O is the IO-500 and Virtual Institute for I/O BOF.  It's a very pragmatic BOF where people discuss system architecture, benchmarking, and various related community efforts, and since 2017 it has also included the semiannual unveiling of the IO-500 list.

This year was exciting in that the top system, a DDN IME installation at JCAHPC, was unseated by the monstrous storage system attached to the Summit system at Oak Ridge, which sustained an astounding 2 TiB/sec and 3 million opens/sec.  In fact, the previous #1 system dropped to #4, and each of the new top three systems was of a different architecture (Spectrum Scale at Oak Ridge, IME at KISTI, and Lustre at Cambridge).

    Perhaps the most interesting of these new submissions was the #3 system, the Data Accelerator at Cambridge, which is a home-grown whitebox system that was designed to be functionally equivalent to DataWarp's scratch mode:

    Alasdair King presenting the Data Accelerator design at the IO-500 BOF


The hardware is just Dell boxes with six NVMe drives and one OPA NIC per socket, and the magic is actually handled by a cleanroom reimplementation of the interface that Slurm uses to instantiate DataWarp partitions on Cray XC systems.  Rather than use a sophisticated orchestration system as DataWarp does, though, the Data Accelerator translates Slurm #DW pragmas into Ansible plays that spin up and tear down ephemeral Lustre file systems.
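To give a sense of the user-facing side, a job requesting one of these ephemeral file systems might look something like the hypothetical batch script below; the #DW jobdw line follows DataWarp's directive syntax, and the environment variable follows DataWarp's convention for the scratch mount point:

```bash
#!/bin/bash
#SBATCH --nodes=4
#DW jobdw capacity=10TiB access_mode=striped type=scratch

# on the Data Accelerator, the #DW directive above would be translated
# into an Ansible play that stands up an ephemeral Lustre file system
# before the job begins executing
srun ./my_simulation --workdir "$DW_JOB_STRIPED"
```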

The fact that the #3 fastest storage system in the world is a whitebox NVMe system is really remarkable, and my hat is off to the team at Cambridge that did this work.  As all-flash parallel file systems move out of the realm of high-end boutique solutions and become affordably mainstream, relatively scrappy but innovative engineering like the Cambridge system is surely going to cause a rapid proliferation of flash adoption in HPC centers.

    DDN also presented their software-defined IO-500 submission, this time run in Google Cloud and landing in the #8 position:

DDN's software-defined IO-500 submission running SFA in Google Cloud

Since DDN's embedded SFA product line already runs virtual machines on their controller hardware, it doesn't seem like a big stretch to run the same SFA VMs in the cloud.  While this sounds a little counter to DDN's biggest differentiator (providing a fully integrated hardware platform), this idea of running SFA in Google Cloud arose from the growing need for parallel file systems in the cloud.  I can only assume that this need is being largely driven by AI workloads which require a combination of high I/O bandwidth, high IOPS, and POSIX file interfaces.

    Thursday, November 15

    The conference was showing signs of winding down by Thursday, as many attendees brought their luggage with them to the convention center so they could head back home that night.  The expo floor also closes in the mid-afternoon on Thursday.


    Technical Program, Part 2 - Exhibitor Forum

My Thursday began at 10:30 AM with the HPC Storage and Memory Architectures session of the Exhibitor Forum.  Liran Zvibel, former CTO and now CEO of WekaIO, was the first presenter and gave a surprisingly technical description of the WekaIO Matrix parallel file system architecture:

WekaIO's Matrix file system architecture block diagram.  A surprising amount of detail can be gleaned by examining it carefully.

In terms of building a modern parallel file system from the ground up for all-flash, WekaIO checks off almost all of the right boxes.  It runs almost entirely in user space to keep latency down, it runs in its own reserved pool of CPU cores on each client, and it capitalizes on the approximate parity between NVMe latency and modern high-speed network latency.  They make use of a lot of the smart ideas implemented in the enterprise and hyperscale storage space too, and they are one of the few genuinely future-looking storage companies thinking about the new possibilities of the all-flash world while still courting the HPC market.

    There is a fair amount of magic involved that was not broken down in the talk, although I've found that the WekaIO folks are happy to explain some of the more complex details if asked specific questions about how their file system works.  I'm not sure what is and isn't public though, so I'll save an architectural deep-dive of their technology for a later date.

    Andreas Schlapka of Micron Technology was the next speaker, and his talk was quite a bit more high-level.  Aside from the grand statements about how AI will transform technology though, he did have a couple of nice slides that filled some knowledge gaps in my mind.  For example:

    Broad strokes highlighting the different computational (and architectural) demands of training and inference workloads

    Training is what the vast majority of casual AI+HPC pundits are really talking about when extolling the huge compute requirements of deep learning.  Part of that is because GPUs are almost the ideal hardware solution to tackle the mathematics of training (dense matrix-matrix multiplication) and post impressive numbers; the other part is that inference can't happen without a well-trained model, and models are continually being refined and re-trained.  What I hadn't fully appreciated is that inference is much more of an interesting computational problem in that it more closely resembles the non-uniform and latency-bound workloads of scientific computing.

This has interesting implications for memory technology; while HBM2 definitely delivers more bandwidth than DDR, it does this by increasing the channel width to 128 bits and hard-wiring 8 channels into each stack.  The extra bandwidth helps feed GPUs for training, but it's not doing much for the inference side of AI which, presumably, will become a much more significant fraction of the cycles required overall.  In my mind, increasing the size of SRAM-based caches, scratchpads, and register files is the more obvious way to reduce latency for inference, but we haven't really seen a lot of fundamentally new ideas on how to effectively do that yet.

    The speaker went on to show the following apples-to-apples system-level reference:

    System-level speeds and feeds of the memory products available now or in the near future as presented by Micron

    It's not terribly insightful, but it lets you back out the bus width of each memory technology (bandwidth / data rate / device #) and figure out where its bandwidth is coming from:
• DDR4 and DDR5 use 64-bit channels and rely on increasing channel-level parallelism to improve bandwidth.  This is now putting them in a place where you wind up having to buy way more capacity than you may want just to get sufficient bandwidth.  This is analogous to where HDDs are in the HPC storage hierarchy today; it's rapidly becoming uneconomical to rely on DDR for bandwidth.
    • GDDR uses narrower channels (32 bits) but more of them to get better bandwidth.  They also rely on phenomenally high data rates per pin; I don't really understand how this is possible since they rely on inefficient single-ended signaling.
• HBM uses both wide (128 bits) and plentiful channels to get its performance; the table is a bit misleading in this regard since each "device" (HBM stack) contains eight channels.  This is fine for feeding highly parallel arithmetic units like vector ALUs, but this offers no benefit to latency-bound workloads that, for example, chase pointers to traverse a graph. (it turns out HBM is just fine for pointer chasing--thanks to one of the HPC's memory-wizards-at-large for pointing this out to me!)
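As a hypothetical worked example of that back-out arithmetic: a single DDR4 channel delivering 25.6 GB/s at a 3.2 GT/s data rate works out to 25.6 ÷ 3.2 = 8 bytes per transfer, which is exactly the 64-bit channel width described above.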
    Micron also made the strange assertion that they are the only company that offers the entire range of memory products.  I guess since Samsung and SK Hynix both opted to skip SC, Micron can say whatever it likes; however, Samsung is currently the only company shipping commercial quantities of HBM, and Hynix's HBM capability just came online.  As far as I know, Micron has never manufactured a stack of HBM since they spent years promoting the competing-but-now-defunct Hybrid Memory Cube technology.

    The NSF Future Directions BOF

    I opted to see what was new with National Science Foundation's Office of Advanced Cyberinfrastructure (OAC) at their noon BOF.  Despite having left the NSF world when I left San Diego, I still care deeply about NSF computing because they pay for many of the most accessible HPC resources in the US.  I certainly got my start in HPC on the NSF's dime at SDSC, and I got to see firsthand the huge breadth of impact that SDSC's XSEDE resources had in enabling smaller research groups at smaller institutions to perform world-class research.  As such, it's also no surprise that the NSF leads the pack in developing and deploying many of the peripheral technologies that can make HPC accessible such as federated identity, science gateways, and wide-area file systems.

    That all said, actually listening to the NSF HPC strategic vision makes me rather grumpy since the directions of such an important federal office sometimes appear so scattershot.  And judging by the audience questions at the end of the BOF, I am not the only one--Very Important People(tm) in two different national-level HPC consortia asked very pointed questions of Manish Parashar, the NSF OAC director, that highlighted the dichotomy between OAC's strategic vision and where it was actually putting money.  I really believe in the critical importance of NSF investment in maintaining national cyberinfrastructure which is probably why I keep showing up to these BOFs and do my best to support my colleagues at SDSC and the other XSEDE SPs.

After sitting through this Future Directions BOF, I could write another updated rant about how I feel about the NSF's direction in HPC and get myself in trouble.  Instead, I'll share just a few slides I photographed from afar along with some objective statements and leave it at that.

    The future directions summary slide:

    NSF OAC's future directions
• Performance, capability computing, and global leadership are not mentioned in the above slide.  Terms like "agility," "responsiveness," and "accessibility" are often used to describe the cloud.
    • "reduce barriers to CI adoption" indicates that NSF wants to serve more users.  NSF is not increasing investment in capital acquisition (i.e., more or larger HPC systems beyond the status quo of technology refreshes).
    • "Prioritize investments to maximize impact" does not define what impacts are to be maximized.

    The Frontera slide:

    NSF's next leadership-class HPC, Frontera, to be deployed by TACC
    • The award amount was $60M.  The previous Track-1 solicitation that funded Blue Waters was $200M.  Stampede was $30M, and Stampede 2 was another $30M.
    • "leadership-class ... for all [science and engineering] applications" either suggests that all science and engineering applications are leadership-capable, or this leadership-class system is not primarily designed to support a leadership computing workload.
    • It is unclear what the significance of the "CPU" qualifier in "largest CPU system" is in the larger context of leadership computing.
    • There is mention of "leadership-class" computing.  There is no mention of exascale computing.  There is nothing that acknowledges leveraging the multi-billion-dollar investment the US has made into the Exascale Computing Project.  An audience member politely asked about this omission.

    The Midscale Research Infrastructure slide:

    Upcoming solicitations for research cyberinfrastructure
    • NSF OAC expects to issue one $6M-$20M solicitation and another $20M-$70M solicitation "soon" to fund HPC systems and the associated infrastructure.
    • $6M-$20M is on the same order of magnitude as the Track-2 solicitations that funded SDSC's Gordon ($10M) and Comet ($12M).
    • $20M-$70M is on the same order of magnitude as the Track-2 solicitations that funded TACC's Stampede 1 and 2 ($30M).  NSF's next leadership-class investment (Frontera) is $60M.

    My SC Paper

    The next major item on my agenda was presenting my paper, A Year in the Life of a Parallel File System, as the final talk in the final session of the paper track.

    My name in lights--or something like that.

I was admittedly bummed out when I found out that I was going to be the conference closer since a significant number of SC attendees tend to fly out on Thursday night and, presumably, would not stick around for my presentation.  As a result, I didn't take preparation for it as seriously in the weeks leading up to SC as I normally would have.  I knew the presentation was a 30-35 minute talk that had to fit into a 25-minute slot, but I figured I would work out how to manage that the night before the talk and mostly wing it.

    What I realized after arriving at SC was that a bunch of people--most of whom weren't the expected audience of storage researchers--were looking forward to hearing the talk.  This left me scrambling to seriously step up the effort I was going to put into making sure the presentation was well composed despite needing to drop ten minutes of material and fit it into the 25 minutes I was given.  I documented my general approach to crafting presentations in my patented Glenn K. Lockwood Five Keys to Being a Successful Researcher (FKBSR) method, but I'll mention some of my considerations for the benefit of anyone who is interested in how others approach public speaking.
    1. I absolutely could not overshoot the timing because some attendees had to leave at 5 PM to catch 7 PM flights.  This meant that it would be better for me to undershoot the time and either draw out the conclusions and acknowledgments slides to finish on time or finish early and leave extra time for questions.
    2. The people I met at SC who indicated interest in my talk were storage systems people, not statisticians.  This meant I could probably tone down the statistical rigor in the presentation without offending people's scientific sensibilities.
    3. Similarly, because attendees were already familiar with typical HPC I/O systems and the relevant technologies, I could gloss over the experimental setup and description of the different compute and storage systems.
    4. Given the above considerations, a reasonable approach would be to punt as many non-essential details into the Q&A after the talk and let people try to poke holes in my methods only if they really cared.
    I also know two things about myself and the way I present:
    1. I can present either at a casual pace where I average ~70 seconds per slide or in turbo mode where I average ~50 seconds per slide.  Orating at turbo speed requires a lot more preparation because it requires speaking through slide transitions rather than pausing to reorient after each slide transition.
    2. I get distracted easily, so I would rather have people begin to leave after my monologue ended and Q&A began than have the commotion of people getting up derail the tail end of my presentation.
As a result of all these factors, I opted to cut a lot of details to get the talk down to ~25-30 minutes when presented at a casual pace, and to prepare to present in turbo mode just in case the previous speakers went long (I was last of three speakers), there were A/V issues (they were prolific at this SC, especially for Mac users), or there were any audience interruptions.

    I also opted to present from my iPad rather than a full laptop since it did a fine job earlier at both PDSW-DISCS and the IO-500/VI4IO BOF.  In sticking with this decision though, I learned two valuable things during the actual presentation:
    1. The iOS "do not disturb" mode does not suppress Twitter notifications.  A couple of people were kind enough to tweet about my presentation as I was giving it, but this meant that my presenter view was blowing up with Twitter noise as I was trying to present!  Fortunately I only needed to look down at my iPad when transitioning between slides so it didn't derail me.
    2. There's no usefully sized timer or clock in PowerPoint for iOS's presenter view, and as a result, I had no idea how I was doing on time as I entered the final third of my slides.  This became a distraction because I was fully expecting a five-minute warning from the session moderator at some point and got worried that I wasn't going to get one.  As such, I didn't want to slow down the tail of the presentation without knowing how close I was getting to the target.  It turned out that I didn't get a five-minute warning because I was already concluding at that point.
    Fortunately the audience was sufficiently engaged to pad out the Q&A period with many of the questions that would've been answered by the slides I had dropped.  Afterwards I got feedback that indicated the presentation was noticeably short to the audience (not great) but that the narrative remained understandable to most attendees throughout the entire presentation (good).

    As far as the technical content of the presentation though, I won't recap that here--until I write up the high-level presentation as another blog post, you may have to read the paper (or invite me to present it at your institution!).


    SC Technical Program Reception

I've never attended the reception that wraps up the last full day of SC for a variety of reasons, and I was going to skip it again this year to fit some me-time into the otherwise frantic week.  However, the venue (the Perot Museum) and its close proximity to my hotel lured me out.

    The entryway to the Perot Museum

    I am not a "never eat alone" kind of person because I find that my ability to be at the top of my game diminishes without at least some intermittent time to sit back and digest.  As such, I approached the reception with very selfish intent: I wanted to see the museum, learn about something that had nothing to do with supercomputing, have a drink and a meal, and then go back to my hotel.  So I did just that.

    The dinosaurs seemed like a major feature of the museum:

    Rapetosaurus skeleton on display at the Perot Museum

The paleontological diversity of the dinosaur room reminded me of the dinosaur museum near my wife's hometown in the Canadian prairies, but the exhibit seemed to be largely reproduction fossils that blended science with entertainment.

    More impressive to me was the extensive mineral collection:

    I'm a sucker for quartz.  I did my PhD research on silicates.

    Not only were the minerals on display of remarkable quality, but many of them were found in Texas.  In fact, the museum overall had a remarkably Texas-focused set of exhibits which really impressed me.  The most interesting exhibit that caught my attention was a mini-documentary on the geologic history of Texas that explained how plate tectonics and hundreds of millions of years resulted in the world-famous oil and gas reserves throughout the state.

    Having learned something and enjoyed some delightful food at the museum, I then called it quits and cashed out.

    Friday, November 16

    The last day of SC is always a bit odd because the expo has already wrapped up, most of the vendors and casual attendees have gone home, and the conference is much more quiet and focused.  My day started with a surreal shuttle ride to the conference center in what appeared to be a 90's-era party bus:

    Conference shuttle, complete with taped-together audio system, faux leather sofa, and a door that had to be poked with a broom stick to open.


    Only six concurrent half-day workshops and a panel were on the agenda:

    The entire Friday agenda fit on a single screen

    I stuck my head into the P3HPC workshop's first panel discussion to catch the age-old but ever-lively argument over someone's proposed definition of performance portability and productivity either being too broad or too narrow.  I/O performance portability generally does not have a place in these sorts of conversations (which I don't fault--algorithmic complexity in I/O is usually hidden from user applications) so I attended only as an interested observer and wasn't as fastidious about taking notes as I was earlier in the week.

    At 10:30 AM I headed over to the Convergence between HPC and Big Data: The Day After Tomorrow panel discussion which had a star-studded speaker lineup.  NERSC's Katie Antypas gave a great overview of the NERSC-9/Perlmutter architecture which fit the panel topic uncannily well since it is a system design from the ground up to meet the needs of both traditional HPC and large-scale data analysis.

    The NERSC-9 Project Director describing how the Perlmutter system embodies the convergence of HPC and Big Data in front of a remarkably big crowd in the final session of SC.

    Unfortunately I had to duck out shortly after she spoke to get to my last meeting of the week with an old colleague for whom I always make time at SC.  Incidentally, some of the most valuable time you can spend at SC is talking to industry consultants.  Not unlike getting to know members of the trade press, good consultants have exposure to a tremendous breadth of problem and solution spaces.  They can give you all manner of interesting insights into different vendors, industry verticals, and market trends in an otherwise brief conversation.

    After my final meeting was cut short by my colleague's need to run to the airport, I had a quick bite with another Friday holdout then made my own way to the airport to catch up on a week's worth of e-mails.  The flight back to Oakland was one of the rare occasions where I was just too worn out to try to catch up on some delinquent report writing and just watched three hours of Dark Tourist on Netflix.


    After the Conference

    It was technically Saturday by the time I finally got home, but the family was happy to see me (and the swag I had in tow):

    George fully appreciating the giant pile of conference swag with which I came home

    This was definitely the busiest SC of my career, but in many ways it was also the most productive.  I owe sincere thanks to everyone in the HPC community who made it such a worthwhile conference to attend--vendors, presenters, old colleagues, and even the new colleagues who occasionally just wanted to introduce themselves and express that they enjoy reading the nonsense I post on Twitter.  I always leave SC more amazed and humbled by all the bright minds with whom I connect, and I hope that I am doing my part to pay that experience forward for others now and in the SC conferences to come.
    - \ No newline at end of file diff --git a/_posts/glennklockwood/2018-2-24-tagbloggercom1999blog-4307061427721284246post-4879102640203246583.md b/_posts/glennklockwood/2018-2-24-tagbloggercom1999blog-4307061427721284246post-4879102640203246583.md deleted file mode 100644 index bb7b04e..0000000 --- a/_posts/glennklockwood/2018-2-24-tagbloggercom1999blog-4307061427721284246post-4879102640203246583.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2018-02-24 09:21:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2018/02/are-fpgas-answer-to-hpcs-woes.html -slug: are-fpgas-the-answer-to-hpc-s-woes- -title: Are FPGAs the answer to HPC's woes? ---- - -

    Executive Summary

    Not yet.  I'll demonstrate why no domain scientist would ever want to program in Verilog, then highlight a few promising directions of development that are addressing this fact.

    The usual disclaimer also applies: the opinions and conjectures expressed below are mine alone and not those of my employer.  Also I am not a computer scientist, so I probably don't know what I'm talking about.  And even if it seems like I do, remember that I am a storage architect who is wholly unqualified to speak on applications and processor performance.

    Premise

We're now in an age where CPU cores aren't getting any faster, and the difficulty of shrinking processes below 10 nm means we can't really pack any more CPU cores on a die.  Where's performance going to come from if we ever want to get to exascale and beyond?

    Some vendors are betting on larger and larger vectors--ARM (with its Scalable Vector Extensions) and NEC (with its Aurora coprocessors) are going down this path.  However, algorithms that aren't predominantly dense linear algebra will need very efficient scatter and gather operations that can pack vector registers quickly enough to make doing a single vector operation worthwhile.  For example, gathering eight 64-bit values from different parts of memory to issue an eight-wide (512-bit) vector multiply requires pulling eight different cache lines--that's moving 4096 bits of memory for what amounts to 512 bits of computation.  In order to continue scaling vectors out, CPUs will have to rethink how their vector units interact with memory.  This means either (a) getting a lot more memory bandwidth to support these low flops-per-byte ratios, or (b) pack vectors closer to the memory so that pre-packed vectors can be fetched through the existing memory channels.

Another option is GPUs, which work around the vector packing issue by implementing a massive number of registers and giant crossbars to plumb those bytes into arithmetic units.  Even then, though, relying on a crossbar to connect compute and data is difficult to continue scaling; the interconnect industry gave up on this long ago, which is why today's clusters now connect hundreds or thousands of crossbars into larger fat trees, hypercubes, and dragonflies.  GPUs are still using larger and larger crossbars--NVIDIA's V100 GPU is one of the physically largest single-die chips ever made--but there's an economic limit to how large a die can be.

    This bleak outlook has begun to drive HPC designers towards thinking about smarter ways to use silicon.  Rather than build a general-purpose processor that can do all multiplication and addition operations at a constant rate, the notion is to bring hardware design closer to the algorithms being implemented.  This isn't a new idea (for example, RIKEN's MDGRAPE and DESRES's Anton are famous examples of purpose-built chips for specific scientific application areas), but this approach historically has been very expensive relative to just using general-purpose processor parts.  Only now are we at a place where special-purpose hardware may be the only way to sustain HPC's performance trajectory.

Given the diversity of applications that run on the modern supercomputer, though, expensive custom chips that only solve one problem aren't very appetizing.  FPGAs are a close compromise, and there has been a growing buzz surrounding the viability of relying on FPGAs for mainstream HPC workloads.

Many of us non-computer scientists in the HPC business have only a vague and qualitative notion of how FPGAs can realistically be used to carry out computations, though.  Since there is growing excitement around FPGAs for HPC as exascale approaches, I set out to get my hands dirty and figure out how they might fit in the larger HPC ecosystem.

    Crash course in Verilog

Verilog can be very difficult to grasp for people who already know how to program in languages like C or Fortran (like me!).  On the one hand, it looks a bit like C in that it has variables to which values can be assigned, if/then/else controls, for loops, and so on.  However, these similarities are deceptive because Verilog does not execute like C; whereas a C program executes code line by line, one statement after the other, Verilog sort of executes all of the lines at the same time, all the time.

    A C program to turn an LED on and off repeatedly might look like:

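```c
/* set_led() is a hypothetical stand-in for whatever call
 * the platform provides to drive the LED's pin */
while (1) {
    set_led(1);   /* turn the LED on  */
    set_led(0);   /* turn the LED off */
}
```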
where the LED is turned on, then the LED is turned off, then we repeat.

    In Verilog, you really have to describe what components your program will have and how they are connected. In the most basic way, the code to blink an LED in Verilog would look more like

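```verilog
// a minimal sketch: one clock input, one LED output
module blink(input clk, output reg led);
    always @(posedge clk)
        led <= ~led;   // toggle the LED on every rising clock edge
endmodule
```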
    Whereas C is a procedural language in that you describe a procedure for solving a problem, Verilog is more like a declarative language in that you describe how widgets can be arranged to solve the problem.

    This can make tasks that are simple to accomplish in C comparatively awkward in Verilog. Take our LED blinker C code above as an example; if you want to slow down the blinking frequency, you can do something like

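```c
while (1) {
    set_led(1);
    sleep(1);    /* wait a second with the LED on  */
    set_led(0);
    sleep(1);    /* wait a second with the LED off */
}
```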
Because Verilog is not procedural, there is no simple way to say "wait a second after you turn on the LED before doing something else." Instead, you have to rely on knowing how much time passes between consecutive rising edges of the clk signal.

For example, the DE10-Nano has a 50 MHz clock generator, so a clock edge arrives every 1/(50 MHz) = 20 nanoseconds, and everything time-based has to be derived from this fundamental clock.  The following Verilog statement:

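```verilog
always @(posedge clk)
    cnt <= cnt + 1;
```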
indicates that every 20 ns, the cnt register (variable) is incremented by one. To make the LED wait for one second after it is turned on, we need to figure out a way to do nothing for 50,000,000 clock cycles (1 second / 20 nanoseconds). The canonical way to do this is to
    1. create a big register that can store a number up to 50 million
    2. express that this register should be incremented by 1 on every clock cycle
    3. create a logic block that turns on the LED when our register is larger than 50 million
    4. rely on the register eventually overflowing to go back to zero
    If we make cnt a 26-bit register, it can count up to 67,108,864 different numbers and our Verilog can look something like

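```verilog
reg [25:0] cnt;            // 26 bits: counts from 0 up to 2^26 - 1

always @(posedge clk) begin
    cnt <= cnt + 1;        // ticks every 20 ns; wraps to 0 on overflow
    if (cnt > 50000000)
        led <= 1;          // turn the LED on...
    else
        led <= 0;          // ...but how is "led" wired to the real LED?
end
```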
    However, we are still left with two problems:
1. cnt will overflow back to zero once cnt surpasses 2^26 - 1
    2. We don't yet know how to express how the LED is connected to our FPGA and should be controlled by our circuit
Problem #1 (cnt overflows) means that the LED will stay off for 50,000,001 clock cycles (about one second), but it'll turn on for only 2^26 - 50,000,001 cycles (17,108,863 cycles, or about 0.34 seconds). Not exactly the one second on, one second off that our C code does.

    Problem #2 is solved by understanding the following:

    • our LED is external to the FPGA, so it will be at the end of an output wire
    • the other end of that output wire must be connected to something inside our circuit--a register, another wire, or something else

The conceptually simplest solution to this problem is to create another register (variable), this time only one bit wide, in which our LED state will be stored. We can then change the state of this register in our if (cnt > 50000000) block and wire that register to our external LED:

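```verilog
reg led_state;             // 1-bit register holding the LED's state

always @(posedge clk) begin
    cnt <= cnt + 1;
    if (cnt > 50000000)
        led_state <= 1;
    else
        led_state <= 0;
end

assign led = led_state;    // persistently connect the register to the pin
```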
    Note that our assign statement is outside of our always @(posedge clk) block because this assignment--connecting our led output wire to our led_state register--is a persistent declaration, not the assignment of a particular value. We are saying "whatever value is stored in led_state should always be carried to whatever is on the other end of the led wire." Whenever led_state changes, led will simultaneously change as a result.

    With this knowledge, we can actually solve Problem #1 now by
    1. only counting up to 50 million and not relying on overflow of cnt to turn the LED on or off, and
    2. overflowing the 1-bit led_state register every 50 million clock cycles
    Our Verilog module would look like

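```verilog
module blink(
    input  clk,    // the DE10-Nano's 50 MHz clock
    output led     // output wire leading to the external LED
);
    reg [25:0] cnt = 0;
    reg led_state = 0;

    always @(posedge clk) begin
        if (cnt == 50000000) begin
            cnt <= 0;                    // count only to 50 million
            led_state <= led_state + 1;  // the 1-bit register overflows,
                                         // toggling the LED once per second
        end
        else
            cnt <= cnt + 1;
    end

    assign led = led_state;
endmodule
```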
    and we accomplish the "hello world" of circuit design:

The LED blinking once per second

    This Verilog is actually still missing a number of additional pieces and makes very inefficient use of the FPGA's hardware resources. However, it shows how awkward it can be to express a simple, four-line procedural program using a hardware description language like Verilog.

    So why bother with FPGAs at all?

    It should be clear that solving a scientific problem using a procedural language like C is generally more straightforward than with a declarative language like Verilog. That ease of programming is made possible by a ton of hardware logic that isn't always used, though.

    Consider our blinking LED example; because the C program is procedural, it takes one CPU thread to walk through the code in our program. Assuming we're using a 64-core computer, that means we can only blink up to 64 LEDs at once. On the other hand, our Verilog module consumes a tiny number of the programmable logic blocks on an FPGA. When compiled for a $100 hobbyist-grade DE10-Nano FPGA system, it uses only 21 of 41,910 programmable blocks, meaning it can control almost 2,000 LEDs concurrently**. A high-end FPGA would easily support tens of thousands.

    The CM2 illuminated an LED whenever an operation was in flight. Blinking the LED in Verilog is easy.  Reproducing the CM2 microarchitecture is a different story.  Image credit to Corestore.
Of course, blinking LEDs haven't been relevant to HPC since the days of Connection Machines, but if you were to replace LED-blinking logic with floating point arithmetic units, the same conclusions apply.  In principle, a single FPGA can perform a huge number of floating-point operations every cycle by giving up its ability to perform many of the tasks that a more general-purpose CPU would be able to do.  And because FPGAs are reprogrammable, they can be quickly configured to have an optimal mix of special-purpose parallel ALUs and general purpose capabilities to suit different application requirements.

    However, the fact that the fantastic potential of FPGAs hasn't materialized into widespread adoption is a testament to how difficult it is to bridge the wide chasm between understanding how to solve a physics problem and understanding how to design a microarchitecture.

    Where FPGAs fit in HPC today

To date, a few scientific domains have had success in using FPGAs.  For example, FPGAs have long been used to process the raw data streaming off of experimental detectors, and companies like Convey and Edico have built FPGA-backed appliances for bioinformatics workloads.

The success of these FPGA products is due in large part to the fact that the end-user scientists don't ever have to directly interact with the FPGAs.  In the case of experimental detectors, FPGAs are sufficiently close to the detector that the "raw" data that is delivered to the researcher has already been processed by the FPGAs.  Convey and Edico products incorporate their FPGAs into an appliance, and the process of offloading certain tasks to the FPGA is hidden inside proprietary applications that, to the research scientist, look like any other command-line analysis program.

    With all this said, the fact remains that these use cases are all on the fringe of HPC.  They present a black-and-white decision to researchers; to benefit from FPGAs, scientists must completely buy into the applications, algorithms, and software stacks.  Seeing as how these FPGA HPC stacks are often closed-source and proprietary, the benefit of being able to see, modify, and innovate on open-source scientific code often outweighs the speedup benefits of the fast-but-rigid FPGA software ecosystem.


    Where FPGAs will fit in HPC tomorrow

    The way I see it, there are two things that must happen before FPGAs can become a viable general-purpose technology for accelerating HPC:
    1. Users must be able to integrate FPGA acceleration into their existing applications rather than replace their applications wholesale with proprietary FPGA analogues.
    2. It has to be as easy as f90 -fopenacc or nvcc to build an FPGA-accelerated application, and running the resulting accelerated binary has to be as easy as running an unaccelerated binary.
    The first steps towards realizing this have already been made; both Xilinx and Intel/Altera now offer OpenCL runtime environments that allow scientific applications to offload computational kernels to the FPGA.  The Xilinx environment operates much like an OpenCL accelerator, where specific kernels are compiled for the FPGA and loaded as application-specific logic; the Altera environment installs a special OpenCL runtime environment on the FPGA.  However, there are a couple of challenges:
    • OpenCL tends to be very messy to code in compared to simpler APIs such as OpenACC, OpenMP, CUDA, or HIP.  As a result, not many HPC application developers are investing in OpenCL anymore.
    • Compiling an application for OpenCL on an FPGA still requires going through the entire Xilinx or Altera toolchain.  At present, this is not as simple as f90 -fopenacc or nvcc, and the process of compiling code that targets an FPGA can take orders of magnitude longer than it would for a CPU due to the NP-hard nature of placing and routing across all the programmable blocks.
• The FPGA OpenCL stacks are not yet polished and scientist-friendly; performance analysis and debugging generally still have to be done at the circuit level, which is untenable for domain scientists.
Fortunately, these issues are under very active development, and the story surrounding FPGAs for HPC applications improves month by month.  We're still years from FPGAs becoming a viable option for accelerating scientific applications in a general sense, but when that day comes, I predict that programming in Verilog for FPGAs will seem as exotic as programming in assembly does for CPUs.

    Rather, applications will likely rely on large collections of pre-compiled FPGA IP blocks (often called FPGA overlays) that map to common compute kernels.  It will then be the responsibility of compilers to identify places in the application source code where these logic blocks should be used to offload certain loops.  Since it's unlikely that a magic compiler will be able to identify these loops on their own, users will still have to rely on OpenMP, OpenACC, or some other API to provide hints at compile time.  Common high-level functions, such as those provided by LAPACK, will probably also be provided by FPGA vendors as pre-compiled overlays that are hand-tuned.
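As a purely hypothetical sketch of what that hint-driven workflow might look like, a compiler could map a simple loop onto a pre-compiled SAXPY overlay based on nothing more than an existing OpenACC directive:

```c
/* hypothetical: the pragma below is standard OpenACC, but here a
 * future compiler would match the loop against a pre-compiled FPGA
 * overlay for SAXPY rather than generate GPU offload code */
#pragma acc parallel loop
for (int i = 0; i < n; i++)
    y[i] = a * x[i] + y[i];
```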


    Concluding Thoughts

    We're still years away from FPGAs being a viable option for mainstream HPC, and as such, I don't anticipate them as being the key technology that will underpin the world's first exascale systems.  Until the FPGA software ecosystem and toolchain mature to a point where domain scientists never have to look at a line of Verilog, FPGAs will remain an accelerator technology at the fringes of HPC.

    However, there is definitely a path for FPGAs to become mainstream, and forward progress is being made.  Today's clunky OpenCL implementations are already being followed up by research into providing OpenMP-based FPGA acceleration, and proofs of concept demonstrating OpenACC-based FPGA acceleration have shown promising levels of performance portability.  On the hardware side, FPGAs are also approaching first-class citizenship with Intel planning to ship Xeons with integrated FPGAs in 2H2018 and OpenPOWER beginning to ship Xilinx FPGAs with OpenCAPI-based coherence links for POWER9.

The momentum is growing, and the growing urgency surrounding post-Moore computing technology is driving investments and demand from both the public and private sectors.  FPGAs won't be the end-all solution that gets us to exascale, nor will they be the silver bullet that gets us beyond Moore's Law computing, but they will definitely play an increasingly important role in HPC over the next five to ten years.

    If you've gotten this far and are interested in more information, I strongly encourage you to check out FPGAs for Supercomputing: The Why and How, presented by Hal Finkel, Kazutomo Yoshii, and Franck Cappello at ASCAC.  It provides more insight into the application motifs that FPGAs can accelerate, and a deeper architectural treatment of FPGAs as understood by real computer scientists.

** This is not really true.  Such a design would be limited by the number of physical pins coming out of the FPGA; in reality, output pins would have to be multiplexed, and additional logic to drive this multiplexing would take up FPGA real estate.  But you get the point.
\ No newline at end of file diff --git a/_posts/glennklockwood/2019-11-27-tagbloggercom1999blog-4307061427721284246post-4381869359328536242.md b/_posts/glennklockwood/2019-11-27-tagbloggercom1999blog-4307061427721284246post-4381869359328536242.md deleted file mode 100644 index 445a3aa..0000000 --- a/_posts/glennklockwood/2019-11-27-tagbloggercom1999blog-4307061427721284246post-4381869359328536242.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -author: Glenn K. Lockwood's Blog -author_tag: glennklockwood -blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast -blog_title: Glenn K. Lockwood -blog_url: https://glennklockwood.blogspot.com/search/label/hpc -category: glennklockwood -date: '2019-11-27 09:59:00' -layout: post -original_url: https://glennklockwood.blogspot.com/2019/11/sc19-recap.html -slug: sc-19-recap -title: SC'19 Recap ---- - -Last week was the annual Supercomputing conference, held this year in Denver, and it was its usual whirlwind of big product announcements, research presentations, vendor meetings, and catching up with old colleagues.  As is the case every year, SC was both too short and too long; there is a long list of colleagues and vendors with whom I did not get a chance to meet, yet at the same time I left Denver on Friday feeling like I had been put through a meat grinder.

    All in all it was a great conference, but it felt like it had the same anticipatory undertone I felt at ISC 2019.  There were no major changes to the Top 500 list (strangely, that mysterious 300+ PF Sugon machine that was supposed to debut at ISC did not make an appearance in Denver).  AMD Rome and memory-channel Optane are beginning to ship, but it seems like everyone's got their nose to the grindstone in pursuit of achieving capable exascale by 2021.

    As with every major HPC conference, I approached SC this year with the following broad objectives:
    1. Sharing knowledge and ideas by contributing to the technical program and its workshops, tutorials, and BOFs with the goal of getting more momentum behind good ideas and steering research and roadmaps in a direction best aligned with where I think the HPC industry needs to go
    2. Gathering intelligence across different technologies and market verticals to stay ahead of where technology and the community may be driving as a result of other parallel industries
    3. Contributing to community development amongst storage and I/O researchers and practitioners with the goal of broadening the community and bringing more people and ideas to the table
    4. Building and maintaining relationships with individual vendor representatives and peers so that I know to whom I can turn when new opportunities or challenges come up
    The things I took away from the conference are colored by these goals and the fact that I mostly work in high-performance storage systems design.  If I missed any major themes or topics in this recap post, it was likely a reflection of the above goals and perspective.

    Before the conference

    SC'19 started back in the early spring for me since I served on the technical papers committee and co-chaired the Parallel Data Systems Workshop this year.  That all amounted to a predictable amount of work throughout the year, but there were two surprises that came up in October with respect to SC that are worth mentioning before we dive into the technical contents of the conference.

    The "I am HPC Guru" campaign

    Jim Cownie had the brilliant idea in early October to launch a covert campaign to create "I am HPC Guru" pins for SC, and he enlisted a group of willing members of the HPC Twitter community to pitch in.  I was fortunate enough to be invited to participate in the fun, and judging by the reach of the #IAmHPCGuru tag on Twitter during the conference, it was a wild success.

    An allotment of "I am HPC Guru" pins.  People who pitched in also got a commemorative larger-sized pin (shown outside the bag above) which was a calling card for members of the secret society.

    Hats off to Jim for conceiving this great idea, seeing through the design and shipment of the pins, and being so inclusive with the whole idea.  There are now hundreds of HPC_Guru pins all over the world thanks to Jim's efforts (and a couple dozen still with me here in California...), and I think it was a really positive way to build the Twitter-HPC community.

    The new job

    Life also threw me a bit of a curve ball in late October when I took on a new set of responsibilities at NERSC and changed from contributing to an R&D group to leading an operational storage team.  This meant that, in addition to all the pre-conference commitments I had made with an eye towards longer-term storage technology strategy, I suddenly had to contextualize my goals with respect to a completely new role in tactical planning and deployment.

Whereas I've historically written off sales-oriented meetings at SC, having good relationships with vendor sales teams in addition to their engineers and product managers is now an essential component of my new position.  As a result of wearing these two hats instead of one, the number of hard commitments I had over the course of the conference roughly doubled over what it usually had been.  About half of these meetings were private (and not things about which I could write), and they also reduced the time I could've otherwise spent getting into the weeds about upcoming technologies.

    Because the conference was so broken up into private and public meetings for me this year, a chronological recounting of the conference (as I did for my SC'18 recap) would be full of odd gaps and not make a whole lot of sense.  Instead, I will focus around a few of the juiciest topics I took away from the conference:
    1. High-level trends that seemed to pop up repeatedly over the week
    2. Intel's disclosures around the Aurora/A21 system
    3. Outcomes from the 2019 Parallel Data Systems Workshop (PDSW 2019)
    4. The Perlmutter all-NVMe storage node architecture
    5. DAOS and the 2019 DAOS User Group meeting
    6. Everything else

It's difficult to group together all of the disparate things I heard and learned over the week into crisp bundles that I would consider emerging trends, but there were a few broad topics that kept popping up, suggesting the following:

#1 - Memory-channel 3D XPoint is now out in the wild at sufficient scale that a picture is beginning to form around where it fits in the I/O stack.  The NEXTGenIO project and Intel DAOS both demonstrated the performance achievable when 3D XPoint is integrated into larger systems this year, and the acceleration it offers can be staggering when a sensible software framework is built around persistent memory to bridge it with other media (like flash) and higher-level functionality (like parallel storage).  Michèle Weiland and Adrian Jackson presented their successes with the NEXTGenIO project throughout the week, most notably in the technical papers track (see "An early evaluation of Intel's Optane DC persistent memory module and its impact on high-performance scientific applications") and across several smaller events (e.g., Adrian presented performance results, detailed in his EPCC blog post, at the Multi-Level Memory BOF).  DAOS also made a splash on IO-500; more on this below.

    #2 - The I/O ecosystem developed in preparation for the manycore era is making the transition from pure research to practical engineering effort.  As the first generation of 7nm CPUs hit the market with KNL-like core counts and massive scale-up GPU node architectures are being announced by every major HPC silicon provider, latency-hiding techniques for I/O are becoming a hot topic.  Asynchronous I/O—that is, techniques that allow an application to continue computing while a write I/O operation is still happening—came up a few times, and this technique is also moving up in the software stack from system software (such as DAOS, WekaIO, and VAST) into middleware (MPI-IO and HDF5).  I touch on this in the PDSW section below.

    #3 - Innovation in HPC storage is moving away from the data plane and towards full data life cycle.  Whereas focus in HPC I/O has traditionally revolved around making I/O systems as fast as possible, research and product announcements this year seemed to gravitate towards data management—that is, how to manage the placement of data before, during, and after I/O.  Proprietary frameworks for data migration, policy management, tiering, and system-level analytics and intelligence (backed by serious vendor investment; see Cray ClusterStor Data Services and DDN STRATAGEM) are popping up across the storage appliance market as a differentiator atop open-source software like Lustre, and research around applying AI to optimize data placement is maturing from novel research into product engineering.

    #4 - Scientific workflows—and the parallels they have with enterprise and hyperscale markets—are starting to be taken seriously by technology providers.  Vendors have begun to take ownership of the data movement challenges that exist between bursts of compute-intensive jobs. Advances aimed at edge computing are becoming surprisingly relevant to HPC since decentralized data that is far away from compute is, in a sense, how HPC has done storage for decades.  Whether they be sensors distributed across billions of cell phones, thousands of non-volatile storage media distributed across an exascale computing system, or detectors deployed at giant telescopes relying on a supercomputer for image processing, there are a common set of data management, movement, and remote processing challenges whose solutions can be applied across the board.

    Intel's big splash

    Following on their big system-level disclosures at ISC'19, Intel's disclosure of the ALCF exascale system node architecture and the unveiling of their software strategy seemed to be the biggest splash of SC'19.  I was not actually at the Intel DevCon keynote where Raja Koduri made the announcements, but his slides on Xe and oneAPI are available online.

    The node architecture is, at a glance, very similar to the Summit node architecture today:
    From the slide and accompanying discussion on Twitter, there was quite a lot unveiled about the node architecture.  Each node will have:
    • Two Sapphire Rapids Xeons (which appear to have 8 channels of DDR in the aforementioned slide) and six Ponte Vecchio Intel GPUs
    • A CXL-based "Xe Link" router provides all-to-all connectivity between the GPUs, presumably comparable to (but more standards-based than) NVLink/NVSwitch, for a unified memory space
    • Eight Slingshot NIC ports per node, which is 1.6 Tbit/sec of injection bandwidth
    • A "Rambo Cache" that sits between HBM, GPU, and CPU that presumably reduces NUMA effects for hot data that is being touched by many computing elements
    • A "matrix engine" (which sounds an awful lot like NVIDIA's tensor cores) in each GPU
    This was an extremely daring release of information, as Intel has now publicly committed to a 7nm GPU part (comparable to TSMC's 5nm process), along with a high-yield EMIB process (their chiplet interconnect for HBM integration) and Foveros (their 3D die stacking for Rambo integration), in 2021.

Intel also released the beta version of Intel oneAPI, which appears to be a mixture of re-branded Intel developer products (Fortran and C++ compilers, TBB, MKL, DAL, MPI, VTune, etc.) and their new SYCL-based Data Parallel C++ compiler.  The novelty here is that Intel is committing to supporting this entire stack for CPUs, GPUs, FPGAs, and matrix accelerators so that, for example, you could feasibly write a single application with a single set of tools that runs across all accelerator types.
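For a flavor of what writing against this stack looks like, here is a minimal SYCL vector add.  This is my own sketch following the SYCL 1.2.1 interfaces that Data Parallel C++ builds upon, not Intel sample code; in principle, the same source targets a CPU, GPU, or FPGA just by choosing a different device selector.

```c++
// A minimal SYCL vector add in the style Data Parallel C++ builds upon
// (sketch only, following the SYCL 1.2.1 interfaces of this era).
#include <CL/sycl.hpp>
#include <vector>

int main()
{
    namespace sycl = cl::sycl;
    std::vector<float> a(1024, 1.0f), b(1024, 2.0f), c(1024, 0.0f);

    sycl::queue q{sycl::default_selector{}};
    {
        // Buffers hand ownership of the host arrays to the runtime
        sycl::buffer<float, 1> A{a.data(), sycl::range<1>{a.size()}};
        sycl::buffer<float, 1> B{b.data(), sycl::range<1>{b.size()}};
        sycl::buffer<float, 1> C{c.data(), sycl::range<1>{c.size()}};

        q.submit([&](sycl::handler &h) {
            auto ka = A.get_access<sycl::access::mode::read>(h);
            auto kb = B.get_access<sycl::access::mode::read>(h);
            auto kc = C.get_access<sycl::access::mode::write>(h);
            h.parallel_for<class vadd>(sycl::range<1>{a.size()},
                [=](sycl::id<1> i) { kc[i] = ka[i] + kb[i]; });
        });
    }   // buffer destructors synchronize results back to the host here
    return 0;
}
```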

There was a lot of interest in SYCL at the Performance Portability and Productivity workshop, P3HPC, on Friday.  There were two talks of particular interest in the parts I attended; the first, given by Balint Joo of Jefferson Lab, compared the performance of a quantum chromodynamics kernel implemented using Kokkos, accelerator-specific libraries, and SYCL:

    SYCL vs. Kokkos vs. native on NVIDIA and Intel architectures

These early results are encouraging, and with the exception of KNL, the SYCL ecosystem is already showing promise as a performance-portable framework.  The same is generally true for more complex computational kernels as well, as presented by Istvan Reguly from Pázmány Péter Catholic University:
    Performance portability figure of merit for a complex kernel using different performance-portable parallel runtimes.

    Intel's choice to back an open standard rather than develop its own proprietary APIs for each accelerator type was a very smart decision, as it looks like they are already making up lost ground against NVIDIA in building a robust software ecosystem around their accelerator technologies.  The fact that these presentations were given by application scientists, not Intel engineers, really underscores this.

Strangely, AMD kept a low profile at SC by comparison despite the fact that Rome is beginning to enter the market and, by all accounts I heard on the show floor, selling like gangbusters.  One major procurement I heard about switched from an Intel CPU-based plan of record to AMD processors as a result of a schedule slip by Intel; this wound up resulting in the system obtaining 50% more cores at the same cost (plus the added benefit of PCIe Gen4), which is a testament to the advantage that AMD currently has in the near term.

By comparison, very few large HPC centers seem to be biting on Intel's Cascade Lake-AP despite Intel's very aggressive marketing against Rome.  Combined with the above observation that the Aurora architecture's Sapphire Rapids processors will only have eight memory channels per socket, this suggests that Cascade Lake-AP's 12-channel socket was likely released as a stopgap to have an answer to Rome while 10nm Xeon part production is scaling up.

    PDSW 2019

This year I had the great honor of co-chairing the Parallel Data Systems Workshop, the premier data and storage workshop at SC, along with the esteemed Phil Carns (creator of Darshan and PVFS2/OrangeFS, among other things).  We tried to broaden the scope of the workshop to be more inclusive of "cloudy" storage and data topics, and we also explicitly tried to build the program to include discussion about data management that ran tangential to traditional HPC-focused storage and I/O.

    The proceedings are already online in an interim location hosted by ACM, and the full proceedings will be published by IEEE TCHPC.  Slides are available on the PDSW website, and I tried to tag my realtime thoughts using #pdsw19 on Twitter.

    Alluxio Keynote

Our keynote speaker was Haoyuan Li, founder of Alluxio, who gave a brilliant talk about the data orchestration framework he developed at AMPLab and went on to commercialize.  It is an abstraction that stitches together different storage resources (file systems, object stores, etc) into a single namespace that applications can use to read and write data in a way that hides the complexity of tiered storage.  It was designed towards the beginning of the "Big Data revolution" with a specific eye towards providing a common interface for data accessibility; an application written against the Alluxio API would be future-proof even if the HDFS or S3 APIs fizzled, since Alluxio hides the specific API and semantics of each native storage interface from user applications.

    Had something like this existed in the early days of HPC, there's a good chance that we would not be stuck using POSIX I/O as the least common denominator for data access.  That said, Alluxio does solve a slightly easier problem in that it targets analytics workloads that are read-intensive—for example, it does not provide a means for applications to do random writes, and so it provides only a subset of the full semantics that some more general-purpose I/O interfaces (such as file access) may provide.  In making this trade-off though, it is able to aggressively cache data from any storage backend in a distributed memory space, and Alluxio has a configurable cache eviction policy for predictable workflows.

    In describing the motivation for the Alluxio design, Haoyuan had some interesting insights.  In particular, he pointed out that there is a growing movement away from the hyperconverged hardware architecture that motivated Hadoop and HDFS:

    The whole "move compute to where the data is!" model for Hadoop has always struck me as rather fanciful in practice; it only works in single-tenant environments where there's no chance of someone else's compute already existing where your data is, and it imposes a strict coupling between how you scale data and analytics.  As it turns out, the data analytics industry is also waking up to that, and as Haoyuan's slide above shows, separating storage from compute gives much more flexibility in how you scale compute with respect to data, but at the cost of increased complexity in data management.  The whole point of Alluxio is to minimize that cost of complexity by making data look and feel local by (1) providing a single namespace and API, and (2) using distributed memory caching to make data access perform as well as if compute and memory were colocated.

This is a bit ironic since HPC has been disaggregating storage from compute for decades; HPC systems have tended to scale compute capability far faster than storage.  However, the HPC community has yet to address the added complexity of doing this, and we are still struggling to simplify storage tiering for our users.  This is only getting worse as some centers slide back into hyperconverged node designs by incorporating SSDs into each compute node.  This causes different tiers to spread data across multiple namespaces and also further complicates data access since the semantics across those namespaces differ.  For example, it's not sufficient to know that
    • /local is the fastest tier
    • /scratch is less fast
    • /home is slow
    since
    • /local is only coherent with other processes sharing the same physical compute node
    • /scratch is globally coherent
    • /home is globally coherent
    Alluxio is not the solution to this problem at present because it is optimized for write-once, read-many workloads whereas HPC does have to support random writes.  That said, HPC storage systems that incorporate the same design goals as Alluxio (connecting many types of storage under a single namespace, providing a restricted set of semantics, and applying aggressive caching to deliver local-like performance) hold a lot of promise.  Perhaps it's no surprise that every serious parallel file system on the market is beginning to implement features like this—think Lustre File-Level Redundancy (FLR) and Persistent Client Caching (LPCC), Spectrum Scale AFM, and the core two-tier design of WekaIO.

    Haoyuan also presented a few case studies that showcased the ability of Alluxio to ease the transition from on-premise infrastructure (like Hadoop with HDFS) to hybrid cloud (e.g., run Presto across datasets both in older on-prem HDFS and newer S3 buckets).  It seems to be very fashionable to run analytics directly against data in object stores in industry, and Alluxio essentially gives such data more dynamism by being the place where active data can be staged for processing on demand.  Because it is a stateless orchestration layer rather than a storage system itself, Alluxio also seems nicely compatible with dynamic provisioning of compute resources.  In this sense, it may be an interesting internship project to see if Alluxio could be deployed on an HPC system to bridge a large data analytics job with an off-system object store.  Get in touch with me if you know a student who may want to try this!

    Asynchronous I/O

    Middleware for asynchronous I/O came up in two different papers this year.  The first, "Enabling Transparent Asynchronous I/O using Background Threads" by Tang et al., described a new pluggable runtime for HDF5 that processes standard HDF5 I/O requests asynchronously.  It does this by copying I/O requests and their metadata into a special buffer, putting those requests on a queue that is managed by the asynchronous runtime, building a directed graph of all requests' dependencies, and dispatching I/Os alongside regular application execution using a lightweight (Argobots-based) asynchronous worker pool.

    What this amounts to is that a standard HDF5 write call wouldn't block until the I/O has been committed to disk somewhere; instead, it returns immediately after the async runtime makes a copy of the data to be written into its own private memory buffer.  The application is then free to continue computing, while an Argobots thread begins buffering and dispatching outstanding asynchronous I/O calls.  The performance that results from being able to overlap I/O with computation is remarkable:

    I/O speedup at scale as a result of the asynchronous runtime backend for HDF5 presented by Tang et al.

    What's more impressive, though, is that this backend is almost entirely transparent to the user application; in its simplest form, it can be enabled by setting a single environment variable.
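To underscore how little the application has to change, here is a minimal sketch (my own example, not code from the paper) of a completely ordinary HDF5 write path.  With the async VOL connector loaded through the environment (e.g., via HDF5_VOL_CONNECTOR and HDF5_PLUGIN_PATH; the exact settings depend on the connector build), the H5Dwrite below returns once the runtime has buffered the data, and Argobots workers flush it in the background:

```c++
// Completely ordinary, blocking-style HDF5 code; with the async VOL
// connector loaded from the environment, none of this source changes.
#include <hdf5.h>

int main()
{
    hsize_t dims[1] = {1048576};
    static float data[1048576];  // payload to be written

    hid_t file  = H5Fcreate("out.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t space = H5Screate_simple(1, dims, NULL);
    hid_t dset  = H5Dcreate2(file, "x", H5T_NATIVE_FLOAT, space,
                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

    H5Dwrite(dset, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
    // ...the application is free to keep computing while the I/O drains...

    H5Dclose(dset);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}
```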

    Later in the day, Lucho Ionkov presented a much more novel (research-y?) asynchronous I/O runtime in his paper, "A Foundation for Automated Placement of Data" which glued together DRepl (an abstraction layer between scientific applications and storage architectures, vaguely similar to what Alluxio aims to do), TCASM (a Linux kernel modification that allows processes to share memory), and Hop (an expressive key-value store with tunable performance/resilience requirements).  The resulting runtime provides a high-level interface for applications to express I/O and data placement as a series of attach, publish, and re-attach operations to logical regions of memory.  The runtime then manages the actual data movement (whether it be between nodes or to persistent storage) asynchronously.

    Again, the net result in speedup as the problem size scales up is impressive:

I/O speedup at scale using the asynchronous I/O runtime presented by Ionkov in Otstott et al.
As with the asynchronous HDF5 paper, performance gets better with scale as the increasing costs of doing I/O at scale are amortized by overlapping it with computation.  In contrast to HDF5 though, this runtime comes with a completely new application API, so one would need to convert an application's critical I/O routines to use this framework instead of POSIX I/O.  The runtime is also pretty heavyweight in that it requires a separate global data placement "nameserver," a custom Linux kernel, and buy-in to the new memory model.  In that sense, this is a much more research-oriented framework, but the ideas it validates may someday appear in the design of a fully integrated framework that incorporates both an application runtime and a storage system.

Why is this important?  These asynchronous I/O runtimes are making a lot more sense in the era of heterogeneous computing where accelerators (think GPUs) really aren't good at driving a full kernel-based I/O pipeline.  Instead of running a full I/O stack and enforcing strict consistency (i.e., serializing I/O) on a lightweight accelerator core, having an asynchronous runtime running on a fat core that simply copies an I/O buffer from accelerator memory to slower memory before releasing program control back to the accelerator allows the accelerator to spend less time doing what it's terrible at doing (ordering I/O operations) and more time computing.  At the same time, the fat core that is running the asynchronous I/O runtime can then operate on that copied I/O buffer on its own time, reorder and serialize operations to ensure consistency, and jump into and out of the kernel to enforce file permissions without interrupting the accelerator:

    Sketch of how an asynchronous I/O runtime might map to a heterogeneous node architecture

    Ron Oldfield did raise a really great consideration during PDSW about this though: at the end of the day, the asynchronous I/O runtime still has to share network resources with the application's message passing runtime (e.g., MPI).  He alluded to work done a decade ago that found that asynchronous I/O was often stomping on MPI traffic since both MPI and I/O could happen at the same time.  Without some kind of awareness or coordination between the asynchronous I/O runtime and the application communication runtime, this sort of scheme is prone to self-interference when running a real application.

    Given this, the right place to integrate an asynchronous I/O runtime might be inside the message passing runtime itself (e.g., MPI-IO).  This way the asynchronous I/O scheduler could consider outstanding asynchronous messages it must pass as well and be smart about dispatching too many competing network transfers at the same time.  Unfortunately this then places a complex burden of serialization and synchronization on the runtime, and this starts to look a lot like just throwing messages at the NIC and letting it figure out the correct ordering.  The principal advantage here would be that the runtime has a lot more visibility into user intent (and may have more spare processing capacity if most of the application time is spent on an accelerator), so it could afford to be smarter about how it builds its dependency graph.

    Analytics for Runtime and Operations

    No computing-related workshop would be complete without a smattering of artificial intelligence and machine learning, and PDSW was no different this year.  Two papers were presented that attempted to use machine learning to predict parallel I/O performance in slightly different ways.

    Suren Byna presented "Active Learning-based Automatic Tuning and Prediction of Parallel I/O Performance" where the authors developed an approach for autotuning parallel I/O (specifically using MPI-IO hints and Lustre striping parameters) using active learning to predict the optimal values for their tuning parameters.  They used two different approaches, and the faster one uses predicted performance to infer optimal tuning values.  Given how many factors actually come to play in parallel I/O performance on production systems, their model was able to predict I/O performance quite well under a range of I/O patterns:

    Bing Xie et al presented "Applying Machine Learning to Understand Write Performance of Large-scale Parallel Filesystems" which pursued a similar line of work—using machine learning to predict I/O performance—but with a slightly different goal.  Xie's goal was to identify the factors which most strongly affect predicted I/O performance, and she found that write performance was most adversely affected by metadata load and load imbalance on Blue Gene/Q and GPFS, whereas Cray XK7 and Lustre were more affected by aggregate file system load and load imbalance.  This system-centric work laid out a more sophisticated blueprint for identifying causal relationships between poor I/O performance and system-level health events, and I think applying these approaches to the dataset I published last year with my Year in the Life of a Parallel File System paper might identify some interesting emergent relationships between bad performance and the subtle factors to which they can be attributed.

Why is this important?  Industry is beginning to take notice that it is no longer sufficient to just report the here-and-now of how parallel file systems are behaving, and more sophisticated analytics engines are being co-deployed with very large systems.  For example, the Summit system at Oak Ridge made a splash in October by announcing the real-time analytics engine that was implemented on top of it, and Cray View is a similar analytics-capable engine built atop Lustre that Cray offers as a part of its ClusterStor lineup.  I'm not sure if DDN has something comparable, but their recent purchase of Tintri and its robust, enterprise-focused analytics engine means that they hold IP that can undoubtedly be applied to its HPC-focused storage product portfolio.

    Being able to predict performance (and the conditions that cause it to degrade!) is the holy grail of parallel I/O systems management, and it's a sure bet that all the HPC storage vendors are watching research in this area very closely to see what ideas they can pluck from the community to add value to their proprietary analytics engines.  The fact that AI is being applied to production system data and yielding useful and actionable outcomes gives legs to this general idea of AI for self-driving systems.  The talks at PDSW this year were only demonstrations, not hardened products, but these ad-hoc or small-scale demonstrations are moving us in the right direction.

    My Talk on Data Motion

    I also coauthored and presented a paper at PDSW this year that was an exploratory study of how we can understand data movement throughout an entire data center.  The goal of the entire paper, "Understanding Data Motion in the Modern HPC Data Center," was to generate this diagram that shows how much data flows between different systems at NERSC:

    I won't recount the technical content of the talk here, but the paper is open access for those interested.  The essence of the study is that we showed that it is possible to examine data motion beyond the context of individual jobs and begin tying together entire workflows, but there's a lot of supporting work required to shore up the tools and telemetry from which this analysis draws.  The paper was very much a long-form work in progress, and I'd be interested in hearing from anyone who is interested in pursuing this work further.

    Scale-up highly available NVMe hardware

Although it didn't make many headlines (as storage rarely does), Cray announced its new ClusterStor E1000 platform shortly before SC and had some of their E1000-F all-NVMe enclosures on display at a few booths.  I normally don't care too much about storage enclosures (it's all just sheet metal, right?), but this announcement was special to me because it is the hardware platform that is going into NERSC's Perlmutter system in 2020, and I've been involved with the different iterations of this hardware design for over a year now.

    It's very gratifying to see something start out as a CAD drawing and a block diagram and grow up into actual hardware:

    The E1000-F all-NVMe enclosure

    Torben Kling Petersen gave a talk at the Exhibitor Forum disclosing the details of the hardware design on behalf of Cray, and it looks like they've made just about everything surrounding the E1000 public:

    The foundation for this platform is the E1000-F high-availability enclosure as shown in the above slide.  It has two separate Rome-based servers ("controllers") and 24 U.2 NVMe slots capable of PCIe Gen4.  Each Rome controller has slots for up to three 200 Gbit NICs; doing the math, this gives a very nicely balanced design that is implemented entirely without PCIe switches:

    Cartoon block diagram for one half of the E1000-F chassis.  Note that the NVMe read rates (violet text) are assumed based on Samsung PM1733 specs and performance projections that Petersen presented.  Also note that each NVMe drive is 2x2 PCIe Gen4 with multipath to the other Rome controller (not shown).
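As a rough sanity check on that claim of balance, here is my own back-of-envelope arithmetic assuming a nominal ~2 GB/s of usable bandwidth per PCIe Gen4 lane (these are not figures from Cray):

```
Per controller:
  NVMe side:     24 drives x 2 Gen4 lanes x ~2 GB/s/lane  ≈  96 GB/s
  Network side:   3 NICs x 200 Gbit/s ÷ 8 bits/byte       ≈  75 GB/s
```

The two sides land within about 25% of each other, which is the balance the design is claiming.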
    I visited the booth of the ODM with whom Cray worked to develop this node design and was fortunate enough to meet the node architects from both sides who gave me a really helpful breakdown of the design.  Physically, the 2U chassis is laid out something like this:

    Just about everything is both hot-swappable and fully redundant.  The entire system can be powered and cooled off of a single 1.2 kW(?) power supply, and all the fans are hot-swappable and configured in a 5+1:

    Fans are all individually replaceable and configured in 5+1.  You can also see the NVMe backplanes, attached to an active midplane (not shown), through the open fan slot.

    All the fans are on the same pulse-width modulator (PWM), so they all operate at the same speed and provide even airflow as long as they are properly powered.  My recollection from what the architect told me is that the PWM signal is provided by an FPGA on the midplane which also handles drive power-up.  Because there is only a single midplane and this power/cooling controller lives on it, this power/cooling FPGA is also configured redundantly as 1+1.  Thus, while the midplane itself is not redundant or field-replaceable, the active components on it are, and it would take physical damage (e.g., someone punching a hole through it and breaking the PCB traces) to knock the whole chassis offline.

    Each chassis has two independent node boards that are hot-pluggable and self-contained:

    One of the E1000-F node sleds with its cover popped off at the Cray booth
Each node board is wrapped in a sheet metal sled and has a screwed-on lid.  The whole node sled was designed by the ODM to be a field-replaceable unit (FRU), so doing something like a DIMM swap does require a screwdriver to remove the top cover.  However, it's ultimately up to OEMs to decide how to break down FRUs.

    The ODM had a bare controller board at its booth which looks like this:

    E1000-F bare controller board
    There are two M.2 PCIe Gen4 slots for mirrored boot drives and a pair of big hot-plug block connectors in the front of the board for redundant power and 48 lanes of PCIe Gen4 for the 24x U.2 drives hanging off the midplane.  There's a single riser slot for two standard HHHL PCIe add-in cards where two NICs plug in, and a third OCP-form factor slot where the third NIC can slot in.  The rear of the controller sled shows this arrangement:

    Rear view of a single Rome controller
    It looks like there's a single RJ45 port (for LOM?), a power and reset button, a single USB-3, and a mini DisplayPort for crash carting.

    When Cray announced the E1000-F, HPCwire ran a block diagram of the complete chassis design that suggested that heartbeating would be done through a non-transparent bridge (NTB) implemented on the AMD Rome host interface.  This was a little worrisome since AMD has yet to release the proper drivers to enable this NTB for Linux in a functional way; this simple fact is leading other ODMs towards a more conservative node design where a third-party nonblocking PCIe switch is added simply to provide a functioning NTB.  When I asked the architect about this, though, he revealed that the E1000-F also has an internal gigabit Ethernet loop between both controllers for heartbeating which completely obviates the need to rely on any NTB for failover.

    Another interesting thing I learned while talking to the E1000-F designers is that the power supply configuration gives a lot of runway for the overall system design:

    One of the two power supply sleds for the E1000-F chassis.  Lots of free real estate remains and is currently occupied by bus bars.
    The current power supply is (I believe) ~1200 W, and the carrier sled on which it is mounted is mostly empty space taken up by two fat bus bars that reach all the way to the front of it.  In leaving all of this space in the sled, it will be fully possible to build a physically compatible PSU sled that delivers significantly more power to the U.2 NVMe drives and host controllers if the power consumption of the controllers or the NVMe drives increases in the future.  The ODM confirmed that the cooling fans have similar headroom and should allow the whole enclosure to support a higher power and thermal load by just upgrading the power and controller FRUs.

This point is important because the performance of PCIe Gen4 SSDs is actually capped by their power consumption—if you look at product sheets for ruler SSDs (M.2, NF1, and E1.S), you will find that their performance is universally lower than that of their U.2 and HHHL variants due to the fact that the ruler standards limit power to 8-12 W compared to U.2/HHHL's ~25 W.  This E1000-F chassis is designed as-is for 25 W U.2 drives, but there are already proposals to push individual SSD power up to 40 W and beyond.  Given this trend and the high bandwidth available over a PCIe Gen4 x4 connector, it's entirely possible that there will be a demand for higher-power NVMe enclosures as Gen4 matures and people want to drive Gen4 NVMe at line rate.

    DAOS User Group

The 2019 DAOS User Group was held on Wednesday in a hotel adjacent to the main convention center.  In contrast to the previous years I attended, this meeting felt like a real user group; there were presenters from several different organizations, none of whom directly contribute to or are contractual customers of DAOS.  There was also real performance data, largely centered around the insanely high IO-500 benchmark score that DAOS posted earlier in the week:

    Bandwidth spread on the IO-500's IOR test suite
These numbers come from a pretty modest server environment and client count (24 DAOS servers, 26 client nodes, 28 ranks per client, dual-rail OPA100) and use the native DAOS API.  What I didn't snap a photo of are the crazy metadata rates, which posted a geometric mean of 4.7 million IOPS; by comparison, the 250 PB Alpine file system attached to the Summit supercomputer at Oak Ridge posted 1.2 million IOPS using more than 500 clients.  To the extent that it was meant to address the IOPS limitations intrinsic to traditional parallel file systems, the DAOS design is looking like a resounding success.

    According to the speaker, the metadata performance of this IO-500 run was not limited by any server-side resources, so adding more clients (like WekaIO's top-scoring run with 345 clients) could have pushed this number higher.  It was also stated that the staggering IOR read performance was limited by the aggregate Optane DIMM bandwidth which is a testament to how highly optimized the data path is.

    Actually using DAOS

    This is all using the DAOS native API though, and unless you intend to rewrite all your open()s and write()s as daos_pool_connect() + daos_cont_open() + daos_array_open()s and daos_array_write()s, it's hard to tell what this really means in terms of real-world performance.  Fortunately there was a great set of talks about the DAOS POSIX compatibility layer and related middleware.  I described the POSIX middleware a little in my recap of ISC'19, but it's much clearer now exactly how a POSIX application may be adapted to use DAOS.  Ultimately, there are three options that DAOS provides natively:

• libdfs, which is a DAOS library that provides a POSIX-like (but not POSIX-compatible) API into DAOS.  You still have to connect to a pool and open a container, but instead of reading and writing to arrays, you read and write arbitrary buffers to byte offsets within file-like objects.  These objects exist in a hierarchical namespace, and there are functions provided by libdfs that map directly to POSIX operations like mkdir, rmdir, statfs, etc.  Using libdfs, you would still have to rewrite your POSIX I/O calls, but there would be a much smaller semantic gap since POSIX files and directories resemble the files and directories provided by libdfs.  A great example of what libdfs looks like can be found in the IOR DFS backend code, and a rough sketch also follows this list.
    • dfuse, which is a FUSE client written on top of libdfs.  With this, you literally get a file system mount point which POSIX applications can interact with natively.  Because this uses FUSE though, such accesses are still generating system calls and memory copies which come with steep latency penalties.
    • libioil, which is a POSIX interception library.  This is what you'd LD_PRELOAD in front of a standard application, and it does the remapping of genuine POSIX API calls into libdfs-native calls without ever going through the kernel.
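To make the libdfs option above concrete, here is a rough sketch of the call sequence written against the public DAOS headers of this era; treat the exact signatures, flags, and defaults as approximate since they may shift between DAOS releases:

```c++
// Rough libdfs sketch: mount a DFS namespace, create a file-like
// object, and write a buffer to a byte offset within it.
#include <fcntl.h>
#include <sys/stat.h>
#include <daos.h>
#include <daos_fs.h>

void dfs_write_example(daos_handle_t poh, daos_handle_t coh)
{
    dfs_t      *dfs;
    dfs_obj_t  *obj;
    char        buf[4096] = {0};
    d_sg_list_t sgl;
    d_iov_t     iov;

    // Mount a DFS namespace on an already-connected pool and open container
    dfs_mount(poh, coh, O_RDWR, &dfs);

    // Opening a file looks a lot like POSIX open(), down to mode bits and
    // O_ flags; the extra arguments pick an object class and chunk size
    dfs_open(dfs, NULL /* parent = root */, "myfile",
             S_IFREG | 0644, O_CREAT | O_RDWR,
             0 /* default object class */, 0 /* default chunk size */,
             NULL, &obj);

    // Writes land at byte offsets within the file-like object
    d_iov_set(&iov, buf, sizeof(buf));
    sgl.sg_nr     = 1;
    sgl.sg_nr_out = 0;
    sgl.sg_iovs   = &iov;
    dfs_write(dfs, obj, &sgl, 0 /* offset */, NULL /* blocking */);

    dfs_release(obj);
    dfs_umount(dfs);
}
```

Compare this against a POSIX open()/pwrite() pair: the shape is nearly the same, which is why a shim like libioil can remap one to the other.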

    Cedric Milesi from HPE presented benchmark slides that showed that using the DFS (file-based) API over the native (array-based) API has no effect on performance:

    Performance scaling of the native DAOS API (which encodes array objects) to the DAOS DFS API (which encodes file and directory objects).  No discernible performance difference.

    Thus, there is no performance difference whether you treat DAOS like an array store (its original design) or a file/directory store (through the libdfs API) as far as bandwidth is concerned.  This is excellent news, as even though libdfs isn't a drop-in replacement for POSIX I/O, it implements the POSIX data model (data is stored as streams of bits) which is a more comfortable look and feel for a storage system than storing typed arrays.  And since libioil is a shim atop libdfs, the above performance data suggests that POSIX applications won't pay significant bandwidth overheads by preloading the POSIX intercept library to get DAOS compatibility out of the box.

    What's less clear is what the metadata overheads of libdfs are.  Because the whole metadata model of DFS (files and directories) is very different from native DAOS (arrays), it's impossible to do a head-to-head comparison of metadata performance.  That said, DFS metadata is only a subset of the full POSIX metadata so it should be faster even on identical hardware.  For example, DAOS only enforces permissions when opening a container, so I would not expect DFS to have any notion of file-level or directory-level ownership or permissions bits.  As such, DFS would not incur the cost of doing an expensive recursive permission check on dfs_open(), and the open rate should be much higher than something that adheres to POSIX.

    Kevin Harms from ALCF also presented a really enlightening slide containing very early performance tests from their internal DAOS testbed using dfuse and libioil:


    This slide is a treasure trove of interesting information:
    1. It implicitly confirms that the verbs provider for libfabric not only works, but works well.  Recall that the Intel testbed from which IO-500 was run used Intel OmniPath 100, whereas the Argonne testbed uses a competitor's fabric, InfiniBand.
    2. Single-stream performance of DAOS using the dfuse interface is 450 MB/sec which isn't terrible.  For comparison, single-stream performance of Lustre on Cray Aries + FDR InfiniBand is about the same.
    3. Using the libioil POSIX interface dramatically increases the single-stream performance which shines a light on how costly using the Linux VFS kernel interface (with FUSE on top) really is.  Not using FUSE, avoiding an expensive context switch into kernel mode, and avoiding a memcpy from a user buffer into a kernel buffer gives a 3x performance boost.
    Again, in the sense that DAOS was meant to address the performance impacts of using a kernel-based storage system for I/O, it looks like DAOS is meeting expectation.

    Finally, Mohamad Chaarawi also spent some time talking about the Lustre/DAOS integration which uses DAOS dfuse to stitch together a Lustre namespace with DAOS DFS namespaces.  I mentioned this in my ISC recap, but there's now a pretty detailed slide about how this will look in practice:


    This Lustre integration won't be quite as rosy as I described earlier since DFS namespaces don't seamlessly merge into the Lustre namespace.  Instead, it looks like DFS namespaces will be mounted in a separate directory hierarchy governed by their pool UUID ("PUUID" in above slide) and container UUID ("CUUID"), and the Lustre namespace will contain symlinks to the DFS mounts.  What exactly creates and destroys these symlinks is unclear; in July it had sounded like Lustre foreign layouts would dynamically stitch DAOS objects into Lustre using the Lustre control plane, but now it sounds like DAOS will behave more like autofs on top of Lustre.
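Pieced together from that slide, the resulting namespace might look something like the sketch below; the paths here are entirely made up for illustration:

```
/daos/<PUUID>/<CUUID>/...          <- dfuse-mounted DFS container
/lustre/myproject/run42/output     -> symlink from Lustre into the mount above
```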


    The burgeoning DAOS community

    Although the progress and increasing tangibility of DAOS is impressive, I was most struck by the diversity of stakeholders represented at the DAOS User Group meeting.  In particular, the participation of HPE (the non-Cray part, no less!) and Lenovo was a surprise to me since neither has an immediate interest in the Argonne exascale system which has been the biggest driver for DAOS development.  Lenovo in particular made the bold statement that they want to sell a DAOS appliance in 4Q2020/1Q2021 called the "DSS-D Integrated Solution with DAOS."

    Oddly enough, the Cray part of HPE was not obviously present at the DAOS User Group despite their involvement in Argonne's Aurora system and activity on the DAOS mailing lists.  This may just be a reflection of Cray's historic reluctance to send engineering staff to SC, but their absence was quite notable in contrast to Lenovo's head-first dive into announcing a DAOS appliance.  There were also no loud voices supporting all of the work that DAOS has put into integrating with Apache Spark, nor were there any vocal supporters of Intel's newly stated ambition to create a native SEG-Y interface (a format used by oil and gas) for DAOS.


    Everything else

There were some interesting tidbits that I picked up at SC this year that don't fit neatly anywhere else in this post but are worth writing down.


    Technical tidbits - the Cray Shasta cabinet

Much like the Cray E1000-F storage enclosure, I have also watched the Cray Shasta cabinet design evolve from a set of CAD diagrams into a living, breathing behemoth of sheet metal and coolant tubing.  SC'19 was the debut of a finished Cray Shasta compute cabinet, and it's a sight to behold:

    The front end of the new Cray Shasta compute cabinet
    These new cabinets are all direct liquid cooled, and the water tubing to each blade from the center manifold is all done up in the above photo.  Compute blades slot in vertically, and each cabinet has French doors that open in directions opposite to each other.  The back end is a little less neat at a glance:

    The back end of the new Cray Shasta compute cabinet
    As with the front end, it opens up with French doors, and interestingly, the rear doors look identical to the front doors.  Although I didn't ask explicitly, my guess is that this means that both the front and rear of the cabinets could feature giant cabinet graphics if so desired.

    The rear cabling is almost all copper 200 Gb/s:

    Cray Slingshot switch blade and Cray chassis management module
And, in a departure from the XC and XT/XE lines, all of this copper cabling uses standard QSFP-DD connectors to carry 2x200 Gb.  In the above photo, you can see a genuine Cray Slingshot switch blade slotted in horizontally (cf. the vertically slotted compute blades) and the water coupling for the liquid-cooled switch blade and management module.  There are no fancy coolant waterfalls with Shasta, but that's probably not a bad thing.  As I've heard it told, the Cray-2 waterfall was a case of making lemonade from lemons; apparently Fluorinert reacts corrosively with curved plastic surfaces.


    Less-technical tidbits

    SC isn't purely about the technology, and truth be told, the personalities and community are the principal reason I attend every year.  It follows that a number of personal highlights for me weren't directly related to HPC at all but were nevertheless very valuable bits of information that I took away from Denver.

    For example, I met two of the big marketing minds behind a major HPC company who really floored me by attributing value to my support of the HPC industry and community through social media.  Social media is really how I got my start in this industry (I started as a hobbyist), so it's gratifying to hear that I might be contributing in a way that is meaningful to kindred spirits who also got into the HPC field from unconventional paths.  It was also a reminder that there are always real people behind every corporate Twitter account, and you very well may meet them at a conference like SC.  When that happens, it can be a really positive experience ("Great to meet the person behind the handle!") or an embarrassing one ("I really did say that three years ago, didn't I?").  This year was the first time it became clear that, in trying to avoid the latter case as a matter of course, the former becomes more prevalent without a whole lot of added effort.

    I also met what may have been the world's slickest corporate sales team, whose brilliantly staged choreography of chance encounters over drinks only became apparent to me as I was walking back to my hotel.  I know that plenty of people dislike interacting with sales, but being a great salesperson is really a craft in and of itself, and I respect people who are masters of their trade regardless of what it is.  And now if I ever find myself in a situation where I need to win someone over cold, I know from whom I can draw inspiration to unleash my inner "customer success manager."  It's a careful balance of drawing out concerns, driving open-ended complaints towards something actionable, and knowing where to cut through red tape and just get the right people talking.

    Another non-technical area in which I was looking for information this year was management philosophy.  I've had the pleasure of working with and for some very talented managers who recognize management as a distinct vocation in and of itself, and I made it a point to get time with a few such people who've consistently built me up over the years.  One of the more pithy philosophies I took away from one colleague is that there are times when neither "asking for permission" nor "asking for forgiveness" is the right approach—rather, sometimes you have to "radiate intent."  I'd never heard this before, but it makes sense in that it allows others the opportunity to say "no" and take explicit ownership of inaction, but it doesn't require the inverse of saying "yes" and taking responsibility for the outcomes.


    Staying organized

Finally, I am always trying to figure out the optimal "workflow" for keeping organized at SC, and this year was no different.  A few years ago I fully committed to simply not bringing my laptop to the conference venue every day in favor of a much lighter and more versatile iPad Pro, and this worked fine with two exceptions:
    • For the Parallel I/O in Practice tutorial I co-presented, I brought my laptop so that all four presenters could project from it and I could use my iPad for keeping realtime notes.
    • For PDSW, I brought my laptop just in case, knowing that I would be in the same room all day.  I wound up presenting from it simply because it provided a better viewing angle from the podium; the room arrangements in Denver were such that it was impossible for a speaker at the podium to see the slides being projected, so he or she would have to rely on the device driving the projector to tell what content was actually being projected.
I did have to use the laptop at the hotel on Saturday night to make some final modifications to my PDSW talk (there are a few obscure features in PowerPoint that simply aren't exposed in the iOS version), but the rest of the conference (including a couple of BOF talks) was iPad-only.

    For notetaking, I started storing all of my notes in Agenda, and where appropriate, used Agenda's feature to create a single note for each calendar entry corresponding to a formal meeting.  For unstructured conversations on the expo floor or between sessions, I kept one catch-all note per day in which I typed everything I could remember as soon as the conversation ended.  For example, the conversation I had with the designers of the E1000-F enclosure was saved as a combination of obscure written details I took as soon as I left the booth and photos I snapped during the conversation.

    In places where typing on an iPad was not possible (e.g., in most technical sessions, where there were no tables), I used Nebo and an Apple Pencil to take handwritten notes.  As it turns out, hand-writing on an iPad sitting on your knee is far more productive than either trying to type text letter-by-letter into the on-screen iPad keyboard or awkwardly balancing the folded-out iPad Pro keyboard on a lap or bag.  Nebo is really good at converting handwriting into ASCII, and that ASCII easily copies out and into an Agenda note.

    This workflow supplanted my approach last year which relied exclusively on using Notability and hand-written notes with OCR.  In meetings where a table was available (i.e., vendor briefings), being able to type rather than handwrite was far more effective in capturing every nuance in spoken word.  I've found that I rarely ever get a copy of the slides shown at SC briefings, so being able to quickly capture exact hardware specs or release dates as someone is trying to gloss over some unflattering details is really not possible when writing everything by hand.

For tracking action items, I've started using Things 3, which is admittedly crazy expensive but is really good at capturing to-do items in under five seconds so that they can be more formally sorted, assigned a start/complete date, etc. at the end of the day or after the conference.

    This all mostly worked, but I did run into a major issue with Agenda where all my ad-hoc notes vanished when I got home from Denver and my home computer decided to sync.  The good news is that Agenda uses internal versioning so the notes' contents weren't truly lost, and their support team was extremely responsive in both recovering my lost notes and releasing a fix within a week.  Not a great first experience with the app, but I'm not sure that'll stop me from using it.


    Concluding thoughts

As always seems to be the case, the week of SC was over before I knew it.  There's a lot I know that I didn't get to see in terms of colleagues, exhibitors, and technical program sessions.  Of everything I did get to see, there's plenty that I wasn't sure I'd be allowed to write up.  So if you happened to get this far and are wondering why I didn't write about the most interesting thing that you got out of the conference this year, odds are that I didn't see it, or if I did, I wasn't sure I was allowed to write about it.  And if I did write about you and you won't get in trouble for being attributed by name, please let me know and I'd be happy to update this post to give you credit.

    Denver was the city of the first SC I ever attended, so I was glad to be back.  I was also happy to get to see snow at least once this year:


    and the convention center did an excellent job of providing space, AV support, catering, and gigantic coffee urns:


I got less sleep on average this year than at any SC prior (around 6 hours a night), and yet I feel like I accomplished less of what was on my list than ever before.  I suppose that's just a sign that the conference (or perhaps my ambition!) continues to grow, and I should expect SC'20 to be even bigger, better, and more exhausting.
\ No newline at end of file
diff --git a/_posts/glennklockwood/2019-6-27-tagbloggercom1999blog-4307061427721284246post-2872044202410197608.md b/_posts/glennklockwood/2019-6-27-tagbloggercom1999blog-4307061427721284246post-2872044202410197608.md
deleted file mode 100644
index d7b9622..0000000
--- a/_posts/glennklockwood/2019-6-27-tagbloggercom1999blog-4307061427721284246post-2872044202410197608.md
+++ /dev/null
@@ -1,20 +0,0 @@
---
author: Glenn K. Lockwood's Blog
author_tag: glennklockwood
blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast
blog_title: Glenn K. Lockwood
blog_url: https://glennklockwood.blogspot.com/search/label/hpc
category: glennklockwood
date: '2019-06-27 01:31:00'
layout: post
original_url: https://glennklockwood.blogspot.com/2019/06/isc19-recap.html
slug: isc-19-recap
title: ISC'19 Recap
---

I was fortunate enough to attend the ISC HPC conference this year, and it was a delightful experience from which I learned quite a lot.  For the benefit of anyone interested in what they may have missed, I took the opportunity on the eleven-hour flight from Frankfurt to compile my notes and thoughts from the week.

    I spent most of my time in and around the sessions, BOFs, and expo focusing on topics related to I/O and storage architecture, so that comprises the bulk of what I’ll talk about below.  Rather than detail the conference chronologically as I did for SC’18 though, I’ll only mention a few cross-cutting observations and trends here.

    I’ll also not detail the magnificent HPC I/O in the Data Center workshop here, but anyone reading this who cares about storage or I/O should definitely flip through the slides on the HPC-IODC workshop website!  This year HPC-IODC and WOPSSS merged their programs, resulting in a healthy mix of papers (in both CS research and applied research), expert talks, and fruitful discussion.

    High-level observations

As is often the case for ISC, there were a few big unveilings early in the week.  Perhaps the largest was the disclosure of several key architectural details surrounding the Aurora exascale system to be deployed at Argonne in 2021.  TACC’s Frontera system, a gigantic Dell cluster stuffed with Intel Cascade Lake Xeons, made its debut on the Top500 list as well.  In this sense, Intel was in good form this year.  And Intel has to be, since of the handful of publicly disclosed pre-exascale systems (Perlmutter, Fugaku) and exascale systems (Frontier, Aurora), only Aurora will be using Intel parts.

The conference also had an anticipatory undertone as these pre-exascale and exascale systems begin coming into focus.  The promise of ARM as a viable HPC processor technology is becoming increasingly credible as Sandia’s Astra machine, an all-ARM cluster integrated by HPE, appeared throughout the ISC program.  These results are paving the way for Fugaku (the “post-K” machine), which will prove ARM and its SVE instruction set at extreme scale.

Also contributing to the anticipatory undertone was a lot of whispering that occurred outside of the formal program.  The recently announced acquisition of Cray by HPE was the subject of a lot of discussion and conjecture, but it was clear that the dust was far from settled and nobody purported to have a clear understanding of how this would change the HPC market.  There was also some whispering about a new monster Chinese system that was on the cusp of making this year’s ISC Top500.  Curiously, the Wuxi supercomputer center (home of Sunway TaihuLight) had a booth on the show floor, but it was completely vacant.

    Also noticeably absent from the show floor was NVIDIA, although they certainly sent engineers to participate in the program.  By comparison, AMD was definitely present, although they were largely promoting the impending launch of Rome rather than their GPU lineup.  A number of HPC solutions providers were excited about Rome because of both high customer demand and promising early performance results, and there wasn’t a single storage integrator with whom I spoke that wasn’t interested in what doors will open with an x86 processor and a PCIe Gen4 host interface.

    Intel disclosures about Aurora 2021

    Perhaps the biggest news of the week was a “special event” presentation given by Intel’s Rajeeb Hazra which disclosed a number of significant architectural details around the Aurora exascale system being deployed at Argonne National Laboratory in 2021.

    Nodes will be comprised of Intel Xeon CPUs and multiple Intel GPUs

    Intel has confirmed that Aurora will be built on Intel-designed general-purpose GPUs based on the “Xe” architecture with multiple GPUs per node.  With this disclosure and the knowledge that nodes will be connected with Cray’s Slingshot interconnect, it is now possible to envision what a node might look like.  Furthermore, combining the disclosure of a high GPU:CPU ratio, the Aurora power budget, and some vague guessing at the throughput of a 2021 GPU narrows down the number of nodes that we may expect to see in Aurora.

    Although no specific features of the Intel GPUs were disclosed, Intel was also promoting their new AVX512-VNNI instructions to position their latest top-bin Xeon cores as the best option for inference workloads.  Coupled with what we can assume will be highly capable GPUs for training acceleration, Intel is building a compelling story around their end-to-end AI portfolio.  Interestingly, news that NVIDIA is partnering with ARM dropped this past week, but NVIDIA’s noted absence from ISC prevented a comparable ARM-NVIDIA AI solution from shining through.
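To make the VNNI pitch concrete, here is a minimal sketch of the int8 dot-product primitive it adds (my own illustration, not Intel sample code; it assumes a VNNI-capable Xeon and a compiler invoked with something like -march=cascadelake):

```c++
#include <immintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
    // acc holds 16 int32 accumulators; a holds unsigned 8-bit
    // "activations" and b holds signed 8-bit "weights," the operand
    // types the instruction expects.
    __m512i acc = _mm512_setzero_si512();
    const __m512i a = _mm512_set1_epi8(3);
    const __m512i b = _mm512_set1_epi8(5);

    // vpdpbusd: 64 int8 multiplies, summed four at a time into the
    // 16 int32 lanes of acc, in a single instruction.
    acc = _mm512_dpbusd_epi32(acc, a, b);

    alignas(64) int32_t out[16];
    _mm512_store_si512(out, acc);
    std::printf("lane 0 = %d\n", (int)out[0]);  // 4 * (3 * 5) = 60
    return 0;
}
```

The appeal for inference is that a multiply-accumulate that would otherwise take several instructions on 32-bit data collapses into one instruction on quantized 8-bit data.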

    System will have over 10 PB of system memory

    Aurora will have a significant amount of memory presumably comprised of a combination of HBM, DDR, and/or Optane persistent memory.  The memory capacity is markedly higher than that of the AMD-based Frontier system, suggesting that Intel may be leveraging Optane persistent memory (which has a lower cost per bit than DDR) to supplement the HBM that is required to feed such a GPU-heavy architecture.

    The storage subsystem will deliver over 230 PB of capacity at over 25 TB/sec

    Perhaps the most interesting part of Aurora is its I/O subsystem, which will use an object store and an all-solid-state storage architecture instead of the traditional parallel file system.  This will amount to 230 PB of usable flash capacity that can operate in excess of 25 TB/sec.  Although I’ll describe this storage architecture in more depth below, combining the performance point of 25 TB/sec with the aforementioned high GPU:CPU ratio suggests that each compute node will be able to inject a considerable amount of I/O traffic into the fabric.  This points to very capable Xeon cores and very capable NICs.
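Some back-of-the-envelope arithmetic illustrates the point (the node count here is purely my own guess, not an Intel disclosure): if Aurora lands somewhere around 10,000 nodes, then 25 TB/sec ÷ 10,000 nodes ≈ 2.5 GB/sec of sustained I/O per node, roughly an entire enterprise NVMe drive's worth of streaming bandwidth flowing through every node's NIC.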

    The programming model for the system will utilize SYCL

Intel has announced that its “One API” effort relies on the Khronos Group’s SYCL standard for heterogeneous programming in C++ rather than the incumbent choices of OpenMP, OpenACC, or OpenCL.  This does not mean that OpenMP, OpenACC, and/or OpenCL won’t be supported, but it does reveal where Intel intends to put all of its effort in enabling its own GPUs and FPGAs for HPC.  They further emphasized their desire to keep these efforts open, standards-based, and portable, undoubtedly drawing a stark contrast with the incumbent GPU vendors.  This is an interesting long-term differentiator, but time will tell whether SYCL is able to succeed where OpenCL has failed and gain a foothold in the HPC ecosystem.
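For those who haven't seen it, below is a minimal SYCL vector addition in the 1.2.1 buffer/accessor style (my own sketch of the standard, not Intel example code).  The key idea is single-source C++: the same lambda is compiled for whatever device the queue targets, whether CPU, GPU, or FPGA:

```c++
#include <CL/sycl.hpp>  // SYCL 1.2.1; newer toolchains ship <sycl/sycl.hpp>
#include <vector>
#include <iostream>

namespace sycl = cl::sycl;

int main() {
    constexpr size_t N = 1024;
    std::vector<float> a(N, 1.0f), b(N, 2.0f), c(N, 0.0f);

    sycl::queue q;  // default selector picks a device at runtime
    {
        sycl::buffer<float, 1> A(a.data(), sycl::range<1>(N));
        sycl::buffer<float, 1> B(b.data(), sycl::range<1>(N));
        sycl::buffer<float, 1> C(c.data(), sycl::range<1>(N));

        q.submit([&](sycl::handler &h) {
            auto ka = A.get_access<sycl::access::mode::read>(h);
            auto kb = B.get_access<sycl::access::mode::read>(h);
            auto kc = C.get_access<sycl::access::mode::write>(h);
            h.parallel_for<class vec_add>(sycl::range<1>(N),
                [=](sycl::id<1> i) { kc[i] = ka[i] + kb[i]; });
        });
    }  // buffers fall out of scope and copy results back to the host

    std::cout << "c[0] = " << c[0] << std::endl;  // prints 3
    return 0;
}
```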

    DAOS will be HPC's gateway drug to object stores

    DAOS (the “Distributed Asynchronous Object Store,” pronounced like it’s spelled) is an object store that Intel has been developing for the better part of a decade in collaboration with the US Department of Energy.  The DAOS name has become overloaded in recent years as a result of it changing scope, focus, and chief architects, and the current version is quite different from the original DAOS that was prototyped as a part of the DOE Fast Forward program (e.g., only one of three original DAOS components, DAOS-M, survives).  A few key features remain the same, though:
    • It remains an object store at its core, but various middleware layers will be provided to expose alternate access APIs and semantics
    • It is specifically designed to leverage Intel Optane persistent memory and NAND-based flash to deliver extremely high IOPS in addition to high streaming bandwidth
    • It relies on user-space I/O via Mercury and SPDK to enable its extreme I/O rates
    • Its storage architecture is still based on a hierarchy of servers, pools, containers, and objects
    Object stores have historically not found success in HPC due to HPC apps’ general dependence on POSIX-based file access for I/O, but the Aurora DAOS architecture cleverly bridges this gap.  I was lucky enough to run into Johann Lombardi, the DAOS chief architect, at the Intel booth, and he was kind enough to walk me through a lot of the details.

    DAOS will provide seamless integration with a POSIX namespace by using Lustre’s new foreign layout feature which allows an entity in the Lustre namespace to be backed by something that is not managed by Lustre.  In practice, a user will be able to navigate a traditional file namespace that looks like any old Lustre file system using the same old ls and cd commands.  However, some of the files or directories in that namespace may be special DAOS objects, and navigating into a DAOS-based object transparently switches the data path from one that uses the traditional Lustre client stack to one that uses the DAOS client stack.  In particular,
    • Navigating into a directory that is backed by a DAOS container will cause the local DAOS agent to mount that DAOS container as a POSIX namespace using FUSE and junction it into the Lustre namespace.  Files and subdirectories contained therein will behave as regular POSIX files and subdirectories for the most part, but they will only honor a subset of the POSIX consistency semantics.
    • Accessing a file that is backed by a DAOS container (such as an HDF5 file) will cause the client to access the contents of that object through whatever API and semantics the DAOS adapter for that container format provides.
    DAOS also includes a preloadable library which allows performance-sensitive applications to bypass the FUSE client entirely and map POSIX API calls to DAOS native API calls.  For applications that use middleware such as HDF5 or MPI-IO, I/O will be able to entirely bypass the POSIX emulation layer and get the highest performance through DAOS-optimized backends.  In the most extreme cases, applications can also write directly against the DAOS native object API to control I/O with the finest granularity, or use one of DAOS's addon APIs that encapsulate other non-file access methods such as key-value or array operations.
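The preload trick itself is a standard piece of plumbing; the sketch below is a generic illustration of how such an interposer works (emphatically not DAOS's actual implementation, with the DAOS-specific logic reduced to a comment):

```c++
// shim.cpp -- build: g++ -shared -fPIC shim.cpp -o libshim.so -ldl
// use:   LD_PRELOAD=./libshim.so ./some_posix_app
#ifndef _GNU_SOURCE
#define _GNU_SOURCE  // for RTLD_NEXT on glibc
#endif
#include <dlfcn.h>
#include <unistd.h>

using write_fn = ssize_t (*)(int, const void *, size_t);

extern "C" ssize_t write(int fd, const void *buf, size_t count) {
    // Resolve the "real" write() the first time we are called.
    static write_fn real_write =
        reinterpret_cast<write_fn>(dlsym(RTLD_NEXT, "write"));

    // A DAOS-style shim would check here whether fd maps to an
    // object-store-backed file and, if so, translate this call into
    // native object I/O instead of handing it to the kernel.
    return real_write(fd, buf, count);
}
```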

A significant amount of this functionality is already implemented, and Intel was showing DAOS performance demos at its booth that used both IOR (using the DAOS-native backend) and Apache Spark.

    The test hardware was a single DAOS server with Intel Optane DIMMs and two Intel QLC NAND SSDs and demonstrated over 3 GB/sec on writes and over a million read IOPS on tiny (256-byte) transfers.  Johann indicated that their testbed hardware is being scaled up dramatically to match their extremely aggressive development schedule, and I fully expect to see performance scaling results at SC this November.
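It's worth doing the arithmetic on that second number: a million 256-byte reads per second amounts to only 1,000,000 × 256 bytes ≈ 0.26 GB/sec of bandwidth.  Transfers that small are exactly the workload that crushes a conventional bandwidth-optimized parallel file system, and serving them at that rate is where the Optane and user-space I/O stack earn their keep.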

    This is all a far cry from the original Fast Forward DAOS, and this demo and discussion on the show floor was the first time I felt confident that DAOS was not only a good idea, but it was a solution that can realistically move HPC beyond the parallel file system.  Its POSIX compatibility features and Lustre namespace integration provide enough familiarity and interoperability to make it something usable for the advanced HPC users who will be using the first exascale machines.

    At the same time, it applies a number of new technologies in satisfying ways (Mercury for user-space network transport, GIGA+ for subtree sharding, Optane to coalesce tiny I/Os, ...) that, in most ways, puts it at technological parity with other high-performance all-flash parallel storage systems like WekaIO and VAST.  It is also resourced at similar levels, with DOE and Intel investing money and people in DAOS at levels comparable to the venture capital that has funded the aforementioned competitors.  Unlike its competitors though, it is completely open-source and relies on standard interfaces into hardware (libfabric, SPDK) which gives it significant flexibility in deployment.

    As with everything exascale, only time will tell how DAOS works in practice.  There are plenty of considerations peripheral to performance (data management policies, system administration, and the like) that will also factor into the overall viability of DAOS as a production, high-performance storage system.  But so far DAOS seems to have made incredible progress in the last few years, and it is positioned to shake up the HPC I/O discussion come 2021.

    The Cloud is coming for us

This ISC also marked the first time I felt that the major cloud providers were converging on a complete HPC solution that could begin eroding campus-level and mid-range HPC.  Although application performance in the cloud has historically been the focus of most HPC-vs-cloud debate, compute performance is largely a solved problem in the general sense.  Rather, data—its accessibility, performance, and manageability—has been the single largest barrier between most mid-range HPC users and the cloud.  The convenience of a high-capacity and persistent shared namespace is a requirement in all HPC environments, but there have historically been no painless ways to produce this environment in the cloud.

AWS was the first to the table with a solution in Amazon FSx, a managed Lustre-as-a-service that makes it much easier to orchestrate an HPC workflow that relies on a high-performance, high-capacity, shared file system.  This has prompted the other two major cloud vendors to come up with competing solutions: Microsoft Azure’s partnership with Cray is resulting in a ClusterStor Lustre appliance in the cloud, and Google Cloud will be offering DDN's EXAScaler Lustre appliances as a service.  And Whamcloud, the DDN division that leads Lustre development, offers its own Lustre Cloud Edition on all three major cloud platforms.

In addition to the big three finally closing this gap, a startup called Kmesh burst onto the I/O scene at ISC this year, offering a cloud-agnostic solution for higher-touch parallel file system integration and management in the cloud for HPC.  Vinay Gaonkar, VP of Products at Kmesh, gave insightful presentations at several big I/O events during the week that spoke to the unique challenges of designing Lustre file systems in a cloud ecosystem.  While architects of on-prem storage for HPC are used to optimizing price-performance on the basis of purchased assets, optimizing price-performance across ephemeral instance types often defies conventional wisdom; he showed that instance types that may be considered slow on a computational basis may deliver peak I/O performance at a lower cost than the beefiest instance available.

Vinay's slides are available online and offer a great set of performance data for high-performance storage in the public clouds.

    The fact that there is now sufficient market opportunity to drive these issues to the forefront of I/O discussion at ISC is an indicator that the cloud is becoming increasingly attractive to users who need more than simple high-throughput computing resources.

    Even with these sorts of parallel file systems-as-a-service offerings though, there are still non-trivial data management challenges when moving on-premise HPC workloads into the cloud that result from the impedance mismatch between scientific workflows and the ephemeral workloads for which cloud infrastructure is generally designed.  At present, the cost of keeping active datasets on a persistent parallel file system in the cloud is prohibitive, so data must continually be staged between an ephemeral file-based working space and long-term object storage.  This is approximately analogous to moving datasets to tape after each step of a workflow, which is unduly burdensome to the majority of mid-scale HPC users.

    However, such staging and data management issues are no longer unique to the cloud; as I will discuss in the next section, executing workflows across multiple storage tiers is no longer a problem unique to the biggest HPC centers.  The solutions that address the burdens of data orchestration for on-premise HPC are likely to also ease the burden of moving modest-scale HPC workflows entirely into the cloud.

    Tiering is no longer only a problem of the rich and famous

    Intel started shipping Optane persistent memory DIMMs earlier this year, and the rubber is now hitting the road as far as figuring out what I/O problems it can solve at the extreme cutting edge of HPC.  At the other end of the spectrum, flash prices have now reached a point where meat-and-potatoes HPC can afford to buy it in quantities that can be aggregated into a useful tier.  These two factors resulted in a number of practical discussions about how tiering can be delivered to the masses in a way that balances performance with practicality.

    The SAGE2 project featured prominently at the high-end of this discussion.  Sai Narasimhamurthy from Seagate presented the Mero software stack, which is the Seagate object store that is being developed to leverage persistent memory along with other storage media.  At a distance, its goals are similar to those of the original DAOS in that it provides an integrated system that manages data down to a disk tier.  Unlike the DAOS of today though, it takes on the much more ambitious goal of providing a PGAS-style memory access model into persistent storage.

    On the other end of the spectrum, a number of new Lustre features are rapidly coalescing into the foundation for a capable, tiered storage system.  At the Lustre/EOFS BOF, erasure coded files were shown on the roadmap for the Lustre 2.14 release in 2Q2020.  While the performance of erasure coding probably makes it prohibitive as the default option for new files on a Lustre file system, erasure coding in conjunction with Lustre’s file-level replication will allow a Lustre file system to store, for example, hot data in an all-flash pool that uses striped mirrors to enable high IOPS and then tier down cooler data to a more cost-effective disk-based pool of erasure-coded files.
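The capacity arithmetic behind that design (my own illustration, not from the BOF) is straightforward: a two-way mirror burns 100% extra capacity (0.5 usable bytes per raw byte), while an 8+2 erasure-coded layout burns only 25% (0.8 usable bytes per raw byte).  Tiering cold data from the former to the latter yields 1.6x more usable capacity per raw byte, which is why mirrored flash on top of erasure-coded disk is such an appealing combination.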

In a similar vein, Andreas Dilger also discussed future prospects for Lustre at the HPC I/O in the Data Center workshop and showed a long-term vision for Lustre that is able to interact with both tiers within a data center and tiers across data centers.

    Many of these features already exist and serve as robust building blocks from which a powerful tiering engine could be crafted.

    Finally, tiering took center stage at the Virtual Institute for I/O and IO-500 BOF at ISC with the Data Accelerator at Cambridge beating out OLCF Summit as the new #1 system.  A key aspect of Data Accelerator’s top score arose from the fact that it is an ephemeral burst buffer system; like Cray DataWarp, it dynamically provisions parallel file systems for short-term use.  As a result of this ephemeral nature, it could be provisioned with no parity protection and deliver a staggering amount of IOPS.

    Impressions of the industry

    As I’ve described before, I often learn the most by speaking one-on-one with engineers on the expo floor.  I had a few substantive discussions and caught on to a few interesting trends.

    No winners in EDSFF vs. NF.1

It’s been over a year since Samsung’s NF.1 (formerly M.3 and NGSFF) and Intel’s EDSFF (ruler) SSD form factors were announced, and most integrators and third-party SSD manufacturers remain completely uncommitted to building hardware around one or the other.  Both form factors have their pros and cons, but by all accounts the stalemate persists.  Whatever happens to break this tie, it is unlikely to involve the HPC market, and it seems like U.2 and M.2 remain the safest bets for the future.

    Memory Landscape and Competition

The HBM standard has put HMC (Hybrid Memory Cube) in the ground, and I learned that Micron is committed to manufacturing HBM starting at the HBM2E generation.  Given that SK Hynix is also now manufacturing HBM, Samsung may start to face real competition in the HBM market as production ramps up.  Ideally this will bring down the cost of HBM components in the coming years, but the ramp seems to be slow, and Samsung continues to dominate the market.

Perhaps more interestingly, 3DXPoint may be diversifying soon.  Although the split between Intel and Micron has been well publicized, I hadn’t realized that Intel will also have to start manufacturing 3DXPoint in its own fabs rather than in the shared facility in Utah.  Micron has also announced its commitment to the NVDIMM-P standard, which could feasibly blow the doors open on persistent memory by enabling processor vendors other than Intel to support it.  However, Micron has not committed to an explicit combination of 3DXPoint and NVDIMM-P.

    Realistically, the proliferation of persistent memory based on 3DXPoint may be very slow.  I hadn’t realized it, but not all Cascade Lake Xeons can even support Optane DIMMs; there are separate SKUs with the requisite memory controller, suggesting that persistent memory won’t be ubiquitous, even across the Intel portfolio, until the next generation of Xeon at minimum.  Relatedly, none of the other promising persistent memory technology companies (Crossbar, Everspin, Nantero) had a presence at ISC.

    China

The US tariffs on Chinese goods are on a lot of manufacturers’ minds.  Multiple vendors remarked that they are either

• thinking about moving more manufacturing from China into Taiwan or North America,
• already migrating manufacturing out of China into Taiwan or North America, or
• under pressure to make shorter-term changes to their supply chains (such as stockpiling in the US) in anticipation of deteriorating conditions.

    I was not expecting to have this conversation with as many big companies as I did, but it was hard to avoid.

    Beyond worrying about the country of origin for their components, though, none of the vendors with whom I spoke were very concerned about competition from the burgeoning Chinese HPC industry.  Several commented that even though some of the major Chinese integrators have very solid packaging, they are not well positioned as solutions providers.  At the same time, customers are now requiring longer presales engagements due to the wide variety of new technologies on the market.  As a result, North American companies playing in the HPC vertical are finding themselves transitioning into higher-touch sales, complex custom engineering, and long-term customer partnerships.

    Concluding thoughts

This year's ISC was largely one of anticipation of things to come rather than demonstrations that the future has arrived.  Exascale (and the pre-exascale road leading to it) dominated most of the discussion during the week.  Much of the biggest hype surrounding exascale has settled down, and gone are the days of pundits claiming that the sky will fall when exascale arrives due to constant failures, impossible programming models, and impossible technologies.  Exascale is beginning to look very achievable and not unduly burdensome: we know how to program GPUs and manycore CPUs already, and POSIX file-based access will remain available for everyone.  Instead, the challenges are similar to what they've always been--continuing to push the limits of scalability in every part of the HPC stack.

I owe my sincerest thanks to the organizers of ISC, its sessions, and the HPC-IODC workshop for putting together the programs that spurred all of the interesting discourse over the week.  I also appreciate the technical staff at many of the vendor booths with whom I spoke.  I didn't name every person from whom I drew insights on the expo floor, but if you recognize a comment you made to me in this post and want credit, please do let me know--I'd be more than happy to give it.  Finally, I apologize to all the people with whom I spoke and the sessions I attended that I did not include here; not everything I learned last week could fit in one post.
\ No newline at end of file
diff --git a/_posts/glennklockwood/2020-5-20-tagbloggercom1999blog-4307061427721284246post-1586489402384574408.md b/_posts/glennklockwood/2020-5-20-tagbloggercom1999blog-4307061427721284246post-1586489402384574408.md
deleted file mode 100644
index 306e651..0000000
--- a/_posts/glennklockwood/2020-5-20-tagbloggercom1999blog-4307061427721284246post-1586489402384574408.md
+++ /dev/null
@@ -1,16 +0,0 @@
---
author: Glenn K. Lockwood's Blog
author_tag: glennklockwood
blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast
blog_title: Glenn K. Lockwood
blog_url: https://glennklockwood.blogspot.com/search/label/hpc
category: glennklockwood
date: '2020-05-20 18:33:00'
layout: post
original_url: https://glennklockwood.blogspot.com/2020/05/exascales-long-shadow-and-hpc-being.html
slug: exascale-s-long-shadow-and-the-hpc-being-left-behind
title: Exascale's long shadow and the HPC being left behind
---

The delivery of Japan's all-CPU Fugaku machine and the disclosure of the UK's all-CPU ARCHER 2 system--both solidly "pre-exascale" machines with pre-exascale budgets--are opening old wounds around the merits of deploying all-CPU systems in the context of leadership HPC.  Whether a supercomputer can truly be "leadership" if it is addressing the needs of today using power-inefficient, low-throughput technologies (rather than the needs of tomorrow, optimized for efficiency) is a very fair question to ask, and Filippo took this head-on.

    Of course, the real answer depends on your definition of "leadership HPC."  Does a supercomputer qualify as "leadership" by definition if its budget is leadership-level?  Or does it need to enable science at a scale that was previously unavailable?  And does that science necessarily have to require dense floating point operations, as the Gordon Bell Prize has historically incentivized?  Does simulation size even have anything to do with the actual impact of the scientific output?

While I do genuinely believe that the global exascale effort has brought nearly immeasurable good to the HPC industry, it's now casting a stark shadow that highlights the growing divide between energy-efficient, accelerated computing (and the science that can make use of it) and all the applications and science domains that do not neatly map to dense linear algebra.  This growing divide causes me to lose sleep at night because it's splitting the industry into two parts with unequal shares of capital.  The future is not bright for publicly funded infrastructure for long-tail HPC, especially since the cloud is aggressively eating up this market.

    Because this causes a lot of personal anxiety about the future of the industry in which I am employed, I submitted the following whitepaper in response to an NSCI RFI issued in 2019 titled "Request for Information on Update to Strategic Computing Objectives."  To be clear, I wrote this entirely on my personal time and without the permission or knowledge of anyone who pays me--to that extent, I did not write this as a GPU- or DOE-apologist company man, and I did not use this as a springboard to advance my own research agenda as often happens with these things.  I just care about my own future and am continually trying to figure out how much runway I've got.

    The TL;DR is that I am very supportive of efforts such as Fugaku and Crossroads (contrary to accusations otherwise), which are looking to do the hard thing and advance the state of the art in HPC technology without leaving wide swaths of traditional HPC users and science domains behind. Whether or not efforts like Fugaku or Crossroads are enough to keep the non-Exascale HPC industry afloat remains unclear.  For what it's worth, I never heard of any follow-up to my response to this RFI and expect it fell on deaf ears.

    Response to “Request for Information on Update to Strategic Computing Objectives”

    G. K. Lockwood
    August 17, 2019

    Preface

    This document was written as a direct response to the Request for Information on Update to Strategic Computing Objectives (Document Number 2019-12866) published on June 18, 2019.  All views expressed within are the personal opinion of its author and do not represent the views or opinions of any individuals or organizations with whom the author may or may not be associated in any professional or personal capacities.  This document was authored without the support, knowledge, or input of any such individuals or organizations, and any similarity between the opinions expressed here and any other individuals or organizations is purely coincidental.

    Question 1. What are emerging and future scientific and technical challenges and opportunities that are central to ensuring American leadership in Strategic Computing (SC), and what are effective mechanisms for addressing these challenges?


While the NSCI Strategic Plan identified four overarching principles that are undeniably required to maintain continued American leadership, its five strategic objectives are, in many ways, mutually incompatible.

In the three years following the initial NSCI plan towards delivering capable exascale, the outcomes of the Aurora and CORAL-2 procurements within DOE have made it undeniably clear that the definition of “capable exascale” necessarily requires the use of GPU technologies.  Because GPUs are, in many ways, accelerators specifically suited for scientific problems that can be reduced to dense linear algebra, this has effectively signaled that scientific challenges which are not reducible to dense linear algebra (and are therefore incompatible with GPU technologies) are, by definition, no longer of strategic significance.

    By bifurcating science domains based on whether they are or are not compatible with GPU-based acceleration, we are now at a crossroads where entire classes of domain science research that have historically run at-scale on CPU-based leadership computing systems will be left behind.  To be clear, this is not simply a matter of engineering—many important classes of scientific challenges are fundamentally incompatible with the GPU accelerator model of computation, and no amount of code modernization will change this fact.  Yet these same science domains, which rely on complex multiphysics applications that are core to strategic areas such as stockpile stewardship and climate science, are of undeniably critical importance to both national security and society at large.

Thus, there is now a clear and growing gap between NSCI’s ambition to deliver capable exascale and the larger mission to maintain leadership in the entirety of strategically important computing in the nation.  There are technical challenges intrinsic to this growing gap, which include pursuing research in hardware and software technologies that approach strategic computing more holistically rather than exclusively from a FLOPS perspective.  The community has long acknowledged that the scope of HPC has surpassed simply performing floating point operations, and the definition of capability computing now includes enabling science that, for example, may require tremendous data analysis capabilities (e.g., moving, transforming, and traversing massive data sets) but have relatively low floating point requirements.  The DOE Crossroads procurement and the Japanese leadership program with its Fugaku system embody this more balanced approach, and there is little doubt that both Crossroads and Fugaku will demonstrate a number of world firsts and, by definition, demonstrate leadership in strategic computing without making all of the sacrifices required to meet today's definition of capable exascale.

    Both Crossroads and Fugaku have required significant R&D investment to enable these dimensions of capability, and the NSCI would do well to explicitly call out the need for continued investment in such directions that are orthogonal to exaflop-level capability.

    Question 2. What are appropriate models for partnerships between government, academia and industry in SC, and how can these partnerships be effectively leveraged to advance the objectives of SC?


    The most impactful models for industry-government partnership in HPC have come in the form of close collaboration between the HPC facilities that deploy extreme-scale systems and the technology providers in industry that create and support the required hardware and software solutions.  Strategy necessarily involves taking input from user requirements, workload characterization, and technology trends to inform future directions, and HPC facilities are uniquely qualified to speak to both user requirements (by virtue of the fact that they directly interact with users in support of HPC systems) and workload characterization (by virtue of the fact that they manage HPC systems).  Complementarily, industry technology providers (vendors) are uniquely qualified to speak to technology directions, marketability, and sustainability in the larger technology market.

    This effective collaboration can take the form of non-recurring engineering such as those contracts associated with large system procurements (often to address more tactical challenges towards strategic computing) or standalone programs such as DOE PathForward (which addresses longer-term technology development towards strategic computing).  In both cases though, industry (not HPC facilities or academic researchers) propose the initial scope of work based on their own understanding of both (1) HPC-specific requirements and (2) larger market and profit prospects.  This latter point is critical because the HPC market alone is simply not large enough to sustain purpose-built technologies, and sustaining new technologies and their peripheral enabling ecosystems requires buy-in from multiple markets.

    The role of academia in research is more complex, as academic research in HPC can be either basic or applied in nature.  Basic research (such as in applied mathematics and algorithm development) has stood on its own historically since such work results in a larger base of knowledge from which specific technology solutions (whether developed by industry or HPC facilities) can be composed both today and in the future.  The federal agencies participating in NSCI can claim credit for funding the basic research outcomes that have been incorporated into innumerable software and hardware technologies in use today.

On the other hand, applied research (such as developing new software systems that may implement the outcomes of basic research) has had very mixed outcomes.  It is often the case that applied researchers who have a direct relationship with neither HPC facilities nor technology providers formulate research projects based on second-hand HPC requirements and technology trends.  It follows that their interpretation of such requirements is incomplete, and their research outcomes are misaligned with the actual needs of HPC facilities and industry.  Barring cases where academic applied research outcomes are so valuable that they stand on their own (of which there are many examples, including Open MPI and Tau), applied research in the absence of such a sustainability path results in a tremendous amount of software that has virtually no long-term (i.e., strategic) value to SC.

This speaks to a gap between applied research in academia and those who apply research in practice that must be closed.  This gap has been perpetuated by a lack of HPC practitioners (domain scientists and applied researchers directly attached to HPC facilities or technology providers) on the committees that evaluate the merit of research.  Thus, a more effective engagement model would couple the academic research pipeline to HPC facilities and industry more closely.  This may range from something as informal as increasing the diversity of review panels and program committees to include representatives from facilities and industry, to a formal requirement that successful research proposals have a clearly defined connection to a specific industry or facility partner.  Regardless of the solution though, funding applied research that will be "thrown over the wall" to HPC facilities and vendors without their input is not compatible with SC.

    Question 3. How do we develop and nurture the capable workforce with the necessary skill and competencies to ensure American leadership in SC? What are effective nontraditional approaches to lowering the barriers to knowledge transfer?


Although virtually every report discussing strategic directions and future requirements of HPC calls for knowledge transfer and building a larger workforce through training and outreach (e.g., see the complete set of DOE Exascale Requirements Reviews), such reports generally neglect two critical realities of employing and retaining a talented workforce at production HPC facilities and in industry.

    The first reality is that the problems intrinsic to modern HPC (solving problems at extreme scales) are no longer exclusive to HPC.  The ubiquity of technology in modern life now means that the entire technology industry must deal with problems at scale as a matter of course.  As such, the HPC community is now competing with well-capitalized commercial entities that have increased the absolute value of a skilled engineer to levels that the scientific research community simply cannot afford.

Thus, the perceived lack of skilled workforce in HPC is not a failing of the workforce development strategy in place; in fact, it may be a great indicator of its success, as it has created a workforce whose skills are valued far beyond the investment put into developing them.  However, this also means that the talented individuals who eschew the higher pay and amenities of the larger technology industry do so for non-monetary reasons (work-life balance, attraction to the science mission, geographic locality).  It is therefore critically important that strategic computing identify these motivators and build upon them to the greatest possible degree to maintain an edge in an extremely competitive hiring landscape.

    The second reality is that the key to an exceptional workforce is not simply a matter of technical knowledge.  There is no shortage of individuals who understand parallel programming in the world, and it is of little strategic value to pursue workforce development strategies that prioritize knowledge transfer as the principal outcome.  Rather, strategic computing requires a workforce that is capable of critical thinking and has a natural drive to solve problems that have never been solved before.  These traits should be emphasized to a far greater degree than the current pedagogical emphasis on material that can be learned from a manual by anyone with a curious mind.

    By definition, very few people in the world have prior experience in world-class HPC.  There are very limited opportunities to build a credible work history in extreme-scale HPC for individuals who are ineligible for student internships or postdoctoral appointments.  As a result, world-class HPC facilities rarely see qualified applicants for open positions when “qualified” is defined on the basis of relevant work experience; a mid-career developer or systems engineer working in a campus-scale HPC organization simply has no opportunities to demonstrate his or her intellectual capability in a way that is outstanding to the facilities that deliver strategic computing resources.

    Thus, an integrative approach to workforce development that (1) emphasizes problem-based learning rather than rote reiteration of manuals and standards documents in an environment where (2) representatives from NSCI constituent agencies can engage with trainees (i.e., potential employees) in a fashion with less formality and pretense than a typical "CV-phone screen-interview" pipeline may reveal a much broader potential workforce whose strengths more closely align with strategic computing.  Such an approach may manifest in the form of intensive boot camps such as the DOE ATPESC program, grants for mid-career retraining in partnership with a leadership computing facility, or sabbatical support for technical staff at the nation’s mid-scale computing facilities.

    Question 4. How can technical advances in SC and other large government and private initiatives, including infrastructure advances, provide new knowledge and mechanisms for executing next generation research?


    No response.

    Question 5. What are the future national-level use cases that will drive new computing paradigms, and how will new computing paradigms yield new use cases?

It is easy to claim that artificial intelligence will be the most important future national use case to drive new computing paradigms.  However, this is a very dangerous statement to make without qualification, as the actual level of readiness for applying AI to solve scientific problems is very low, and the actual scales, aggregate demand, and algorithmic motifs required by such workloads for scientific discovery are poorly defined.  More generally, the requirements of AI workloads at large remain uncertain; for example, Facebook uses a variety of AI techniques in production and has found that each application area requires different computational, storage, and network resources (see Applied Machine Learning at Facebook: A Datacenter Infrastructure Perspective).  Outside of the large hyperscale datacenters, industry consensus suggests that production AI workloads remain largely at single-server scales.  As such, it is difficult to confidently assert what the rate of adoption of scale-out AI will be for strategic computing.

The current leading technique for AI at scale is deep learning, yet scientific discovery is at odds with the black-box nature of this method.  Alternative methods such as decision trees offer much more insight into why a trained model behaves as it does and are more compatible with applying physical constraints to the systems being modeled (e.g., see Iterative random forests to discover predictive and stable high-order interactions).  However, the relative importance of such non-black-box learning techniques in HPC is completely unknown, as are the general optimization points for such techniques in the context of scientific computing.  There is a danger that the similarities between deep learning and many HPC problems (GEMM-heavy workloads) place an artificially high importance on the role of deep learning in SC.  It may be the case that deep learning is the most effective method for applying AI to problems in scientific computing, but caution must be taken to ensure that major challenges in SC not all start to look like deep-learning nails simply because GPUs are a very effective hammer.

    From a domain science perspective, there are very few domain sciences where AI can replace traditional simulation-driven workflows wholesale.  As such, the role of AI in SC will be largely supplementary; scientific workflows may integrate an AI component to generate starting conditions, replace humans in the loop during steering, or identify areas of interest in the results of a primary simulation.  However, it is very unlikely that AI will grow to be of greater significance to scientific computing than modeling and simulation.  Instead, it will be the source of new computational resource requirements that simply did not exist in the past because those tasks were carried out by humans.  The road towards integrating AI into scientific workflows will also be a long and tortuous one, as the field is evolving far more rapidly in industry than scientific computing traditionally has.  Care must be taken that SC not tie itself too closely to a method (and its associated hardware configurations) that may be deprecated in short order.

    Question 6. What areas of research or topics of the 2016 NSCI Strategic Plan should continue to be a priority for federally funded research and require continued Federal R&D investments? What areas of research or topics of the 2016 Strategic Plan no longer need to be prioritized for federally funded research?


    The five objectives outlined in the 2016 NSCI Strategic Plan all gravitate around elements of topics that require continued federal R&D investments, but they do require realignment with the technological, scientific, and economic landscape as it exists now.

    Objective 1: accelerating the development of capable exascale by the mid-2020s

The 2016 NSCI report correctly stated that capable exascale technologies would not be available until the mid-2020s, but DOE pulled its exascale system deliveries into the early 2020s.  As a result, the delivery of exascale had to be accelerated at significantly higher cost: there have been significant capital costs (the first US exascale systems will cost between 2x and 10x their immediate predecessors, either setting a new bar for the cost of future leadership HPC systems or resulting in a bubble in funding for all post-exascale machines), operational costs (the power budgets may exceed the original 20 MW goal by 50%), and opportunity costs (only two of the three CORAL labs actually deployed a CORAL-1 machine).

    Notably absent here is a commensurate increase (2x-10x, 1.5x, or 1.3x as above) in R&D efforts towards making these exascale systems widely accessible to applications that do not fall under the umbrella of ECP funding.  As such, NSCI must continue to emphasize the importance of funding R&D to enable the “capable” component of this objective through the mid-2020s at minimum.

    Objective 2: Developing a coherent platform for modeling, simulation, and data analytics

The convergence of HPC and Big Data was a popular point of discussion when the 2016 report was written, but there has yet to be a compelling, quantitative analysis that demonstrates the difference between a “Big Data” system and an “HPC” system despite the best efforts of several leadership-scale HPC facilities.  The challenge is not one of technology and system architecture; rather, the principal design point for “Big Data” systems outside of the HPC world has simply been one of cost (e.g., scaling out cheap hardware over a cheap network for a very well-defined bulk data access pattern) over performance.  There is absolutely nothing that stops the typical “Big Data” application stacks, both old (e.g., Hadoop and Spark; see this paper) and new (e.g., TensorFlow; see this paper), from running at scale on any modern HPC system, and both have been demonstrated at scale on systems that were sensibly designed.

    As such, this objective need not be emphasized in the future.  Rather, engineering work is required to enable the “Big Data” stacks in use outside of HPC to work efficiently on the HPC systems of tomorrow.  This remains a software, not architectural, problem, and very much an engineering, not research, challenge.

    Objective 3: R&D towards post-CMOS technologies and new paradigms

    It is not the role of NSCI constituent agencies to fund the development of new materials systems explicitly for post-CMOS computing, because these agencies, their review committees, and the academic researchers they fund do not have the insight into the realities of logistics, material costs, and manufacturing required to predict what combination of materials and microarchitectures could actually be turned into a marketable product that can be sustained by the larger technology industry.  In the absence of this insight, R&D towards post-CMOS technologies is likely to produce interesting demonstrations that are impractical for the purposes of actually developing leadership-scale computing systems.  Instead, such research should be funded using facility-industry partnerships as discussed previously in Question 2.

Investing in R&D towards new paradigms in computing should also be considered not with respect to enabling new scientific applications, but rather with respect to accelerating existing scientific workloads that are incompatible with exascale technologies (GPUs).  As discussed in the response to Question 1, there is a very real risk of leaving entire domains of computational science behind as the definition of leadership computing (when equated to exascale) becomes increasingly narrow in scope.  Developing new accelerator technologies that benefit complex application workflows (e.g., multiphysics simulations) is of critical importance in the coming years, lest missions such as stockpile stewardship and climate science fall by the wayside.

    Objective 4: Improving application development and workforce development

The DOE Exascale Computing Project (ECP) has demonstrated a highly effective way of integrating researchers, application code teams, and facilities towards improving application development.  Providing a coherent ecosystem of recommended methods (such as its IDEAS project; e.g., see ECP-IDEAS), development tools (funded under its Software Technologies area), algorithm-application partnerships (through its co-design centers), and application integration efforts (funded under its Hardware and Integration area) is an excellent blueprint for improving application development.  Developing a more generic model for establishing and supporting this style of development beyond the timeline of ECP funding should be pursued.

Improving workforce development should focus less on basic technical training and more on improving critical thinking, as described in the response to Question 3 above.

    Objective 5: Broadening public-private partnership

As described in the response to Question 2 above, public-private partnership is absolutely critical to sustaining SC in the coming years.  The financial incentives driving technology development outside of HPC have come to outstrip the resources available within HPC, and HPC can no longer sustain its technology ecosystem independently.  SC efforts must engage with both technology providers and the primary market forces (the enterprise and hyperscale computing industries) to better understand where technologies, solutions, and opportunities can be pursued in partnership rather than in parallel.

Question 7. What challenges or objectives not included in the 2016 NSCI Strategic Plan should be strategic priorities for federally funded SC R&D? Discuss what new capabilities would be desired, what objectives should guide such research, and why those capabilities and objectives should be strategic priorities?

The mission of providing capable exascale as described in the 2016 NSCI Strategic Plan is proving not to be a sustainable long-term path.  As described in the response to Question 1 above, the first exascale machines stand to accelerate scientific problems that can be cast as dense matrix-matrix multiplication problems, but there are large swaths of scientific problems to which this does not apply.  If one considers the Graph500 BFS list, three of the top five systems are over seven years old and will be retired in 2019.  While graph problems are not prolific in SC, the fact that so little progress has been made in accelerating extreme-scale graph traversal during the seven years that exascale has been aggressively pursued is indicative of some classes of HPC problems being abjectly left behind.

    Thus, a primary objective towards capable exascale must be examining the opportunity costs of the current strategic direction.  If it is determined that there is simply no way to bring forward those types of computational problems that are incompatible with GPU-based acceleration, then a clearer strategy must be formulated to ensure that the scientific challenges being solved by those computational problems do not stagnate.  As it stands, the public discourse surrounding the first-generation US exascale architectures is not universally positive because of this perceived scientific exclusivity of the chosen architectures, and such exclusivity is at odds with both capable computing and computing leadership.

\ No newline at end of file
diff --git a/_posts/glennklockwood/2022-5-27-tagbloggercom1999blog-4307061427721284246post-2015541491779458493.md b/_posts/glennklockwood/2022-5-27-tagbloggercom1999blog-4307061427721284246post-2015541491779458493.md
deleted file mode 100644
index f186db3..0000000
--- a/_posts/glennklockwood/2022-5-27-tagbloggercom1999blog-4307061427721284246post-2015541491779458493.md
+++ /dev/null
@@ -1,56 +0,0 @@
---
author: Glenn K. Lockwood's Blog
author_tag: glennklockwood
blog_subtitle: Personal thoughts and opinions of a supercomputing enthusiast
blog_title: Glenn K. Lockwood
blog_url: https://glennklockwood.blogspot.com/search/label/hpc
category: glennklockwood
date: '2022-05-27 06:42:00'
layout: post
original_url: https://glennklockwood.blogspot.com/2022/05/life-and-leaving-nersc.html
slug: life-and-leaving-nersc
title: Life and leaving NERSC
---

    When word started to spread that I was leaving my job at NERSC for Microsoft, a lot of people either directly or indirectly attributed my decision to being one motivated by money.  Rationalizing my decision to leave is certainly a lot easier with this "Glenn was lured away with bags of cash" narrative, but that wasn't really a factor when I chose to move on.  Rather, my decision is a reflection of where I see the world of HPC going in the coming decade and where I personally wanted to position myself.  For my own therapeutic reasons (and perhaps the benefit of anyone interested in what it's like to work within, and subsequently leave, the DOE HPC complex), I'll try to write it all out here.

    Working at NERSC

    First things first: NERSC has been a wonderful place to work.

[Photo: A typical view from outside NERSC's facility in Berkeley after work during the winter months.  Yes, it really does look like this.]

    When I started in mid-2015, I came in with about three years of prior work experience (two at SDSC doing user support and one at a biotech startup) and knew a little bit about a lot of things in HPC.  But I didn't really know the basics of I/O or storage--I couldn't tell you what "POSIX I/O" really meant or how GPFS worked.  The fact that I got to help author NERSC's ten-year strategy around storage in just two years, was invited to present my view on how to bridge the gap between HPC and enterprise storage at Samsung's North American headquarters a year later, and was trusted to oversee the design and execution of the world's first 35 petabyte all-flash Lustre file system through my first four years is a testament to how much opportunity is available to learn and grow at NERSC.

    There are a couple of reasons for this.

    Stable funding

Perhaps foremost, NERSC (and DOE's Leadership Computing Facilities, ALCF and OLCF) enjoy healthy budgets and financial stability, since worldwide leadership in scientific advancement is generally a national priority for both major political parties in the US.  This means that, regardless of who is president and which party holds majorities in Congress, the DOE HPC facilities can pay their employees and deploy new supercomputers.  This solid funding makes it much easier to invest in staff development and long-term planning; I was able to become a resident I/O expert at NERSC because I was never forced to chase after the funding du jour to make ends meet.  Congress trusts NERSC to allocate its funding responsibly, and NERSC prioritized letting me learn as much as I could without distraction.

    Instant credibility and access

Second, having a NERSC affiliation gives you instant credibility and access in many cases.  It's not necessarily fair, but it's definitely true.  Within my first year at NERSC, I was invited to give a presentation about I/O performance monitoring in Paris because the organizer wanted a lineup of speakers from all the big players in HPC.  I had never been to Europe at that point in my life, but being the I/O guy from NERSC (and being able to present well!) was enough to get me there.  And it was during that trip to Paris that I got to meet--and literally have dinner conversations with--more industry bigshots than I can remember.  And that trip to Paris was not an outlier; pandemic aside, NERSC let me go to Europe at least once or twice every year I've worked there.

[Photo: The first photo I ever took of Notre Dame on the first day I'd ever set foot in Europe.  NERSC sent me there less than a year after I started.]

    Of course, this is not to say that every employee at a DOE HPC facility is wining and dining in Paris every summer.  Many of these opportunities are earned by showing the value of the work you're doing, just like at any job.  But owing to healthy budgets, travel expenses are rarely the limiting factor in chasing after these opportunities.  In addition, going out into the world and talking about what you do is part of the job at a DOE facility; being a leader in the field of HPC is part of the mission of NERSC, ALCF, and OLCF, so doing high-risk, first-of-a-kind work and telling the world about it is uniquely valued within DOE in a way that it is not in industry.


    Smart people

A product of these two factors (stable budget and instant credibility) is a workforce of coworkers and colleagues who are generally very experienced and capable.  There's an interesting mix of laissez-faire management and rigorous process-driven management as a result.


Staff are generally given the freedom to choose their own destiny and focus on work that they enjoy much like in any academic environment; it's not hard to pick up passion projects or even move between groups if things get stale on a day-to-day basis.  Since everyone is working on their own slices of HPC, there's also easy access to world experts in different areas of technology if you need one.  For example, I recall once reviewing a storage system that appeared to rely on multiplexing two 12G SAS links over a single 24G SAS link.  After one email and a few hours, a coworker confirmed, complete with a citation to the SCSI standards, that this was totally possible.  Even if someone in-house didn't know the answer, I had direct access to an engineering manager at a leading storage vendor who owed me a favor and definitely would've known the answer.  It's really, really hard to find as many smart people within arm's reach at most other HPC centers.


At the same time, there is rigorous federal oversight on major projects and procurements to ensure that taxpayer dollars are responsibly spent.  This is a double-edged sword because all of the reporting and reviews that go into massive capital projects make forward progress very slow at times.  All DOE HPC facilities review and re-review everything about these giant supercomputers before making a decision, so by the time the public sees a press release about a new supercomputer, lab staff have spent literal years going over every detail and risk.  It sometimes may not seem that way (how many problems has Aurora had?), but rest assured that every schedule slip or technology change the public hears about was preceded by countless hours of meetings about risk and cost minimization.  On the flip side, though, you have the opportunity to learn every gory detail about the system directly from the people who designed it.


    Pay

In true millennial fashion, I think it's important to have an open discussion about the pay.  DOE labs pay more than any other HPC facility in the world as far as I am aware, and even in the San Francisco Bay Area, salary at NERSC is comparable to the base salaries offered by all the big tech companies.  You can get an idea of what entry-level salaries look like (think: first job after a postdoc or a few years out of undergrad) by searching H1B visa postings, and anecdotally, I'd wager that a typical HPC job at NERSC pays about 2x that of the same job at a typical US university and 3x-4x that of the same job at a British or European university.  All the labs pay about the same to boot, so an HPC job at somewhere like Oak Ridge can afford you a relatively luxurious lifestyle.


Don't get me wrong though; buying a Bay Area house on a single NERSC salary alone would be tough in the same way that buying a Bay Area house on any single salary would be.  And while NERSC's compensation is comparable to the base salary of the big tech companies, that base is about all you can get since DOE labs cannot offer equity or substantial bonuses.  This is less of a gap if you're just starting out, but anyone who's looked at compensation structures in tech knows that stock-based compensation, not base salary, dominates total compensation as you move up.


    So, if money wasn't an issue for me and NERSC is such a great place to work, why would I ever leave?


    The road ahead for HPC

    On one hand, HPC's future has never been brighter thanks to how much life (and money!) the AI industry is bringing to the development of HPC technologies.  We have new all-flash file systems, gigantic GPUs, awesome CPU memory technologies, and mixed-precision techniques in the HPC space that were all directly driven by developments primarily intended for AI workloads.  On the other hand, leadership HPC appears to be engaging in unsustainable brinkmanship while midrange HPC is having its value completely undercut by cloud vendors.  I've not been shy about my overall anxiety about where HPC is going because of this, but I'll elaborate now that the exascale race has been won.


    The future of leadership HPC

    Without some monumental breakthrough in transistor technology, there is only one path forward in continuing to build faster and faster supercomputers in the next decade: pour more and more energy (and dissipate more and more heat) into larger and larger (and more and more) GPUs.


    The goal post for exascale power keeps moving because that's been the easiest way to hit the mythical exaflop milestone; while the original goal was 20 MW, Frontier is coming in at 29 MW and Aurora at "under 60 MW."  Not only is this just a lot of power to feed into a single room, but the cost and effort of actually building this infrastructure is newsworthy in and of itself these days.  At the current trajectory, the cost of building a new data center and extensive power and cooling infrastructure for every new leadership supercomputer is going to become prohibitive very soon.


HPC data centers situated in places where the cost of electricity and real estate (stacked atop the risk of earthquake or wildfire) further skew the economics of just adding more power are going to run up against this first.  It used to be easy to dismiss these practicality concerns by arguing that colocating scientists with supercomputers created immeasurable synergy and exchange of ideas, but the fact that science never stopped during the work-from-home days of the pandemic has taken a lot of air out of that argument.


    My guess is that all the 50-60 MW data centers being built for the exascale supercomputers will be the last of their kind, and that there will be no public appetite to keep doubling down.


    Given this, DOE's leadership computing facilities are facing an existential threat: how do you define leadership computing after exascale if you can't just add another 50% more power into your facility?  How do you justify spending another $600 million for a supercomputer that uses the same power but only delivers 15% more performance?  You can pour similarly huge amounts of money into application modernization to accelerate science, but at the end of the day, you'd still be buying a lot of hardware that's not a lot faster.


    The future of places like NERSC

NERSC is probably a little better off since its lack of an exascale machine today gives it at least one more turn of the crank before it hits a hard power limit in its data center.  That gives it the ability to deploy at least one more system after Perlmutter that is significantly (at least 2x) more capable but draws significantly more power.  However, compared to Frontier and Aurora, such a system may still look rather silly when it lands in the same way that Perlmutter looks a bit silly compared to Summit, which was funded by the same agency but deployed years earlier.


    And therein lies the dilemma of centers like NERSC--how do you position yourself now so that by the time you deploy an HPC system that is close to maxing out on power, it is sufficiently different from a pure-FLOPS leadership system that it can solve problems that the leadership systems cannot?


The easy go-to solution is to craft a story around "data-centric" supercomputing.  We did this when I was at the San Diego Supercomputer Center when we were budget-limited and had to differentiate our $12 million Comet supercomputer from TACC's $30 million Stampede.  You invest more in the file system than you would for a pure-FLOPS play, you provide low-cost but high-value onramps like Jupyter and science gateways to enable new science communities that have modest computing needs, and you fiddle with policies like allocations and queue priority to better suit interactive and urgent computing workloads.  From a productivity standpoint, this can be a great story since users will always respond well to lower queue wait times and fewer frustrations with the file system.  From a system architect's standpoint, though, this is really boring.  The innovation happens in policies and software, not clever hardware or design, so there's very little that's new for a system designer to think about in this case.


    A more innovative approach is to start thinking about how to build a system that does more than just run batch jobs.  Perhaps it gives you a private, fast file system where you can store all your data in a way indistinguishable from your personal laptop.  Perhaps it gives you a convenient place to run a Jupyter notebook that has immediate access to a powerful GPU.  Or perhaps it gives you all the tools to set up an automated process where all you have to do is upload a file to trigger an automatic data analysis and reduction pipeline that returns its output to a shiny HTTP interface.  Such a system may not be able to crank out an exaflop using HPL, but does that matter if it's the only system in the country that supports such automation?


    There are interesting system architecture questions in the latter case, so as a system designer, I much prefer it over the "data-centric" angle to non-exaflop supercomputing strategies.  But there remains a problem.


    The problem: cloud

    Such a "more than just batch jobs" supercomputer actually already exists.  It's called the cloud, and it's far, far ahead of where state-of-the-art large-scale HPC is today--it pioneered the idea of providing an integrated platform where you can twist the infrastructure and its services to exactly fit what you want to get done.  Triggering data analysis based on the arrival of new data has been around for the better part of a decade in the form of serverless computing frameworks like Azure Functions.  If you need to run a Jupyter notebook on a server that has a beefy GPU on it, just pop a few quarters into your favorite cloud provider.  And if you don't even want to worry about what infrastructure you need to make your Jupyter-based machine learning workload go fast, the cloud providers all have integrated machine learning development environments that hide all of the underlying infrastructure.


    And therein lies the problem: the definition of "innovation" as non-exaflop HPC runs up against this power wall might actually mean "catching up to the cloud."


    This is not to say that NERSC-like HPC centers are entirely behind the cloud; all the DOE HPC facilities have bigger, faster, and more convenient parallel file systems that are generally always on and where data is always somewhere "fast."  They also provide familiar, managed software environments and more egalitarian support to small- to mid-scale science projects.  DOE HPC also takes the most risk in deploying unproven technologies to shake them out before they become available to the wide market.


    However, those gaps are beginning to close.  You can stick a full Cray EX system, identical to what you might find at NERSC or OLCF, inside Azure nowadays and avoid that whole burdensome mess of building out a 50 MW data center.  You can also integrate such a system with all the rich infrastructure features the cloud has to offer like triggered functions.  And when it comes to being first to market for risky HPC hardware, the cloud has already caught up in many ways--Microsoft deployed AMD Milan-X CPUs in their data centers before any HPC shop did, and more recently, Microsoft invested in AMD MI-200 GPUs before Frontier had a chance to shake them out.


    Given this steep trajectory, I see only two scenarios for large-scale, non-exaflop HPC facilities in the 10+ year horizon:

    1. They develop, adopt, steal, or squish cloud technologies into their supercomputers to make them functionally equivalent to cloud HPC deployments.  They may be a little friendlier to scientific users since cloud functionality wasn't designed for scientific computing alone, but they also may not be as stable, mature, or feature-rich as their cloud cousins.
    2. They find better overall economics in eventually moving to massive, long-term, billion-dollar deals where flagship HPC systems and their "more than just batch jobs" features are colocated inside cloud datacenters sited at economically advantageous (that is, cheap power, cooling, and labor) locations in the country.

There's also a grey area in between where national HPC facilities consolidate their physical infrastructure in cheap areas to manage costs but still self-manage their infrastructure rather than fully outsource to a commercial cloud.  CSCS has hinted at this model as their future plan since they cannot build 100 MW datacenters in Switzerland, and this is proof that leading HPC facilities around the world see the writing on the wall and need to maneuver now to ensure they remain relevant beyond the next decade.  Unfortunately, the process of consolidating the physical infrastructure across the DOE HPC sites would likely be mired in Congressional politics and take at least a decade to work out.  Since serious work towards this hasn't started yet, I don't envision such a grey-area solution emerging before all the DOE facilities hit their power limit.


    Hopefully I've painted a picture of how I perceive the road ahead for large-scale HPC facilities and you can guess which one I think will win out.


    Final thoughts

    I have every confidence that there will still be DOE HPC facilities in ten years and that they will still be staffed by some of the brightest minds in HPC.  And even if a cloud-based HPC facility ultimately consumes centers like NERSC, I don't think many people would be out of work.  The vast majority of what DOE's HPC people do is think carefully about technology trends, maintain a deep understanding of user requirements, provide excellent support to its thousands of users, and keep complex supercomputers running well.  Those jobs don't go away if the supercomputer is in the cloud; it's just the physical location, the hands doing physical hardware swaps, and the breadth of vendor interactions that may change.


For me as a system architect though, it's become too hard for me to catch up to all the new technologies and techniques HPC needs for the future while also building up other staff to be masters of today's I/O challenges.  I found myself at a fork in the road.  One path would mean catching up on a technical level and then getting in front of where the future of HPC lies before it gets there.  The other path would mean trying to steer the entire DOE HPC ship in the right direction, however long that may take, and having faith that the people I bring along can race far enough ahead to tell me if we're still going where we need to go.  Perhaps a bit selfishly, I chose the former.  I'm just not ready to give up on racing ahead myself yet, and the only way I could hope to catch up was to make it a full-time job.


I don't claim to know the future, and a lot of what I've laid out is speculative at best.  NERSC, ALCF, or OLCF very well may build another round of data centers to keep the DOE HPC party going for another decade.  However, there's no denying that the stakes keep getting higher with every passing year.


    That all said, DOE has pulled off stranger things in the past, and it still has a bunch of talented people to make the best of whatever the future holds.


    \ No newline at end of file diff --git a/_posts/markhpc/2021-11-22-crimson-2021q3.md b/_posts/markhpc/2021-11-22-crimson-2021q3.md deleted file mode 100644 index 09f899e..0000000 --- a/_posts/markhpc/2021-11-22-crimson-2021q3.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Mark Nelson's Blog -author_tag: markhpc -blog_subtitle: I like to make distributed systems go fast. -blog_title: Mark Nelson’s Blog -blog_url: https://markhpc.github.io/ -category: markhpc -date: '2021-11-22 00:00:00' -layout: post -original_url: https://markhpc.github.io/2021/11/22/Crimson-2021Q3.html -slug: ceph-crimson-2021-q3-project-update -title: Ceph Crimson 2021 Q3 Project Update ---- - -

This is the first time we’re seeing Bluestore in Crimson beating Bluestore in Classic in some (low core count) tests. Starting to see lower tail latency as well, which is a really good sign. Top-end performance will be contingent on multi-reactor support, though. Slides available here.

    \ No newline at end of file diff --git a/_posts/markhpc/2021-7-29-crimson-2021q2.md b/_posts/markhpc/2021-7-29-crimson-2021q2.md deleted file mode 100644 index 7d0988f..0000000 --- a/_posts/markhpc/2021-7-29-crimson-2021q2.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Mark Nelson's Blog -author_tag: markhpc -blog_subtitle: I like to make distributed systems go fast. -blog_title: Mark Nelson’s Blog -blog_url: https://markhpc.github.io/ -category: markhpc -date: '2021-07-29 01:00:00' -layout: post -original_url: https://markhpc.github.io/2021/07/29/Crimson-2021Q2.html -slug: ceph-crimson-2021-q2-project-update -title: Ceph Crimson 2021 Q2 Project Update ---- - -

    Slides are available here.

    \ No newline at end of file diff --git a/_posts/markhpc/2021-8-30-crimson-classic.md b/_posts/markhpc/2021-8-30-crimson-classic.md deleted file mode 100644 index c84ad44..0000000 --- a/_posts/markhpc/2021-8-30-crimson-classic.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Mark Nelson's Blog -author_tag: markhpc -blog_subtitle: I like to make distributed systems go fast. -blog_title: Mark Nelson’s Blog -blog_url: https://markhpc.github.io/ -category: markhpc -date: '2021-08-30 01:00:00' -layout: post -original_url: https://markhpc.github.io/2021/08/30/Crimson-Classic.html -slug: crimson-vs-classic-1-nvme-multi-osd-analysis -title: Crimson vs Classic 1 NVMe Multi-OSD Analysis ---- - -

    Spreadsheet looking at Crimson vs Classic performance when scaling multiple OSDs on one NVMe drive. Done to simulate what we can hopefully expect from multi-reactor down the road. Includes cycles/OP comparisons as well.

    \ No newline at end of file diff --git a/_posts/markhpc/2022-1-12-age-binning.md b/_posts/markhpc/2022-1-12-age-binning.md deleted file mode 100644 index 75a6fab..0000000 --- a/_posts/markhpc/2022-1-12-age-binning.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Mark Nelson's Blog -author_tag: markhpc -blog_subtitle: I like to make distributed systems go fast. -blog_title: Mark Nelson’s Blog -blog_url: https://markhpc.github.io/ -category: markhpc -date: '2022-01-12 00:00:00' -layout: post -original_url: https://markhpc.github.io/2022/01/12/Age-Binning.html -slug: cache-age-binning-pr-finally-merged- -title: Cache Age Binning PR Finally Merged! ---- - -

I’ve had this PR hanging around in various forms for years. It’s basically the last piece of the OSD memory target code. We can now get a “binned” view of the relative ages of items in different LRU caches and dynamically adjust target sizes for different caches. PR is here and memory usage behavior charts are here.
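
To sketch the idea (a Python toy with made-up bin edges; the real implementation is the C++ in the PR), each cache timestamps its entries so that a tuner can compare caches by how many bytes they hold in each age bin:

```python
import time
from collections import OrderedDict

AGE_BIN_EDGES = [1, 5, 30, float("inf")]  # seconds; illustrative only

class BinnedLRU:
    """Toy LRU cache that reports bytes held per age bin."""

    def __init__(self):
        self.items = OrderedDict()  # key -> (size_bytes, inserted_at)

    def insert(self, key, size_bytes):
        self.items[key] = (size_bytes, time.monotonic())
        self.items.move_to_end(key)  # newest entries at the end

    def binned_sizes(self):
        """Bytes per age bin; a tuner can shift memory targets toward
        caches whose bytes sit in the youngest (hottest) bins."""
        now = time.monotonic()
        totals = [0] * len(AGE_BIN_EDGES)
        for size_bytes, inserted_at in self.items.values():
            age = now - inserted_at
            for i, edge in enumerate(AGE_BIN_EDGES):
                if age <= edge:
                    totals[i] += size_bytes
                    break
        return totals
```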

    \ No newline at end of file diff --git a/_posts/markhpc/2022-10-24-qemu-kvm.md b/_posts/markhpc/2022-10-24-qemu-kvm.md deleted file mode 100644 index a4e83bf..0000000 --- a/_posts/markhpc/2022-10-24-qemu-kvm.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Mark Nelson's Blog -author_tag: markhpc -blog_subtitle: I like to make distributed systems go fast. -blog_title: Mark Nelson’s Blog -blog_url: https://markhpc.github.io/ -category: markhpc -date: '2022-10-24 01:00:00' -layout: post -original_url: https://markhpc.github.io/2022/10/24/qemu-kvm.html -slug: qemu-kvm-ceph-librbd-performance -title: QEMU/KVM + Ceph Librbd Performance ---- - -

Check out my blog post at the ceph.io website about tuning QEMU/KVM for high performance with librbd. We got over 123K random read IOPS with 16K IOs from a single VM!

    \ No newline at end of file diff --git a/_posts/markhpc/2022-11-8-osd-cpu-scaling.md b/_posts/markhpc/2022-11-8-osd-cpu-scaling.md deleted file mode 100644 index 5c333b8..0000000 --- a/_posts/markhpc/2022-11-8-osd-cpu-scaling.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Mark Nelson's Blog -author_tag: markhpc -blog_subtitle: I like to make distributed systems go fast. -blog_title: Mark Nelson’s Blog -blog_url: https://markhpc.github.io/ -category: markhpc -date: '2022-11-08 00:00:00' -layout: post -original_url: https://markhpc.github.io/2022/11/08/OSD-CPU-Scaling.html -slug: ceph-osd-cpu-scaling-part-1 -title: Ceph OSD CPU Scaling - Part 1 ---- - -

    Last summer we had a user that hit some performance issues based on a recommendation to use 2 cores per OSD in their systems. I wanted to provide some data for the community and wrote up a blog post on the ceph.io website. Please take a look!

    \ No newline at end of file diff --git a/_posts/markhpc/2022-4-13-spooky-allocator.md b/_posts/markhpc/2022-4-13-spooky-allocator.md deleted file mode 100644 index 6608a71..0000000 --- a/_posts/markhpc/2022-4-13-spooky-allocator.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Mark Nelson's Blog -author_tag: markhpc -blog_subtitle: I like to make distributed systems go fast. -blog_title: Mark Nelson’s Blog -blog_url: https://markhpc.github.io/ -category: markhpc -date: '2022-04-13 01:00:00' -layout: post -original_url: https://markhpc.github.io/2022/04/13/Spooky-Allocator.html -slug: spooky-allocator-issues-and-fixes -title: Spooky Allocator Issues and Fixes ---- - -

Recently we started noticing performance issues in the main branch of Ceph that ultimately were traced back to a commit last summer that changed parts of our AVL and hybrid disk allocator implementations in bluestore. Strangely, the issue only affected some of the NVMe drives in our test lab but not others. The quick fix was to always update and save the allocator’s cursor position so that we don’t search (and fail) over and over in fast-fit mode for every allocation request. Another interesting offshoot of this though is that it may be much nicer to limit fast-fit searches based on time rather than byte distance or the number of iterations.
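
Conceptually, the fix boils down to remembering where the last search ended instead of restarting from the beginning every time; a toy sketch (not bluestore's actual allocator code):

```python
class FastFitToy:
    """Toy free-extent list illustrating only the saved-cursor idea."""

    def __init__(self, free_extents):
        self.free = free_extents  # list of (offset, length) tuples
        self.cursor = 0           # persists across allocate() calls

    def allocate(self, want):
        # Resume from the saved cursor so that a long run of too-small
        # extents is not re-scanned (and re-failed) on every request.
        n = len(self.free)
        for step in range(n):
            i = (self.cursor + step) % n
            offset, length = self.free[i]
            if length >= want:
                self.free[i] = (offset + want, length - want)
                self.cursor = i  # save progress for the next call
                return offset
        return None  # a real allocator would fall back to best-fit here
```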

    \ No newline at end of file diff --git a/_posts/markhpc/2022-5-26-bluewal.md b/_posts/markhpc/2022-5-26-bluewal.md deleted file mode 100644 index 8b9897f..0000000 --- a/_posts/markhpc/2022-5-26-bluewal.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Mark Nelson's Blog -author_tag: markhpc -blog_subtitle: I like to make distributed systems go fast. -blog_title: Mark Nelson’s Blog -blog_url: https://markhpc.github.io/ -category: markhpc -date: '2022-05-26 01:00:00' -layout: post -original_url: https://markhpc.github.io/2022/05/26/BlueWAL.html -slug: experimenting-with-igor-s-bluestore-wal -title: Experimenting with Igor’s Bluestore WAL ---- - -

Igor Fedotov is one of the most knowledgeable developers working on Ceph. He’s started working on replacing our use of RocksDB’s write ahead log with a bluestore native implementation. After tuning we can achieve up to 122K random write IOPS on a single OSD! That’s nearly a 50% improvement over the current main branch and over twice as fast as Pacific!

    \ No newline at end of file diff --git a/_posts/markhpc/2022-7-25-rocksdb-tuning.md b/_posts/markhpc/2022-7-25-rocksdb-tuning.md deleted file mode 100644 index 4d55222..0000000 --- a/_posts/markhpc/2022-7-25-rocksdb-tuning.md +++ /dev/null @@ -1,15 +0,0 @@ ---- -author: Mark Nelson's Blog -author_tag: markhpc -blog_subtitle: I like to make distributed systems go fast. -blog_title: Mark Nelson’s Blog -blog_url: https://markhpc.github.io/ -category: markhpc -date: '2022-07-25 01:00:00' -layout: post -original_url: https://markhpc.github.io/2022/07/25/RocksDB-Tuning.html -slug: ceph-rocksdb-tuning-deep-dive -title: Ceph RocksDB Tuning Deep-Dive ---- - -

    See my post on the Ceph.io blog about tuning RocksDB in Ceph!

    \ No newline at end of file diff --git a/_posts/vsoch/2022-1-7-what-is-hard.md b/_posts/vsoch/2022-1-7-what-is-hard.md deleted file mode 100644 index fcf4446..0000000 --- a/_posts/vsoch/2022-1-7-what-is-hard.md +++ /dev/null @@ -1,88 +0,0 @@ ---- -author: Vanessasaurus -author_tag: vsoch -blog_subtitle: dinosaurs, programming, and parsnips -blog_title: VanessaSaurus -blog_url: https://vsoch.github.io/ -category: vsoch -date: '2022-01-07 12:30:00' -layout: post -original_url: https://vsoch.github.io/2022/what-is-hard/ -slug: things-that-are-hard -title: Things that are Hard ---- - -

I saw a funny tweet on Twitter the other night - it was someone from a large consumer company sharing their vision for “the next generation shopping experience,” and it was a virtual person walking through a supermarket aisle and reaching out to pick up a bottle of wine. I can’t find the specific tweet, but it said something to the effect of:


> Nobody asked for this. Stop making stuff to solve problems that people don’t have


    My dear reader, it me! 😲️ This message hit me really hard, because I am definitely one to build niche tools for use cases that likely don’t exist but seem fun or interesting to me. I also feel pretty disconnected from communities that are innovating and testing new ideas.


    What is hard?


This is a problem that a lot of us have. We build things that nobody needs. We need to focus more on the problems that people are actually facing. I would also scope that to developer workflows, which include automation, testing, and development. Since I have a nice view into my own mental space, here is my list of things that are hard.

1. When I am trying to develop software and I can't open an interface with the code and environment I need
2. That my main interaction with a resource is via SSH
3. When a workflow or even container works in one place but not another
4. When I need to develop, build in CI, push to a registry, and pull. One mistake? Start from scratch
5. When I need to run a job and I have to interact with a job manager and it's hard and annoying
6. Logging or monitoring means looking at text files with cryptic names
7. Automated testing on HPC is not a thing. Build on GitHub and pray.
8. When I can't easily navigate code, documentation, or it's completely missing
9. When I set up everything the way I like it and I have to login to a new system and do it all over again
10. When I want to develop something that uses a cluster resource but there are no exposed APIs.
11. When it's impossible to compare between systems because they are special snowflakes
12. When I can't easily test across the systems that my software is intended for.
13. To scale anything I have to use a job manager, wait hours, and then again if there is one mistake
14. When it takes 2 hours or more to get a node allocated
15. When I can't really make tools for HPC because I'm trying to find workarounds for all these problems

And I’d add a “thing that is annoying”: this obsessive focus on power and scale, in a competitive sense, and this race to be in the top 500 and beat the other guy over all else. The constant need to rebuild clusters means we never focus on the details of how we use them. We do the same things over again. I’ve mentioned these things before, possibly many times, but I need to point them out again.


> Our current developer environments are more like handcuffs than places we are enabled to thrive.


    The reality for me is that I tend to put myself in a new role or environment, and then think of lots of cool ways to extend a particular tool, or build something before it. This is why I’ve made a ton of visualizations, associated tools, or posts for spack - it’s literally just the thing that is right in front of me. Put something else in front of me, such as an entire infrastructure with APIs, and I’d do the same. So why can’t a nice set of developer tools be available for the resources I’m supposed to be using?


    Develop based on specific problems


I think I want to develop with more focus on these problems. Don’t get me wrong - I’ll definitely keep making silly projects. But my vision for the future needs to be oriented toward these pains. These in particular are the problems that I think our community needs to look at, at least given this developer perspective. I say this because I’ve seen and used the dark side - having free rein of beautiful cloud APIs to let me automate to my heart’s content! I only now, without being a part of some cloud or container cluster deployed project, am aware that I don’t have access to these development tools. What is my solution now? I largely don’t ssh into an HPC cluster until I absolutely have to - either to scale something, or to reproduce a workflow on GitHub actions that works there (but then is really challenging to get working on the cluster resource). Indeed this entire thread resulted after a frustrating evening of exactly that.


What isn’t helpful? What isn’t helpful is telling me “This center / place / person has this thing that has solved this problem.” Can I easily access it, and what about the entire research software engineering community? This kind of response shuts down the conversation and makes the developer (myself for example) realize that the person I’m talking to is not interested in thinking about how to inspire change. I’ve been really frustrated recently with mentioning even an abstract idea, and getting shut down with “Oh, that sounds like this other tool.” For a project to reach this “mention status” it needs to be easy to install or use, and not have a barrier of privilege where you have to work at a certain place or know people. Telling me that there is a solution that requires some convoluted steps and permissions not only implies that it is only available to those in privilege, but that the solution is not well adopted enough or shared enough to be truly a solution for our community.


    Inspiring Vision


If we aren’t happy with the current state of the world, what are our options? Well, we could leave our current roles to find another state that is more similar to what we want. Personally speaking, I haven’t hit that point quite yet. I want to try my hardest to formulate a vision for how I want the world to be, and then find opportunities to work on it from where I am. The wisdom here is that no specific role is perfect, and optimally we should place ourselves somewhere where there are resources and open-mindedness for change. It’s up to us to extend our influence as best we can to help drive some possible future. If you try that and fail? At least you tried.


These are the things that are hard. I am going to try harder to have them be the focus of my thinking about the future. I want to make them easier. I’m starting to realize that possibly the reality is that I should think beyond the constraints of HPC, and more toward the kind of infrastructure that I want, and then figure out how to slowly integrate it as a part of our culture too. We can start with a core vision for a future that we want, and then slowly build up tooling and community around that.


    Happy Friday, friends!

    \ No newline at end of file diff --git a/_posts/vsoch/2022-11-18-converged-computing.md b/_posts/vsoch/2022-11-18-converged-computing.md deleted file mode 100644 index 300f3d2..0000000 --- a/_posts/vsoch/2022-11-18-converged-computing.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -author: Vanessasaurus -author_tag: vsoch -blog_subtitle: dinosaurs, programming, and parsnips -blog_title: VanessaSaurus -blog_url: https://vsoch.github.io/ -category: vsoch -date: '2022-11-18 08:30:00' -layout: post -original_url: https://vsoch.github.io/2022/converged-computing/ -slug: converged-computing -title: Converged Computing ---- - -

For many years, there has been a battle between cloud and HPC. The cloud side of the equation says “microservices, cloud native!” and the HPC side says “too expensive!” Conversations often don’t progress because both sides are up-in-arms and focused on why they cannot work together. At best, we might get access to cloud from an HPC center, or a company might present a product as branded for “HPC.” But it’s not truly collaborative in the way that I’d like.


I’ll also step back and comment that (I do not believe) folks (myself included) on the HPC side have done enough to sit at the table. For example, we haven’t been a voice in the Open Containers Initiative (although I’ve tried), nor have we been present (historically) at conferences that are more focused around cloud native technologies. There is no pointing fingers or fault here - it’s just a matter of two different cultures, and it’s been challenging figuring out how to talk to one another, and how to work together. I’ve tried my best to be involved, to the best of my ability, in small ways on both sides. But I’m only one person. This isn’t to say there haven’t been small collaborations, but I believe we can do more.


    Change is Coming


I think this is going to change. The reason is that both sides of the equation have started to realize we have similar goals, and it’s not about creating hybrid environments – having both pancakes and waffles for breakfast – but rather convergence – recognizing that pancakes and waffles are both kinds of breakfast cakes, and we can take features that we like of each to create a breakfast cake that will make everyone happy. The idea of “Converged Computing” comes from my amazing team (see Dan’s talk at KubeCon here) and is the idea that technologies from HPC can be integrated into more traditionally cloud approaches to produce a solution that solves problems on both sides. Explicitly for these projects, it means testing the Flux Framework scheduler alongside Kubernetes. Do we still want portable workflows that can move from an HPC environment to cloud? Of course. However, the niche or gradient that I’m interested in is the space that lives between these two worlds.


While I won’t go into huge detail (this would be more appropriate for a talk), the lab openly works on Flux Framework, a resource manager that (in my opinion) is one of the coolest projects coming out of our space. I started working with these teams a few months ago, and am bringing my excitement and vision for (what I hope to be) a future where we are actively developing alongside other Kubernetes projects, and our work is well-known and established in this space. What does that mean? Let me share some cool work under development. This is all being done publicly on GitHub, so there is no issue to talk about it! My first year or so at the lab I was hired under a research project, and although I learned a lot, I hadn’t felt inspired and driven until starting this work. Let’s talk about some of it! 🎉️


    The Flux Operator


If you aren’t familiar with Kubernetes Operators, let’s step back and talk about a human operator. If you are a sysadmin managing apps with associated services and databases on a cluster, you often had to do maintenance or update tasks like increasing a storage volume, or modifying a service to meet a new user need. As this pattern has emerged as a common thing, the community has come up with the concept of a Kubernetes Operator - an actual controller you install to your cluster that can automate this. In simple terms, after you install an operator to your cluster, you can hand it a desired state (represented in a yaml configuration file) and the operator will do whatever it takes to reach that state. What does that mean in the context of Flux? The Flux Operator is interested in creating what we are calling a “Mini Cluster,” illustrated below.
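
Stripped of all the Kubernetes machinery, that control loop looks something like the following generic sketch (not the Flux Operator’s actual code):

```python
def reconcile(desired: dict, live: dict) -> None:
    """One pass: create, update, and delete until live matches desired."""
    for name, spec in desired.items():
        if live.get(name) != spec:
            live[name] = spec  # create or update toward the desired state
    for name in list(live):
        if name not in desired:
            del live[name]     # garbage-collect what is no longer wanted

desired = {"minicluster-demo": {"size": 4}}  # parsed from the user's yaml
live = {}                                    # what currently exists

while live != desired:  # a real operator re-runs this on every watch event
    reconcile(desired, live)
```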

[Illustration: the Flux Operator “Mini Cluster”]

In Kubernetes object terms this is an Indexed Job, a few config maps, secrets, and a RESTful API and user interface that I designed, exposed as a service. You can read more about our current design here.


This Mini Cluster is generated from a “custom resource definition” or CRD (the yaml you provide), and it can take these parameters. Conceptually, you as the user own the Mini Cluster and can submit jobs to it (either via the web interface or the API) until you are done. When you are done, you can bring down the cluster.
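
As a sketch of what handing that desired state to the operator can look like programmatically (the Kubernetes Python client calls are real; the group, version, and spec fields below are illustrative guesses, so check the Flux Operator documentation for the actual schema):

```python
from kubernetes import client, config

config.load_kube_config()  # or load_incluster_config() inside a pod

minicluster = {
    "apiVersion": "flux-framework.org/v1alpha1",  # assumed group/version
    "kind": "MiniCluster",
    "metadata": {"name": "demo", "namespace": "flux-operator"},
    "spec": {
        # Hypothetical fields: the desired state the operator reconciles to.
        "size": 4,
        "containers": [{"image": "ghcr.io/rse-ops/flux-hpc:latest",
                        "command": "flux submit hostname"}],
    },
}

client.CustomObjectsApi().create_namespaced_custom_object(
    group="flux-framework.org",
    version="v1alpha1",
    namespace="flux-operator",
    plural="miniclusters",
    body=minicluster,
)
```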


We are excited for this work because in the next months (to a bit longer) we are going to be testing different kinds of workloads running under Flux in this Mini Cluster, but on Kubernetes! I’ve started a small repository of dummy examples that I’m extending quickly at rse-ops/flux-hpc, and please open an issue there if you have a suggestion.


    Stay Tuned!


Stay tuned for more work in this space! I’ve been doing a ton of programming in Go and Python, working on a wide range of technologies fairly quickly, and I am very much in my happy place. Please come and join us! ❤️

    \ No newline at end of file diff --git a/_posts/vsoch/2022-11-3-containerize-it-baby.md b/_posts/vsoch/2022-11-3-containerize-it-baby.md deleted file mode 100644 index 3d9795d..0000000 --- a/_posts/vsoch/2022-11-3-containerize-it-baby.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -author: Vanessasaurus -author_tag: vsoch -blog_subtitle: dinosaurs, programming, and parsnips -blog_title: VanessaSaurus -blog_url: https://vsoch.github.io/ -category: vsoch -date: '2022-11-03 09:30:00' -layout: post -original_url: https://vsoch.github.io/2022/containerize-it-baby/ -slug: containerize-it-baby- -title: Containerize It, Baby! ---- - -

I’ve just submitted my entry to the HPC Guru Elevator Pitch Contest for the Supercomputing 2022 conference!


I’m fairly sure (like many of these contests) it will be a politically correct winner - someone whose entry best appeals to the conference - but I’ll take a stand right now that I think my submission is tops in terms of creativity and excited energy! I mean, there is just no alternative when it comes to technologies I’m excited about.


    Containerize it, baby!


    Mic Drop! 🎙️


Regardless of the outcome of this contest, I feel like I’ve already won - I’ve had so much fun making this and sharing it with the community! 🎉️

    \ No newline at end of file diff --git a/about/index.html b/about/index.html new file mode 100644 index 0000000..d4f5748 --- /dev/null +++ b/about/index.html @@ -0,0 +1,107 @@ + + + + + + + About the hpc.social Community Blog - hpc.social - Aggregated Personal Blog + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
hpc.social

High Performance Computing Practitioners and friends /#hpc

    About the hpc.social Community Blog


    This belongs in the hpc.social family of blogs. In particular, this is an aggregated feed of personal posts that meet the following criteria:


    The personal blogs aggregator is the “soul” of the HPC community - HPCers who are personally invested in the minutiae of the work they are doing, the projects they are working on with some content about their culture and pet pictures :D - things that we all find in common and share and talk about.

    +
    + +

The hpc.social blog aims to share ideas, news, and stories from diverse people, centers and groups across the High Performance Computing (HPC) community. Whether you are an advanced practitioner, a developer that touches HPC, or a novice, your voice is welcome here. This community blog reflects exactly that - a syndicated feed of our community voices. It’s one place that you can subscribe to via a variety of channels (below) and also follow instructions to contribute your own feed.


    As a word of caution, we curate authors at the point of adding the blog, but do not regularly curate individual posts that are added nightly via automation. The opinions expressed here are owned by the authors and do not reflect the opinion of the hpc.social community at large. Offensive or inappropriate posts should be reported to info@hpc.social if you wish to report anonymously. You can also submit a pull request to remove the offending feed if you are comfortable doing so publicly.

diff --git a/archive/index.html b/archive/index.html new file mode 100644 index 0000000..a9b5884 --- /dev/null +++ b/archive/index.html @@ -0,0 +1,1011 @@ Archive - hpc.social - Aggregated Personal Blog
hpc.social

High Performance Computing Practitioners and friends /#hpc

    Archive


    2023


    2022


    2021


    2020


    2019


    2018


    2017


    2016


    2015


    2014


    2013


    2012


    2011


    2010

    + + + + + diff --git a/assets/css/highlight.css b/assets/css/highlight.css new file mode 100644 index 0000000..92791bb --- /dev/null +++ b/assets/css/highlight.css @@ -0,0 +1,199 @@ +.highlight table td { padding: 5px; } +.highlight table pre { margin: 0; } +.highlight .gh { + color: #999999; +} +.highlight .sr { + color: #f6aa11; +} +.highlight .go { + color: #888888; +} +.highlight .gp { + color: #555555; +} +.highlight .gs { +} +.highlight .gu { + color: #aaaaaa; +} +.highlight .nb { + color: #f6aa11; +} +.highlight .cm { + color: #75715e; +} +.highlight .cp { + color: #75715e; +} +.highlight .c1 { + color: #75715e; +} +.highlight .cs { + color: #75715e; +} +.highlight .c, .highlight .ch, .highlight .cd, .highlight .cpf { + color: #75715e; +} +.highlight .err { + color: #960050; +} +.highlight .gr { + color: #960050; +} +.highlight .gt { + color: #960050; +} +.highlight .gd { + color: #49483e; +} +.highlight .gi { + color: #49483e; +} +.highlight .ge { + color: #49483e; +} +.highlight .kc { + color: #66d9ef; +} +.highlight .kd { + color: #66d9ef; +} +.highlight .kr { + color: #66d9ef; +} +.highlight .no { + color: #66d9ef; +} +.highlight .kt { + color: #66d9ef; +} +.highlight .mf { + color: #ae81ff; +} +.highlight .mh { + color: #ae81ff; +} +.highlight .il { + color: #ae81ff; +} +.highlight .mi { + color: #ae81ff; +} +.highlight .mo { + color: #ae81ff; +} +.highlight .m, .highlight .mb, .highlight .mx { + color: #ae81ff; +} +.highlight .sc { + color: #ae81ff; +} +.highlight .se { + color: #ae81ff; +} +.highlight .ss { + color: #ae81ff; +} +.highlight .sd { + color: #e6db74; +} +.highlight .s2 { + color: #e6db74; +} +.highlight .sb { + color: #e6db74; +} +.highlight .sh { + color: #e6db74; +} +.highlight .si { + color: #e6db74; +} +.highlight .sx { + color: #e6db74; +} +.highlight .s1 { + color: #e6db74; +} +.highlight .s, .highlight .sa, .highlight .dl { + color: #e6db74; +} +.highlight .na { + color: #a6e22e; +} +.highlight .nc { + color: #a6e22e; +} +.highlight .nd { + color: #a6e22e; +} +.highlight .ne { + color: #a6e22e; +} +.highlight .nf, .highlight .fm { + color: #a6e22e; +} +.highlight .vc { + color: #ffffff; + background-color: #272822; +} +.highlight .nn { + color: #ffffff; + background-color: #272822; +} +.highlight .nl { + color: #ffffff; + background-color: #272822; +} +.highlight .ni { + color: #ffffff; + background-color: #272822; +} +.highlight .bp { + color: #ffffff; + background-color: #272822; +} +.highlight .vg { + color: #ffffff; + background-color: #272822; +} +.highlight .vi { + color: #ffffff; + background-color: #272822; +} +.highlight .nv, .highlight .vm { + color: #ffffff; + background-color: #272822; +} +.highlight .w { + color: #ffffff; + background-color: #272822; +} +.highlight { + color: #ffffff; + padding: 5px; + background-color: #272822; +} +.highlight .n, .highlight .py, .highlight .nx { + color: #ffffff; + background-color: #272822; +} +.highlight .ow { + color: #f92672; +} +.highlight .nt { + color: #f92672; +} +.highlight .k, .highlight .kv { + color: #f92672; +} +.highlight .kn { + color: #f92672; +} +.highlight .kp { + color: #f92672; +} +.highlight .o { + color: #f92672; +} diff --git a/assets/css/style.css b/assets/css/style.css new file mode 100644 index 0000000..aeecbac --- /dev/null +++ b/assets/css/style.css @@ -0,0 +1,219 @@ +:root { + --bg-color: #161f29; + --bg-hover-color:#36404c; + --body-bg: #10171e; + --primary-font-color: #bfbfbf; + --primary-link-color: aqua; + --border-color: #6a6a6a; +} + +[data-theme="light"] { + --bg-color: 
#fafafa; + --bg-hover-color: #ececec; + --body-bg: #fff; + --primary-font-color: #000; + --primary-link-color: darkred; + --border-color: #dadada; +} + +[data-theme="dark"] { + --bg-color: #161f29; + --bg-hover-color:#36404c; + --body-bg: #10171e; + --primary-font-color: #bfbfbf; + --border-color: #6a6a6a; +} + +.author-badge { + background-color: #e50039; + margin-right: 5px; + color: black; + padding:0px 5px; + border-radius:2px; +} + +.btn { + cursor: pointer; +} + +body{ + font-family: 'Catamaran', sans-serif; + font-weight: 300; + background: var(--body-bg); + color: var(--primary-font-color); + margin: 0; + padding: 5px; +} +.container{ + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: space-between; + width: 60%; + margin-left: auto; + margin-right: auto; +} +.post-container{ + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: space-between; + width: 100%; +} + +.footer{ + display: flex; + flex-direction: row; + flex-wrap: wrap; + width: 100%; + padding: 5px 10px; + background: var(--bg-color); + border:1px var(--border-color); +} + +.post-list{ + width: 100%; + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: space-between; + align-items: center; + padding: 5px 10px; + margin-bottom: 5px; + background:var(--bg-color); + border: 1px var(--border-color); +} + +.post-list:hover{ + background: var(--bg-hover-color); +} + +.post-list .post-title{width: 80%; color: var(--primary-font-color); font-weight: 600;} +.post-list .post-date{width: 20%; text-align: right;} + +.post-header{ + width: 100%; + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: space-between; + align-items: center; + padding: 5px 10px; + margin-bottom: 5px; + background:var(--bg-color); + border:1px var(--border-color); +} + +.post-header .post-share{font-weight: 600; color: var(--primary-font-color); display: flex; justify-content: flex-end; align-items: center;} +.post-header .post-date{font-weight: 600;} +.post-header .post-share a{display: flex;} + +.tags-container{ + width: 100%; + display: flex; + flex-direction: row; + flex-wrap: wrap; + align-items: center; + margin-top: 20px; + margin-bottom: 20px; +} + +.post-tag{margin-bottom: 5px;} + +.tags{display: flex; flex-wrap: wrap; justify-content: space-between;} +.tag{background: var(--bg-color); padding: 5px 10px; margin-right: 10px; margin-bottom: 10px; display: flex; align-items: center;} + +.navigation{ + width: 100%; + display: flex; + flex-direction: row; + flex-wrap: nowrap; + justify-content: space-between; + align-items: center; + margin-bottom: 5px; + background:var(--bg-color); + border:1px var(--border-color); + +} +.navigation .prev{width: 45%; overflow: hidden; white-space: nowrap; text-overflow: ellipsis; color: var(--primary-font-color); font-weight: 600; padding: 5px 10px;} +.navigation .next{width: 45%; color: var(--primary-font-color); overflow: hidden; white-space: nowrap; text-overflow: ellipsis; font-weight: 600; text-align: right; padding: 5px 10px;} + +.pagination{ + width: 100%; + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: space-between; + align-items: center; + padding: 5px 10px; + margin-bottom: 5px; +} +.pagination .prev{color: var(--primary-font-color); font-weight: 600;} +.pagination .next{color: var(--primary-font-color); font-weight: 600; text-align: right;} + +.blog-post-content{ + width: 100%; + font-size: 18px; +} + +.post-tag{background: var(--bg-color); color:var(--primary-font-color); 
padding: 5px 10px;} + +.profile{ + width: 100%; + display: flex; + flex-direction: row; + flex-wrap: wrap; + justify-content: space-between; + align-items: center; + padding: 5px 10px; + margin-bottom: 20px; +} +.profile-image-container{width: 40%;} +.profile-image{width: 128px; height: 128px; border-radius: 50%; transition: transform .2s;} +.profile-image:hover{transform: scale(1.05);} +.profile-about{width:60%; text-align: right;} + +.font-2{font-size: 2rem;} +.font-1-5{font-size: 1.5rem;} +.font-1-2{font-size: 1.2rem;} +.font-0-8{font-size: .8rem;} + +.text-right{text-align: right;} +.w-100{width: 100%!important;} + +a:hover{text-decoration: none;} +a{color: var(--primary-link-color); text-decoration: none;} +.footer a{color: var(--primary-font-color); text-decoration: none;} +img{max-width: 100%;} +figure{margin:0; padding: 15px;} +pre{font-size: 14px; max-width: 90vw; overflow: auto;} +.social-icon{height: 24px;} + +@media (min-width: 1200px){ + .container, .container-lg, .container-md, .container-sm, .container-xl { + max-width: 800px; + } +} + +@media (min-width: 1440px){ + .container, .container-lg, .container-md, .container-sm, .container-xl { + max-width: 960px; + } +} +@media only screen and (max-width: 768px) { + .container{width: 100%;} + .mode:after { + line-height: 22px; + left: 1px; + } + h1{font-size: 1.5rem;} + .profile-image{width: 96px; height: 96px;} +} + +[data-theme="dark"] .social-icon{filter: invert(100%) sepia(50%) saturate(0%) hue-rotate(360deg) brightness(100%) contrast(100%);} +.mode {float: right;position: relative;margin: auto;width: 42px;height: 22px;background: #F1F2F4;border-radius: 20px;cursor: pointer;} +.mode:after {content: "☀️";position: absolute;top: 0;left: 4px;font-size: 14px;height: 22px;line-height: 23px;text-align: center;transition: all 0.3s ease;} +.mode.active {background: #0771ca;} +.mode.active:after {content: "🌛";transform: translateX(20px);} +.mode.active span {transform: translateX(0);} +.mode span {position: absolute;z-index: 1;margin: 2px;display: block;width: 18px;height: 18px;background: white;border-radius: 50%;box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);transform: translateX(20px);transition: all 0.4s ease;} diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..9918eeda5a2af872785f7bb0d6c57ce834022661 GIT binary patch literal 689723 zcmeFYWmH_v(l$J}!{81xgS)%CySux)y9D>(?(QCfJ0yVw3l2dN+$9k5lKaSWpY^?e zp0&RB-$&EDNrS65wKRlE0|-m$96GHA#|$N&HUO-@!)9RPr({PRVGe{HGA{DcGm zP<`;%)b&(1^8vZKyI9*fT7f+MT&+M>zIN6CfbVKWojp-&tHzup72w%uuQ%j;=d2>k0x&l3EG}mW_@gY-VqN9`DOm=IX-nj zdM>0#^*K-!rQ+dH>Y;b=S#f50evQ1U`Qzd7@}bDg;BV#MdoS0`Ax}Lm z{@UFtahK&Og1;+oxI)W+mno#1z`pFmZ4#&lZ}Ge+*##OUz&*Bk{d^gDT3=MXF6#eL zRUH_D|MKj0@6hi4?62_CD)sn&StMv9>S~7N^L@}R`FZ}+vd`t=@|Rc>il0`ux4+*n zo#%11W%sl$+&ZX>R9^Le3yL@RzO=#Cenk8mpLu?BYiLVZ+eLlo`7VAyAOVQ9jK-&% zy%T-if7VQBo-{Ew&ck+=y%{gm!Mm$m^l-_uzVcZ%lfPx1<%DnSyd~0SZKo^1=Xm`q zv`b=GJly`6$gks<9|&h!bdlRn_xfy&1RX1=5~*JX`VwOzpE|1dnJ$tC242(*u`wC- zt=>+Wnk^q<)kTJY#r(sR`&u4cE^P0-pfY0&@Zlw__Cy!N=}X|Z%pJkaDFpzfXflnY zc*IEcQntx#joQ&lnT_1!_KQs-g*~f@<50p>+8TAU0z{?ow;tdS{jm%J zvL41UH1x$8cy%l;>w2UM3!JweJHI<`sghEF57zCl-iZEyjb7tC<4QE8gm|m(h@sw3d5cq-n6*8d*Zh zN{frHvsJ>lG~d;sKN&IAscAjq#$Lwjt;chyudrQJkR;;J#X5&#c)rx!kRM2=k5yhk ztnwjM>%wE5Uz}ZJEvEOnVg6%#vjdpUpMBgN<=ET#lNo!zmbLdgU6-s#$LkcbjU;v2 z#FWF_2toV!m5D$GJ52fHYwuw}9o@+C$XQViq`l29)FY9HI6=g`hcH9QL_yFnSJQiI zkp!9qY3)s<{Cy&oc&eKoJS5)X)nBjx`-I@OH>+)n{lpcHjSGg3P5Le>Hr6334GwOX 
zf2sfAcjBV<_v(2WGP2|9X7$Cybw^qd5y7p8?KU8L0C0umhAn_1w{AhsKsyg4EYWmT zs1UcqxC{I0(XBLY+SKAHVK_qF^68GPsV;hY&|-050ilCR8ub${npzLgQbGFlWn1|0 zC}waj2PY168XCb@el;TKPgDAIRAQJ+m;RPURp2Qm+ZD1jT};7Ri49Xdp<3;B5`y{f zVSb%Xqrr1mF)_Q144ATkU_Z7cgI$&nWI%v-@^upTbk7f9c+G zo8Uor^2^WRHQL1% zX*dUVG(bE;HLr~Df;NoQAl1FF{4r8S+^|D$JLiCnaldHs)!vRZEoi$pbIo~^lfY>! zLc(qpLoYQ9ieuiaxDxol#VyIX*^W;m!zBtsfB>bM*Rp7Q6JFfEQc4*~`8B*bJY}&g zw`eQ99Vnt zNGH?n*|%^|VxHN0oo3CHShcN;B$;0LX=d>)x#w~QmdoRD6#bY>~y?it7`|Z*L&#l<*HZMPdy0rAGo>^wL~G1kUfQEipzo%Z&}t?o;oO zhH|PgRuUONrW}m8b&VWaq;ZxoGABOL+;O}Nq@_fn+L_XrgcZ4y@^c*_8gOMf%Pqph z;js-yN5FE~D3j7|f<>h1IiIn|FV@#hcRVex8JnB(k0t7*Qs}8`ckEE zQ|>UpL_F<389r9}<4-QDe(?V=1tH6)RPt_;OW1YJ=Fe_Cl2`k*lys$Ui>G#?l}oi; zoib~wJ}D<8i+lI!{A(xp>kmC-Br$O$^S2dH-PUI#IlGq0S+clN)|xT}|CXl5yrYgNSsKz~ds zyUoH$eZAPKL~q!~*?GZlnwGGHSg=z4g_LsXd^o$U(T`HO=D+`)jdmk*mYUw@+n0y&Q-;orpvQe$D$AZ_}`Jt_iZZZv2S*m_vfL!V`$IRA0 zd1kaslCg|ULdexCAn=)}^3iG1=ux|0ywl8Kzn0{%?AmRab_8B5i@s#61-=_-__J(@ zl>Af7h!UGT9M`s$GvnW|5T|uSF_|mWLJ2{atyN_%pw>gf)e* z=#vK4w!pk8I{%V}_giswWcq+@N}J3`@W)ww*bqcK#q=j!$Mvj>5L{kcUZXW*ib1g?;WgVwd3ELVUZE9djYrD|KR9LJ2!QxZ=m39JiLF*J25ZRwC5xtK?v zb4NlBFbJX>2PIm`(i|gW;L%cjpW6}p>vx38NbcVHG^x%8qU9-Y?G*N4YbikMtsPdGSc|oUOf@WgQmTj9e1Zl9oX> z`YPes<7MTT=sI3$@9D%U>sK?HjE3Q1{%q|{nM(>RARv!@eJ zYa4%C!aM}Yig&NgP$GfZh7L){N>ZI`KB)8zu&@t*62~I71aU79GUw2^2_KJV3x(SP z$7a3ZqNF!yh`iBE@FKCu${Du`v~qL8Vt3z3nS&JLC~4QCnhp|Gweqo1u)A8|FSTNO z3Tfga+`2ZLn}f+L6BbzHKc*_i=W%eha>=9t9|{YAh6%ew(@FLq%nwW*5TJKdT!moC zF5@~=A^8vF;^}-dj|)oj$z`8J1Nnxc<&BDy(XmE-M5=HZB6TtKx2jDOIqxlU0JEWCs1r=5R74R@mL`RC z0`%G%k=NoW?6r(_Fqtk@J&Bk}gfG!68LOO4rqU}@$dfHXIanW@<_lae0u3bvA8HX3 zOV->GvwoE+2>H;sXuDd~uxpdN*=Ap6s-O2X)Gj%8_mDQqzw^2E>MDWPf2hsYyBKymAK%y zpA(#~WK<>|olT(r+=TqAc>ESI|K6SaG(+1&5KOMasZ;%a72~5jH03o}xK=X>Wt835 z%1~3um*LvJStvGlin||%?9Hw0$fdy?`*(zjwu_CTDMYDGs>aBAcyLpWaBiOfv(xyp z;v;pr>aFcNC-36`u3H=o+6~q9gEH<9wjA?5{W_*1$C;8tPP!mI=D@IFJO`h%*5=@7&A4#mJZ0-sP*E)_8=S zOc6)F%h4spZ~(rm;iG>0)bd7E-Gw5+xAFVcE9CdBnmew@Te z_NI;Uvy>?2!^WLa5IAoX7zfsmYC)ewWnDr9yK1RQ@7%P7%GPR|Zb&|-*19lSNp`4d zs+O>hawLO~`Gc5m$DZsV@dSje;Y<=tI}BZ*O5>SA<*&pnl{Sb}PZk>Q)u7UC_3^T8 z;?U%_E<}?*1kjD(wvZ;tCpWzY* z<R~)7**8`q()>h zEy(%Er;j;f`46oJmN>BbmQt%Gclt|hc@_%H<=#;U;0g|5n;sk20ih3Tz5lW7C>ZB; zymzvuj+}X2OEVLT{ygSlLhYT|Ty}~k5Vk-zrYIi^{eDz^>K!@#R+r(FZa#VJ;^wCY zIQfb7oj5C(_F&d8@zR>tH#Z^utweFEs*J~M_o*8mpS`^nxW0f}hr}#RE|=d6x4UDo z%2@5;*~C=Am0(6j*`%1Gf5)>_b_MZ?+Qb~P$H%}TiT-k=ajmr|XQXRogq(|z>4Wnc zWziQ?D^hY$7rRsUK6=_*PnIGcOXv%@^C+azP~#C2%PCg-Kd)I~(7v605uIywKwO#a zLyy11`Q$2}Ts}SRpxcAPOnA04A8pRA1LtUEq^PnRiP(U%)dmWrMB=R5^n_4g7-C5Z z%;VgVEJJ*1`IlOu#;@N$>NtJ;-P(ln_a(|!Y+i)ZyHD*|QqQ=O=kMNB1#2W~xwI0o z>-4=D^o;laNQnSxXGMgreH?TmFK!c~klrMi)j=Q4$Fz;5(C34G+cSvVMd^eI!TX7% zAxVwmjA|^et0ws6_iKz`Ea)|EU9)f!6aIU$^&mHxM3FFWHBd~wBpuJDID8vW}@(P zg5IE^SpHf)PPX&^F!he{aYxY2rJIt6RLCDeET*qNg?Wu zlQ?5=MtBoeRZ_mzjwzngnMsq*%ZWtAP%%&gv*;?iS}@b7t~RliqD@d$kl^dU@4we> zd)^j+oZlVK29yOM+DQShjpJ4>7FVE89} z_@EO;B$p)v?F_N;Qx9(KQ}K)%23K1U2P`=(rN;KV$aBOuduQ5cR9@*}FlW&K1}8L! 
zb`N7B1rk)7rY;Z{y(Zf)Ts-3|1t*H@n@tOM*pj%ya$(KgQ|>h_uB;kQ7_kFoX-YG0 zGRnYWgK!j!bfhU`l6uY7BKIQwCy%CDIKOrqOy~gI`Dg}fbfPul6pG?fgW>zxamiNY zBc4Tq%~FU}?tCY8vuslZ3E86#NW{VeG>xjN?UWp*ee}pI!uG*LdUxoDf(y=;Y3@tY>H$2YrBJ(?1v5sb3rNQ{4{1aI%U z1=Q7p|GUNl4J)@k=Cdt%4~IRElm66QL!Brl_mkT%SW-=oj>UrjLWq~4)06uB5F}V^ z;Jaq1)U@H%hE?Q!zkvj72@*czC}!gn_2B+-S^5z;iGLeOj-`078A157EaD~|Pfav& zcd3+6f+`{~`ZPBEZxVD&4+qr%ZV16S;T^)-4f(Vr-Aa3B0mdEN6= zV9D_Do_NRY-cf3Ks1|_aVvUKwV1}qB@s*-aRd%y)6bx=f)~Pmxm`b*D?T2QzJ+3y- z+)N98fu3&8^yAYgs6E2wr%e6x!d5SXX`p~PCZKubv<*UB z$UexadzuG&<2P9?!@XZa_f?`9(1kKZA%u$9@QwFo9hu!o~QmBby zT-l`@Wo$8IBLfi5lo=(!gIHjOG7d+S&F&HthuEZKnKB;TG(?w2k8O8Y@FEmNKA2)6 zAFKF_iYIKsW#`XckWlspfwR59iU=Q{VeVD%U;6?AosI7H1DM@wF7K;pBN6v|*sl9a zdxzqUo_6!7@cBb`iBXC)5<5dD#BzH&phy)$4(kHr6SnS^Czr=w*2?+!sl_KuBR=XN3Fnt`e(`>284Bl1^fzvW5wH`k>eHcPd<&4yhA5Bgj8!> zsd-$+TTY(!)xWA@H;w&Xhdz#4gZiT=`2R>CLdixRH?Iq)^><6y(LGwA zo{_Q3)pL(4v_ESD!~nYD4=Lw`3MH%=;L?`AS=ud(Znc*1llm}_2U*zOJE)tz-|Ydt zOrlj?a{`Xj`X^`&NozP-YEZ(_O~&M?b!29+8w4kz%g7WJZLWO4RMMZK8x6wxhxD7t zyoiH7!#AWsE^SdX?4{KJ3O{+>IYSBPi6NhIjLi=lU>y3VN;irNja%hYG}}`0EZh*P zErKx^kT@g4bxmC$|;rhI4iRuLy%wITxi0wgajLM5EWo{y{rihND`YQOH%ozDQ zT!ykZ83jDR<%TBP)+i2<_62~$xuua%iZ1I!?@-FZ*FTvL!;xQZ9=)~?lnRBGS1&AS zWNpc2siLWB`PHQhvxyoZ)NKx!jhhmwVY-V25rmLzdgo}^D*2!Xy#VVfceqdMa&$Ie zmw|Ub&iDPVhyQi&JRA+rUZ^JrVzj9wQ>2;aEh!z5pa0ZE!CB2ZdN#CLuC%+oyz|o* zsVidAI?K*(hao7oEh;8(ZTS_tGwZV40KLWsVq8y01L8)0Zu$m6^)@-QxB!+v@m)f29i zVW6QUj;M21={u~^w@~i?aGis9 zE;QxIabUT@Z+Ly`Id%8I?&>~7%f?TtLTu?gi^S-$G^v6Qxs1VUv-#gFfYtY_jm!}z zRF?(SzDf>_HJE`P(U_(1lucB?V|O7l4i{9^$)x_01qkv2g!UQXuh9H-rO^W3(qwnjpn|k(8b0mh%-e0_YiQggCl6 zCnPwR$VFmfM?emLY~->eM!E10hBG>qC~#21b2jl6^dD04|8U^p=qa@B$?^OSPwMu0 zrAf$UDY<8z5&y(Li8D4d^IG%_i<{wir@*0~aA1GwQ&XM1_Gx+&ck#1bMpThWHzXP7 zr&xwF+##E;c1-?_T&!GO)MzMXtNqqfY4F>z!>_;% zos$1jWM5D!|BMm`z#Sk4fp*h|xC)Bm_5bDYWq3w(CtZ`9cWG|ymxc}x_iy6yg62k)Rh z=lsMD7*KumBZT{a={M&oKP|xH6AYCJXBS4Ea$+{81fSJMrx-#c>Q-mZo{loc<4L5+ zQT_W!{Z(Mz*5T%)x9JE!9bb7>QuGg$#Z=XP&I8V|h-9g7`dAeMt%BArMR3o{Cmb-t z@qzw1GTo;ri=)7qtBXH1>i_q0G{SpF=1fB7{xZXO9on~)vf^jWJBi;A>&;fNN0vEN zOvv`9qsLBgO=nUAR799^zsUir8Pbcumna1KM`6z#V|Q1N%#-Z@ZC%^ADn1SnqmqS!o2r4 zYnMS=7lvdpaz#U4MI2#*+`fkBb~dU3h70B6{e61luC6_cb?%u&STB_U#QHC8Iv5)` zhjomZ2za#Zs?uL~IOYh5s@5ZjR&kkWRpur3C^_hqN$;XHNM-LKhN{S5*utxu)aK|Q z1}9DqqMPk1W@K(D?S6gh#5^c7nMHW<&yc8mij1WOm=ZZO(eI-%LDRJd8#2VKZpwdz zK*xMGdAW&&&S&FGhd84Y{{QYs0wV4o>H}Y!U%Kx}FHJweCm;=S{MH0~Di)v5Nv%Bl%|M@D-^g zIJqUY2NH)=q4*b8Is`5mgqyIbuy+pdm>jJ>x%EZ6DwEM2g0^aQcBczKdF)z3`KlR4 z#V!HIdt}#fRL$4=>5^LKYze;JA4=sW#)-VKSTrK+qsEf~K56{PwuUH%0{)g8M;2`t zrioh?#3~Eel9{2*L}Hp_`@U0 zi1GNh_lhC#uf54GYK4~%M;iUN0`5E~arDDuVuAnyDouUY7#suKdHm1^6&fr_q(RneF3X){mQW`LSX<0$?0EZq zw3`FR;^10~`@|+a610~E>&J#eJ`MbsO5II}K2ah{r4x1Y)kvJ?9Ci-atdd!~y_gPf zCT~iGk=!L7@!2}biaYpp{2hXf1VeFn%fIo&pcHjA+cY*HiJ8oJFJG>kLI(EZ8IDQ6 z-6+r`@=oGXwsMTqH8P?4l*G;vQl_`6iLEPSH3>92**dJ~eG(9Tp2qXZ z$GV{BLtH#EKB0FZMbt_1!HboHB(k(#%Wf2dHK`VP2^3ww4RZR8RasQvK;J?WU`tw%L|8`OA`Z0r7ip0f864$<=T2J^paOwSU^E zh|%dN>YEmCb5A$&1;Vxd5NtnYuhpOS5aZ0_lFP4zUixgfwqwaeoxle`wF>_QFb5;2 ze{KdkXcXinRE-0b!9u@wynUvK6a$WSb6=+F-vmVO4;;5F2U^dL6|EY53r^M;z>*EN zdK83(D|VqANDDf}c1b`H$7E+r2Ln|i^`c5!P|V6p+SH@y{cz3yX^s8AHyA&v_g;`A z$+WIiewL|_D4COol5;Xi%pbGTi(*jw*8%bPcWr8eO4Yw1Fd#JBw3lE{>s#YkxdW5MhSh+nD8HcD0?i&L^OAaMN)gHnYvg zSJ2t7a|hSicHr&-!m~@RhGrWe;l5uvhIe9`BMT|D-0DX*OqWDIpKM^3`n@mZVW5EK zVS#Cx7AG0tkD3nC#6;pG&Y)W@C6&^UOk6H&MH&`jePs6zvHJwF@x{ddV2Lxx+hFbpqjZl+3$OJ1ntOm=UzuJ_xEHs@@Il|7&Lla87 z-Zdg6f!b#bx7 z3Vv&1*JwXJhyK8e;yb5gf_^Me!_yZEm4Xz@6o`c@%1ldJ8+zB4>wFLOtEQ6y;z>_0 zstP!8Y|Lo@t|fPNh*V;o4bm_sbaqX-0SdAQ+zc;o!`3z0`K*Yf 
z5dukNfr7LotJds6pDY2Nwab6fan^9LTQr!d^8^9#%C4EP5!h>U5ebrfNGq`B%GJJt zb0xCqwbRS=NMzMPl(uS6WC>&?E>g1R7uAGU8c`%tYi1PG?S1s|G;3zZ+FXc4OLD(|cbZ z7+=~et(H6(>V`p&?JH(W3|6j&8S7i+_7e@C+@x^6RH~JQKZa2>cY1H)^8@URt*?k#mHJPiJ4*d za<0E}%=9m4fkIvNcsHh-$vH)fobZGO+0s*Fq|POmG?+sA?7x#n73v|7gqW}9 zTF6CURDZVS^S0(qT0{5=L6lA+ppT`L#N5nJ{cK5GNxsHsR^p8kiWP@EleaUUxFPe1 zzv7RY+2!mq^=U=rs^~1HVsxZA@{?0XlT~vV!)of78Jz&-RK66JOQrjwS?Cj_d+F7HdUXsGJ-Ydk{&!k)ohrVMqk2_TY0u7>MVfX+LC)GA6Rb zNTUNwW34?JrcEW=W>OIap&WQT%idXMo2>;KXR&eSJC3BnwfcF{N&ERDS*WT>`wY_N zkQ%@9ZM34aY$ZW!Tq$3Nbbcj0IMsz?GM;|gVkf!~pQAk~n9bYUW5?!|BJgm^jrxkAkDavwYoQH!-Qq3Z8U;HUNrNSv?;S<%8RLX89$MrLQ2Kq)as5-{>O)QKI z>j7ixsH)!KfBo18nn50Sz9*yV7q@fstZv8445TlD*SJn!qQ?zHUYXsRw=CkfHNfVA z{+Sk{3=8kll+tJ1a-ctOW7Pvo5y^~wDNQd=9p3PQ=QSsci%pm;%m_N!5noVf#koI6 ztXtM!a7K`~+J zJ157_{P)2b1TuV&mQfLQFlKT>B6f6cadj3+teD3^<|Tg=j;kgA+5Q!-=7w$7M4^*K zEkiBCu3VY;2H(8eRaM}WtGXAqX^C+_#VpB!tDT(uCxOb2N}etmX*+IV`w|qKto}TZ zuebgs!eM-9H6c0u>i6=HD-q{AQ%{5B1X=R^Jg%M`!@#+W<^e)g}w57=)D znW&lgA~?NZvNLoaSwr3os(oVS#(`tUkBlLwy2T+TICfUlwAxJ4WcKEO>NldQQmyiL zs*KYFJL4iDmr@D4!Zf)qq(VUyV+beV*KTObdhD}BlI`Lk+z>Rv6iWN=%A;a!>>vUP z(n>9d&0KcHz6g$JEiZRi97Ax@m9uYR4gE&`bTj(>>O+*_wb|xFlJU*PA&H3ZGs_Gy zi#NidxdQ-fV+uty&Npr3S-6yt%MjsgKH`TN14m|`3C>`qwRWH^dx5jvIm#MQ2n>sL zVvnM|(vSdg#=aKMHS+5$ic@_(>MC5S)3eW(bOnqs{gs>Ej2rB0cGb{yn=Qy|tIZL3Z$RoL-;eiST5zC`DBhpI!u4kLOifdQj;B zBNy4*TCOi#VbH}Nu+`F9s%g1f2^bKBa0sgzODvv1F5*fOOW=@v0EumK^lT)cIBny2 z=sEW2-$WKCBpPxbMUw#53R5@GEDxbD08)vId`3SlC{;&B9d9)SfRxKHE9dbT4I(*H ziAm+{^myDLNe=T+u6~XM63{7dxEUy<6u->bcu+Y8CbI}zid|g|>wW&sKUUrMOse#} zPP{aU9${-DP|1gkH<@~OeNJI83s;NI(QnLu2nNJ1b?Xkvo0}uJv}X1~wiGF2D&K9; zMfnmdWWVA#{6DkrW7NyYED>*yY=nP(?mLK+rG!RIJE&lh#FXV>iNS&T=6KVpmch3b zR0`PGiN^6i0YJ$ixQZ`0Z>;QTM z7Uah!O^S4Figb~E^;zR9Tlh=2C|f2X8(G$KE}zQN>BDQH%7*`6M3cb%c+vNRD?iaF zh!&OVkytjY-J#5ZUN)yH^RP`)S)+}p<++61T1i1N-~iRU$d%Al9dqV}7xvkXyOV)^ zXvM1%p-#;Agaq83gp*hKlQB6C0+WS4$f(K@mCB6VFNr>r-~!}ZvTH|Aa5IlgZo%(! 
zkLYjVZ<~B1pN5F_BTO2bx?TTS5o5uaST-=CB=R>JqGkh*p{IyKmGUgi= z?;y3sy}rxztf<#8cgU@~;|vQW?g0rXaAI>Ccd2d2dp)gf z9I}q`#W8YlU8O+Cc75yu-192f^SWm8R;KfQMmsth=#;Y=L42fA;E3QyD4FNiWXDVW z7k_iW=F5=>$0&amY&CkK83&>dJ&kF65J6=n;b3#3>T|ozKPyZe>CEUekD|S*2&8|~ zVMH0q)d7|0LwCor8Kp86KLhnz zx*C_4Q~%FZU5IX#2Dq@8i3Cj9J2Aa_8>qAS_OL&iXne+K`GH#h)0GxbC>H4exr^kV zx@hdbM?Wa!aFaG~zc<{Z{=09U{&#~?r4B0f`tl*!ho+<%5&B6V2X(B2oPLQVwV`lq zW2P>Llrbb=94_#a$yHSD-w>;>#qE|JCav&L1+cM;LBTSFy(uZ)o7)|@_4eV+lD~Njb|JXU|*R^jq_$#uD}x;2() z&z69PoVv1}IFg(jd>`zQpb^?AlUed}#IAL%$(;_T7+)%;eMeF|G%T)qsdiT=!a6(K zmOO@99xpVv{Fj_df}!$p2J#HdjFPun-NVBj7pv2X|HBh5Sx?Wf+d8!l3?2txyK~k6 z9|6(aFY=&q+->2pYFWH;Ypn3lc!2I?9&C$uEg>7&U;VB|3cW(ji$E`v`fx=TM0WMc z9cw=z*1#`5Kx6&y1xzg6r~xEc=;s$tsSuZtE*MtS$XasFyV$ox`fLQP4sY$lH_=3s zabY$3Y!de!c==V>Xk59O3bB&|7W|vz9~(Hik%B=nwib|L2r8zRRKz!+ zqnWfTL@}@8>1#m#Ndk;paBo$*;vY;o@Nhy|I38ys2m*+2@jh#%)zzb5qBo1kKzx({e&59{YL!}wuXqhWsH;Ny{_pO9Pmak=KS>sBR{nI?#A z6H_8I^5=6w2hH0)!_x(E;;4lQ`$wSof5JCcel8UAF8eHngp(gBGcs78>X3iqo1ebp z`_?)wZ(gBfSfs-o?0gM~Zf-H}7~z~6A2b9d;FwN9vm*tQqq@S^Y5Ti*EGkV?jTSJ@ zg>4&i0_#H%33f)Q-#x9L?%nU_r_FY0*K#tG z4;~MpvAzbPiPoeSiF{dpA!yI(;;q04%6-Dh@vpIW51YPBC``qjFUe*-8 z{aM+1q%bU4B6hGoieIY{fX4AjzYso^fcT-ud623cyxw=+WqruH=)wvw zg~ZgfBqr%;vYTwh9b-5BP!OGvRyaytMgNH`hjE;rQ~)aqmTUMz~SOGX?E_kM#7+K{*Mfa)>r2H~i@$NtDLIPMoFGa4xmG}YrR2Ro%`N(r9ta7% zzeF?L2S-=Y%HwxTd5#H0)Gnt*mxwf!eI!`gK3xkqE#7C;LOe!h zb#T5Oj-~Fa+%GkM*i8S^zS+C3+Cd2sWqi2b@h>bPGpTyxw+z2s(LM38&MK$fyw9Fi zCm{({kx%KyjbP*uW&pZvHsU6J-PC<1MbKq;tfGp7^EJE3qfA314cW}Xu%Kg0rt8ES zv-Fq=`k@>BBTaLZ!Lqx&&P;yMW#N+Zc)WCxJRL6fh`|YmdhtgC7Fz+A@-J1_0_I4$ z)A?pn$gQ}|FX=dOOvxw(xr~&H=ZDK8jAEkh?ngf!4Efmj8NPXv&MTI%Je8qAt&hXI0p)qf* zc9Nw&n4T~xC8wwI=ISkf0WZ`O%OL+!28N-?0V+2fK6a@;&mUTXlQ~s)6AQE13ftPk|m)P-M!Dwzevl zwGQ<)BL8S+s7o`YmPm6nH_he+^%qp_K+;`4V!}+Pcsv%Lmp|5nE!ZS+r{e(CxLh1k z$pMs#m>B&>rlS@q;Hw)oN&llDxnztFeL(^@Jj5P)#s&m4?xn%id;)YU6quODYJNw@ z&imnutz%U)(6pyG)zr!@pE-Lx)H0}UtA;}ACC;fcw1JNqgHZEpwRC#}cY+WXSg$bG zFfZLNN8jsekivzJxuSGGD;x$Bj?}1lv`-fSF#Ex;B=x^p073rTp?|l?(`E&$K+=uw zy?R|jOU za_~FD29j?JRvbkj{8iwqcixx#c!~cXODkBoswKk$G50m1{80`PLr6k2(Yqch-Q_}VQQ=HbmE_8&wsjz`n;yr|6p_cqM zof^p4D`ijWCReS3!ZBBpY~H)9<+Zp#|B(ZJ%hfGHLNz(e+VYQi6Zc8by^y3KyWI zAy3}Czo-eA)foH6cq2s}LC+N7iY1-#RH+^4^^9S2TrU?u6qBXkIw(AdCu>b`z=Q6^ zQ|M5I(0)0#!0`}eeA75+YlbF#<7YP@|IewJcW?soj!;mE<3m9I#M)cF?zXY^p+ zHv&?)t#VQxsId*)`tlJfxP=z;bl4=uBhV>+43c=Z^_rA^HLR%MF*m0mxRwp!w*XBm zMzwMS#?ns>%UyNr1Cj5)rVpa5*_5+UQ%4~<=rE%~Tcm>*kKDC#E9B6S5tPHMyM*?H z;K@^Y&{X&7LCAGNSUUoNDp;6|5?5d>{zK!*f zX1kTu-q6*U+oGeD1LP+uq6_G}-?&`}2ycV8zemrsiKXstZ%3|TC+!#1Ecj57(5hB_N3Cfuq&p4%) zuB+zP7U-~s4I1BaH}l&y`v|;^_knoterDP&UhMmH+H6Bx!&UJAyr!pX5{DxJ#nALT zq1{tl)ixHb{rs7$Bc5uFB!-Dy=aQfyq1R0;q19cEw@U5gRT!y7IR&1a~DdCyJkjr-FN( zZx?1PuK6!#phVI9PlyhM-VbQscQ$!@Zl5a1g#8~R&xI!Jl<2KuD~yL%3XK?1@;JMl2`&SQ6ef&#RM^TS;G% zYr^WAMYwF@SO4|HkFQEm2%^#yZe4$g%HiPRpo#IPJ4=EU8F01Ib|j6+r&-4hFShRC zzy!_!$JC9L4D;S&yS)sporw(bQ3x8rR5Nnu4W&Dm6|f52f%2TN5JQ_C+% z=+A3}U{@#=-(kFKNLLfK*ez5e!f6(1sZ|3t<-pQt>NT^03QG!0K#jf;rwy^bk;tpK zQO@m?uXo+r@474c_pkbD^DdxJQ$mae{N&GoB*js21r;m(l)ql>K;$XFo4!9! 
z8C5C?`xPgrYCuWMg_GO=AlWvjAfp!&*k5~YhOJDjind2qT+b?7z&r3 zd$k?#RPr#lA`;DNXe=hw&?sM8SK`#v;n{u1i)7%~6&{XxT?^MAZju&aA+S^Wu?TN| z>NA+TEny!j>cPdu-#M(;T}X}iukNS-T(kwr$N3c4q1aY`cS#loxiBEw3N9mU?(_X2 ziV;!bXE|HEb!@_2mCq}3uq+nv9hv=SIfjU_&92@BuQPa;Yz&}QdaLpusg`3WR<#e|QmF$Dj`&X4c%G#Xd+t&!Y%15rFX)y>ylxgC0m0jnD%fOaG zkDVr5JW-hwfl?#Jl|Yq-=|p8wMmSBQmAbx{HWsvtPzzH3nJ4AF8CzbX03#AKg$b3d z82#c-PG6!)DT}=io3as}4FA(l>GE8u;-OOqpE<@++z?t`-j|tUoy)?2yW>lj79q>#C8a=tYpIQ_=j7zf)c3!myHtzqTD=+*Zhi4XRp3&V{G9<`E5e9Qki z10m|%u}OhCL+}0qp^TEK=#63f6kc!?`7knY9mrkGT4pku@%qvN42deap7Z+}a6oOe zk-h9X8RX1l))YxyH1rL9d)|oOUF@w`(EoHu{xrQnOMezX6!H`)J&sx+Mm}1N^y~F3 zLsB>Y^g7)t>V+ZOi?n>reoc{HQoox%+i?v@O-iJzd2O;F6ep+oe##x!ibSz!f?Ly@ zIJuJf$?9ji;CT_wiE}o0ZO%6VZlhmR*CurN(A62U{kQhL2s`_OqpW@V(qhJ8JA+Z) z54k5ofxF>&=RLP;nIaFjFSn9K*Kti+#TFuyC)m2!kbP})wH4eP*w98QikBqr7@V@P zWmK5UJi*>2{~%~7398sg^6Vffe8-lLaJ=}8>!yRoXUWA6T1S}rD$cM_TkVR($CB9!9c(6fo zMN>$=fb(yWi9*D=1KgC*JIX&Kb_R>!bK(fZlQ7+Wp2$3Lr>j?`oT5OjqDp1)R9n*> z!0*JR&wh+R6%+VT{LBzi2Lukd4ExYernKG9xk{Js^eX=ZX(a zccdIbOqp{tf4PMjKn;jS_)ddE>ougyoyQ)9>uoilc_TN(-8e38>IAKp4rRQRs9~Ct zFhYOPGVb@w$Lps-*s&uvCe?1m2oFXP9Zv0cY4STL9;;9_D9WFnxcz<{bmr3Jiyy64 ztFRG$v`#1imz-py@0SP7-d`vF&Uyyid8ouwE6i#CAov)IP+Gd~EH%%M>t1y=1=l-k zS-lM$`<&0Fkos-awd-(B>9-NOG6j|C}ZaGEu^KeI~&W zGCC^#7M(@YMFVdz$zWxt>fnx@7zXD6nA;8OP__{Rn}R*jVJ3!?@!X;L9vk)3?sM4 zA%{+^*i9(=iQIN#j4-4fc%AK2yVFQrvGLJa1$3=q;~*v;cR2#|D?YVg^W7W~I zOVWz7(MDX}Kc(~^JxfB1b`pn^d7-f2N+sGk$v;IU%I*vXy)eO%X(tzYkQKhgi;SvN z#3BYiH1}KtqMKt3Rp$7d`IdW^*viYRk&pW_(~Hfn=L@a{C|#nUz!8ku9MaUoGV`KW zz2c%GN>?diuhu)B$DuCGnsmjyx&7Md{-uUzaZpsOEO%Ly5R4YQ?$pkhP``1$XMZl5 zSl|7X>>~bC*BI}HsGt*@GW!51cZbO!=1Yo4dr`=4vhy$VH1FX^9$h$unPIgq6hQlo zEROUNt9}6`cbcS`hwu%9HhWw$8)W033i}VSmMy^@oQIr__sbv0h>a0k!7F_9c?rpL8Q#aFT;sC>jIEpY;YYK3XkXy6W;*~BkCpm`Z=NYOfpyf{ zIyW*f>D_RqRgePHmRGTZe^v@j|Ef z+6l=c3PJMv5+gJl4>>-bJ(%_ z03{nhVwL|&TlUmX}r`Jc5 zPvI;XBj-`yjIC@Rx&9RxkyZE|(Nz1K@JD5qQwR$}u+Ki2op_cxpIi`|F^@@i&bWvjW*a~|{8{kJ9U*eNb~2I=2KK8Px{ zke&&C;0wajI*|5ymqU6}ZV!AT`#W*`+H&n@N-MIrh!BN z&xv6cSXWBgF)u{R{iq9wrgqbu3B~2zBWJ>hGxM7!0+R!Gm(lBA{3suyGf9Zq@cbv# zfM_sjpEc2WmUzTIb|29{OcS?bk%0Hjwtb?;-7}k=azsn7ht_^GZ(j}oG|b8J9I#Tw zg*iBeoGuyHWBIY1QLJZ(UOGCK312OQdE@y73qMeRM1or`gRXQ9okC%^<`=#B2w<9& zt;r!|8s>QHr);01dK|d}mxU=pe4MZtwgsPN=yYU|gL`P-&9Q7ulztr1f@dMz1s;D9 zo^UaO&d0JM|N85mjL@q~I3orklIo+SMpp@rIv|m0@#W^tThTmvG^T6cYH~FTMr8k! 
z(&RHWqj|R>Axs#dKOoDtZ4DR8467gCIEEyTq(la94L_E%dJXwEzJ|6r!tk1DIFnhw z+B86waOWQ$Z1qTG7)(!vE)W(r1QenKLyG}$@xP{eXpAfzNDSD`yo#YB*LwcC_%-^B zSwrVz?w?jDD^yigk zC?0V_98|oDN4&t7m4Wrm6KXwr4h+|{VtjJB(~E(^N`Qkjz*s_mX}?ic%=k#0H#?(y z`n>tB3LAs^FI`U~b8px~Z6G5?koMSgUM7DqL7afIisV;Qwj2O`TY%B922G-Zj#30M z6H~b~kE?)%h&i|f4oX`m;%|lNRk4^cc5))zC2G{e?*NfeQB})MzRlr2MA$b9=zXGB!jq&+8cG2_FlwsLW`t!Fvvl>KNb0{{N56u8DBn#A777ZgB9Ia{x z&{YipGrGgBamsJ>3=RVPY<6OK3ssm{b$_5AoL4jURw}bd-u+VJR6dIb*N`Bg zW^r3Hrqp5-r`(g=zjSIhQZnPAE;vK^leMq%#8iq`L<7-Ikho7RY|>QB{^e70c9+_i z<^`GjgbFx_4I~qN{8hhKW?hG*NpfDxWtBHg&=O_W*pDq;VwU@le==tYv#knTCO06H z=>sh8C6fF>(f{u&bh2lsrr?A+!7CT|t(@+yxiRtm%v*u~KT-<`b5vltF<_mgW#Jj- z4NI)67qR>oJ&ntE&$lpG-D5NURBrpAcnmvvUe=O9rb@X9XU#>5YUAdhtf?6l)Gs0w zGrA+@!qaHt7k?zOC_HJI5<;11lzQmSp^q-`=BVv#020%K#2HKWSsB9$z4I&GM#R#3 zvZC%b`Nr6kV7?(NKr{p5_s4uaj}yeL@F>>!Q!!31KI|f^SVGm~XI9|n26{?+9E~;t z)JF}%tu_?Vd9931DCloDB!_A2P<6uObYz`woXKX9p2k6geJ^YCJF1Rza3C!xb{`aK zYqRBA7zC*ZX znILa#x-B;sqTc5_qDz&{0Q8GrPP_fU4fjzVsLI{iU{2-(Eogd1 z9Ndh(y!e!dn7=)meLR6xQ7BlQKyeNkg)F<5tXTDXM3!DRL9Kk*3UUeO8&eq5tYcC* zFcfwdL1X5RflEvYkz#+XX#Wof@qT9jUASPj3m5E5p0k{;3+c6x>y|$a0Znkca*Rbc zUa&6d92ooyLfvuRT({RPUPYcPF zoDWXP??|#QWB(wG?yFXLq32X6Xa-0xaQ=!20lyCucPk2dLUh_*lxLyt@X?kQvj$9> z!%N{py5ozD*rNmK^oGR+=Q3eP*~bsQF8t5uMHPu=p2u>Z#swkRp+w z5i-gYfVBicyjV%5ptVSkmbKuIT)Q@U){cO!ALVcgE{GtIt^r&=?4KJZce|DKMdbbk*Hl*3i=||$p>iXA{?RqrbNT_mb}#5W zxBmkXvE4pF_+QexrQ&Q6q$nrMJKO_nOs(A_T1?nTUPi4v?(z1ETti7jq53O`)5TMs zLS6Vqet$QSF8xdn2C!DkVd)CD4IeG?_(<@>4pCa5Mc?_Cl?@2d6K9h4hsDxC4`RsB z(Fy&>xX4-=-gI5=HA~)%cF05hpsA5Qn(!18oq2DJ3#~F zgIxDXeC*RZbaS$oy2)OCSU} zZhuifF$fQ#2mUU767pS9gZxTbCK+A&tcfav4R<$TJFI@+T|vkddPNyv9un0Ew(L5{ zkwdY2i5H9kl!jyqTd(gf4>4`}OBcun!hV2omHgu!-i8hTy&+R&|F3Jeg;fY`@V@}B zR*d4@nm0ts+rMBJssWPLd14Hq$AbJDcw^>Z10LqIAEL79+kp)Wb(O`?R?byuib!b8zq>n=WylKM(XTLF*uL9)V;$3Yr=0nTEh- zP4RJeR`)@ZaSb!u@g#ZF<1nB8@ma;vM|TuxwA~KqvVYIhd)};mk@pY4Lwi|LYB530 zk%2-FDLe3$)K8JS1L5d{l2rtpoh4Ht3*LEnc0*5%R{BEDt5O@jo1Qt6yQ7^r6}Wle zay@O($lqyQ<0&~ZP-bRAy-DJ3d}2Go7?`r_ag0{mJ}pMq(dPM^!pAWd#*TkW1O zBhK&X`gAWU=z7)w+SdXZTq2(G!s#H0B-}0=#SuSw`VQ#VmMSxnj$OGp!W^I{hzaWe zRIGj(%jM$AMV?B;pbquyEnmmh^W2ZCtU_J#VPTX{Z8a3)t#L>Vh1wNQ3!@Sg3JY;d$m{u0N~EBf$zYhx#)F1N5;Y><#*$q`~OU7 z|2?h!&Csp6;{`uYV*D)#tYX0hl8p>S)J6DK8Nz(l)kSYpWo!#5E6T=kSW`oc_E3UYN|Vv9S;VG3v8d=imQ?z*6joSR-4JW%Q-$7|&cU$c8P;2WR@Z*qa#ZJ8*)AsksW;^)@ahs3p+O6cZg5qe> zorD@h3zlnNe9SqYNz7Jdc?`9(G>s*+z^pTik&?tJPpcthngIGX~L5`@xu?F?ZmbiQiH-IA@qYgF=`el=FJX-)=LoC7X6{!Bx$wPu93 zs3wJ(PZpY`64%$L-4f2Rng%%U3K@)VsAkkl4qiWEzO!j==RAw1w`o|78FyQw!l0#> ztMjrbBV-Nrm}uEAflrq+sg4#)m_^pR{$hXX)LyIcnyoQi6a3#?fZJLCa-8oPk}5AM ziP7kIOpj{3plk$&qAPoIq?SQ6AyvN94mo%+BWs-p95dJ^Eu}Qjogx zkLAGKow;$W+5d}+J>>evY~H{h1Oa||9nIcs=V&2g*%8xjv{p1LRs5{4?dhMO)CL-1 z)~(ixOJu=)c>`nwjfc?iSFN&=)p^b{0J7XOdzlYq5pEaq;xH!tJhvr5u{T)#d&4~h@uos&R%1nX+ptIim_yMQxrPn(Y#j$|P~YMg$SFK{x+=}Zu_mI~ zOr1<@(UVP#sQ2)rhSjd)Sy6G5h>HT?!)pw~PL2g|*K}#%lhPiDF|!M`y47fJN(p(o z&H?tRAS+?4|7?u&HfC zKL_zmJk}c|>^tPM?_2K^9Nyf4qfQ9X3qd{H@CWlPwQ@|{1tAD>l!CGR4X;8th zHo{L(J(I2tK}un-fz`P9q@O9uM0zE~Yj&!&NHohEHe4i{cEPygmBb8S#SKgnFeN@d zMqgBj1Ark@PcF7OAmNU99j+tzV0Ghmb-LRdKT*}NH*$PRaL-OE=A8aAFHPB6dk3b9 zO5JdpUokei{9%#7%H}Z@Ba})bePf9^?HaK$CS0P`%=XQstn@l}IzaH!evp?Y2#sP7 zd>y0!oP)dbg6(k?zWU#t?PNFz%=4EE^N9)2$;Shmt>sa+oB6Ps<(62I_Xu|{R`vTj z|M9h8xVpvcZZk69^F~q-kO7js3XPi~Y=>O))~|e0z?zV*0S{Kjh2fg)2dlk1csPT+ zJmHbq4DPpwSkZrNzt$-EE!T4zA-A4eDTO6x5dLT*h0;y-ixuFs%x^w?ldnMdC_&QZ zDdGqH=ZkrCeY3s(SZ?Jas*~HgAJAS9%dC=xt)NiGKqk|z)7a_(ES$irsvvKwdt*!HFiIILA{IGbV8I#Y>@#)knhS zct)jITF~g;7Ztf}o9@xLw|pkJpQ+y)j$N^LKu5z;7+20eOXpzQm!OvItFMLk>d2RD 
z0A#c!e~BL8bNUYL{o+m6_wsuS%A<T!H6utm5n zgsCREMcXx^ZueNgoE58V%4^s7042oMvUHV~m^Z6ngrsO?mtd21P_a>9v>=?~ifL2U zcN$Ow*D-OKlIj-2%~A=zIQPk({;hdNu~;nXM58IOil@lyzG$7x)jm=ZN`X)b#$r|3 zsgH&DV@%~xM*Q5~uas0&TPg8KnKSbKtRJgs9y`3!sQxg8Y(sx&^GtfDYY|RE|29gw zj8p;H??DfTp?&r;1v~(??mqu=S;Jf_iF7j}7tpJ`5YjMo|dANqH?k(m!%%!Wjw!&P4 zb|(@fSNdpAW8n+V0$t5rQ#Ah_@qfBmH1UIfOoG+-enqVt>4~q%2YZpx+A6&@yy-&T zQ}0*X0!l(WC&OR9xT~LKf6_SkhaEthxrlZ_MEa;OFT9` z`%i|Ko~0qCG)W@ZG8&Ndc3@>dy_V2zq0WKy8wo=}C*fgnkNBa z21h5=Y5gQo{OnDT1?j|8#9WV&?>P#tzN1+vtohiB%aanRw|ZS04xT!rPmnYKE#ulu zu^gfL0x7aqL@60uvmj>teVn}hFIU#L<9n~9{w+J>T&lm_RdpfE=iX}S6V7=f-iFLR zFmd1d^l-C&Kh)VR6BuBgM}kk)m{Pj0OE%ZC33ucb;TzqV6!i8k>*~EzoXHDAO;Oij z$29<(L6xO!B$j&ma+)SAtVa*nqu(F`?2(oCCg3Sd+N!J730xBKav=rA=&rp{3t3HK ziQ*9-b_EIk#&dT>y%t!;Ys^nQ5POdUYF?uf=TQ=hxiLwT_Foqu@%;V!zB3;*!H6R} z2U|~Ix`a`;S?=Z-+~;22KdPtM3u{Tk!Pg3 zgrko)`M;9_*){tLwrLDVNg0ZwH90=_qKh^@&K}NmTWgRDG8WBTSMKEA=yWNz=F&wh zS+&qv@s}APlB}H}Pw0(T!k24a(9Qd^(d;_gfJ+rag}k+l&JR!*;obuch8Z+lbupWv zOqU_rTl-++ah9$-Zm++2p^_@a&1h1tJK`bI?qEG2MLTV)WLa_cW|~hiKaJr1ts`aS zx82fnccV2Ott3VUhpg`iW4MUiOXOQPW#3-^A~KQA4_9{RsF)9#8Ut&w>l4B!@L#rhx93)1@QX1r%WmWrUtAQ93r`p+ii8$wN|5 zw|XJeq8(!dwCs#e6MO&xXgta6rBS{!JI?ge~;Aw!evS;=(F@dYM7 zSsJ>Nqfv~mrJD!Xe8`Z}2u!pW8MCo2+dk#@_++1SUKPYg#v*6K2m-u*g(Kp9zkQDS zJYelUZt_~0uPi317#6B8_=HQVHvPPN+YBI~ny_UMbCte*s0+Rhji3tLcP(i$r6jBK|g#3`+zo5dV7LeodPJ_*--q?9hK{O1S0;t8D!HB zVoV108%&kk>5ebWHvKBnTHlK8xgy|dB*FMUJhIyD_QkN)fZ^w#*x-Uqfac&F&GB$# z4XfnyvuOK=)YS2LN=0#o+y;(_{8@ai-)i$6n9+9MySylKC5!)_>5uCWYJ49-24NYV z=NrLRwa{XiiohrnS_OD+{?SZG8w`oeFLIyNY>84Cq_DvJxs|N`3YIa+h)g?i*4z@G zd0g?RK30i2-e$xAMa^RFwT^z9EK*B7-(-l4!JaGYMyDG|3r$?UjD5}q#jixCHUw{D zDBs2i9lLdy zvnq{R7i1G_azysYb*$>Qa`-!Gx`zb*wzY!Upfuh@b+7Byfskg%CllilHp<>EG9KC+d1xb)*!hS%{Z$2m_py&K!4$ zHhZzKu$fx?5q0~9hbPDPJGXjLF@5fOwgFFK9j^@P&nhrqrLRhj;N1U%q+)n0W3Yx9?=s(28QQzDqoW3z48^(|_#wqJw<7Tf%A^ny5QNjbC zlq%NrjgmM-Pny%TbOEOv2bhv|n-x41>mt6-QnR{@jR)J)h}d`ki}m zpTzHW6kZnmI)BOl6};GX zX3B6ZcUB%#ng!T#nc3m$+v#@iq&<>t00rk?KV%N9Kf(iQoyqE+kDl35zo%5zJtX>d z#nv6-fhA0XA2H@$!Zr~~Pl`oY#$Ep{C-}{d5?fjD32i*HF5#RV_R#cV zVTQU|*CcMJ#i`Sr*u#y2tjw;kjl7Vsg`kqyv}gREQS;nfTnW;g*F3{o`Z(*Ch!HXk zh-&zO59+fVOaqCk%Kz$P8|F|=QoX{bRV5AzS?F~L3F{4Ng}xf$q;fKyxUOebWZ@Z@ zqM`fCT}*WdK&xkvmDPud*&H=q*8TuQO;#Eg{=(`1O{oj2B{>iM8=*?{+Gf;Q;~YmS z3`Qf!j>$;wG_TEf)T<*Z8>CDAlriwWxVJHgGM;1m^+e~$;y}MUfoh2vLP^R6-nd0hEe0E8Y1qNZ_=!_pCLqG@L2>iVg*%9asghp*(qki+R$`0e2-=Ao$9gSPqMI?^D4? 
z7@Q8gH0}EpJ~O^9oc3YfbOD^uVY>F~=sISnHr%Cc!uaF9Vs>%o(67e>K5TM?CSZCb zKCcrs>)lspxr$lQG~j%+jP+PiqXb_yvM+69Y2d!tO^G#Xa8vOTE&#-!#wGE6Qdhg( z!w%CyvUs62Gq7_&Fx8>(F$DAlO-|ap#9Z>3Acs;9Dd&CrPtI)Bn2oH|fD$|5Y^^lx zz7OyE-LRjkP^U8sVVZC3Kf@G3Z{oI+sh_A~zaS0N^9P5##M{c?xR#l>(Wl!s+nYYqNKUk>8xF8!@nzBx5!qvo5_rJGpnG-LKV+YN;+h6F{qJ9+*9Z-MT<@1mEJ40C| zEb{=_)0b{C&4@j!;>9!mm!pURF`zuy!(ModouUKZP98E`{IBkI+_>g_oS{ z-*+SUKeGIfZ>-AZJVzvn_<7VACSGA00!0-TC{T|KOc|}(RhmrF!*px zP|zqqRhf4CmR-ttGBb!v18B~3E#Eir@S;mDkekF#S_k84{H64wvw1S)-+Lk-qRj%L zDX0;>PK&0y;@W*wVaR=>rEg;Aw2@gbidj!3HhNHAC)f1a{}u=soh1I;G#ffWa=EM; z-8M@X{#x`yRqZCmT;sP;dkHp*20K#1uBA+yJLqb;oVytGlxi)}|9~B5hV11?lboI2 zuN}sa`UpNIKOg&GODl=m_m=<@#FzmWt*|nO39Zb){k?wsis65G%=OgA*N496c}O7q zZebEOs7kcWD+Zu7aFgd0-AwR8 z976GdEH&Z~i*ILj;zp&?)2D{R#?pq2ccmG_hZQ;92Wd8W-B&bXwgkIy_#FI4Y#dC5 zlAmJ;Cwr^B@$19nEp6%RFBH?J2MvD4?UUqTd>Rp3D?JIyD?CPc49?~Rfb#*hHj(6r z=fW|+FB8DXw%G;G^R(_F zuNMzM91Uz1(lobmJ^T_*bi9>*_y-5r?JeHd%{T0R|G;Rvin{D?x)|`HN`ux3hCqMb zpyj_GIj=Q-uoTmH&)tM2ev%SUMXt%=C|yx)(l!%<9W|vfwMdgro;s-VM#(#+>E}bD zPHdlCNeyOmly4S?!h)YuVV`+HqI8V!c!^Gl|BHkB9)@8`ad)b96b0Xk zy888PcP*lh)%hZ95?ur=Jz4}dT#^kl4of9JGtdxj5*0=BQZM>5`qLlU+g2q?+NXSTFi^9{jz31Dexh@2%o5g@Tkdt!foZKXWSkt`w+QCE)>+$#49 z=~fSilaDo5WMQ_`N%H(Midox)TdI$oy799_#2-qie_?*QXKIdIidb`hUyVm^<~9Pg z`!0s=seX@RZweSVzJ7vXg*NQIE4*I*!^3uOGCOYFqjD%z3v5qvGNVHUc=a=sZb zl37IaT@yJ42N8|hN)*8kr=F0Xdv*9P>>l?0ukcYbR>&spBm5-PQ%2Ui?@~yP$CqmD z=q%r;Y${dJ-T12|+JFRt0U2_kNy(p-Vw8Dh4o~KghOFHteqY{}VqRfGIi^0WE(l3= z<@Q{U;#Agn&w#RY0~7ls-R)OVLHW$T#pULud4ps**?MY2nM!EkDeg#JKG`tnBv#Td z6@ig0C0rI6d2P;gWntL|&5;Bn!mm4nzoU}>0ifFm2rk&<^+g zdUvFf^y5gwm1@(MPw@IM zJ`=24J|ZK(TTsVlyccTEQvvT3ENN|M$!=rJ2=A>4ehTlA#0g?14${^9jFhjW4M}R z_yeb;lc~TdzA8h3J9XEuRoX!w%l&=^l50ZFguMf#$mo9SQa_>(3~`mU3rS(|Nb;ao zYP+B1aR+Gt#ZbuI5bKJNjy3h%&f01t=d-&%^WB|!+wKObpXscHED#$DI2Crcs;6g) zMtuyvCCgRGwuu3)k)U&YW%9L2d(N)o@x^pYsyq$~-Ds3Ee}MV8L8V^c|pyV{*pFie+bt7U`@*N3?0ai8FQSqkem?jvN{IJNY&ffiqmDwW${+Uh}drWGUnafM%4 zn^?B_4YPIs`%eGYc+ZityIUSgN0#6p?U}Rst%PBT4dKl2QD6+NXu;)#;XNP&E{KOb zIyoL;p{~;%AF}Q9ouEJ3LzZa5EsruD#g(15Hli39Uw+5=l-=1llg^j+_Q+`@NGm|? z=Yr`yqfsT!SLV&XTt@;czF$$lR>leGrsn9BasXI*B3t*?oDKAw(a6TU(YA0gff+p9p z9iXU>^LJr(k6=K$rj;m-@bMRhV9(ULY0szXVy?K{(cYgo+T>~i&~qWXuCbOs%V=sa z{k4$xzFjr~@nQ#f#Y^Z1=<>zdC+v)Zl=ihk_-~$_z_aM3bq1?%JP@=ta6Y(3LuP-0$%(_AnAWRy9orvPK?J_6=-i{d% zaS@IlfP;CYbp8l@q=J3`Ji=_F48oyrdyT{4ZHGg6fqYcXNaNBlM2I*0@OuBqXRe_) zQTrPPAJM&Igzfu8S@1fS@L7$%X5OO5V*6j9Pl6=0a_z#%7wV&8@~o4Kn5)_@r3u|N zOhV-dI6!l56stzo`FgSbPbzCOf)?#KhP7xbxv3Ny5=$fEc1lU*XBMfNWm0eh8Ky+V zhb6=x$}#0EZ&N46#bKwN$Y|k8cK8@pb;S|X38k5Yd(A&57Q|}W1hgxLOH!vS6k$2! 
zWIsqZ>%n7x6Hp?OU4&ULS04FcQseyy^EUDu_f-geH)8LY&qc7%Wf$A&S!k)`+m9`0 z$!i2_G8Tyt)Ra$QaHg9XuRqm_MoBgu+3fvPbPzI@DwCQfxnok(-EYw$vn4g(%g)&9$Z)uKvVI@LjND$Zb) zL`;Ls{@O;>0ZoNXtp4mxDhm2`pHIx(^8np!!+~ulVm?$Ed?p9afm@lq*o`Hq_mL(z z1DD7n-&}C7%T@}>@7qtKmA3Yt3e;=O%3RniVyf)ZyEbh!2Aw@Z9A>qO$G=LWU8LPl zRQA8hwk-g{VbM3i6NTK#TP1(?qrH8F1O7`eDLYF)PVrB)*7t$cz3C=wySrN;M-~23xN#^9DL*?8pQl*+p!_4wx)Q#)l}9`1L=H|=>^T^Sc`@LZ6%Am)Kg*l zHJ`mzy);)DCckl3m8ujC&p8jU{(&FfSyi=zE)=nOzumDTplhUMi^gd(lE`{+hltPF zHZm2in2L%7RiA4FqEZO>YCcXy=FIV7gTrh*9Y|V0qB@osWj=1NO1jSh&=G~w&w3Mv z&~>gwlZ0{wGZp0%pEd{5bvA6zJd5oNe={?=*=kE{{f-Bmy$DHj$pO!HeJ4($u1}Bk zv^2qH2P+I55WkN0@rF|VHx~dli*js&B5XBaq}Bd(fWJeLKA#}!v~f!-F0F4=4^}a` zbRfmw{kpgjSihZ4XX6qgJC)+1-A)RVK9;2t=}XTxDZ$T-P$5Qru++QsM;+Snpx4(S zUxBJWy3g)PJ9V<#MoBCcu6?&TS@-S2Qp~tGsK`62hDSNKEibL`Vc^OL$_jd$_~8X9 z^tp!bSwY?M8Bcc{h@gyg-}!u{(NOrYad>!w1Hz&2jb`bF$6IdUH&bv8*hSG~X|O(Z z>Zw2(ku{L%QfLJ#q+^qE=K^XVbF%-w=GL$LA#hEE;iPfpjaXPe>^N8c|C)O@)z7MZYzO!Gwl)*xG zxHe1lP(G}xM{?4YE|^S%q9mOc&&$+!Z@txul91Bs7IlVCmflFN0JeGI$j<2TqzAZ*v>ja{?65%mK2Am&&7SvwqI`?J79Tua zNE@3DA5tMf$x%-YqJQt7K7u=%-I?G^u@(q#eDkRq{04jIqE1EH4GI!*{CyDF$`6e%g1Q#!R@-wOfIsyl9!O_a=?k`;^go(R&;8_f zQfoNvgi3>7F#tgvLmz{LWByY<|G(~d+PQj@ggkH^elLFxV|x|b5oDol@h6^L?#Lfb6* zixpmN5R2fBZS$c6G?vlrBMMk|h3tXXW&eC4`^q(oCOk%OjdQ*0rlbYSvWx9o2YS?1@_J6MSzf)UiZF!igL& zd`jQikx3;`twFj|$hc{xYC(bZaI{A>Y1}Ry`j!tH?0m5Ui_8|jZ z0A_`4#Z-V0SeHxR9kd+hs6I*@FxMRxpo*(v;7qcw9=pBA5I|Ekk7`wcJ-v8^jb`Fq zQ0sAu5>@qs;Uj%ut30zg*0Xhnn8GQ;vWau<)Gk^1r_mViM#cjQQ_I(0ngj=;s z+ckDdke`MGAFesUe0&>K-S79g%DiQKSK&Jb>M3f(m-gILA(2=umX!NZ7q#ukGQ34( zWK#6E2;oYKH;g34>a$vqHY=9hG?9RkrEMG$Ros_rHrr@>(i)OO-BypW4ZUPgrz3cY zI_Hy9H_JefV!78fz+YA2u?A9AtN#Hl|G4VW-e66r%1yjX`;xX^*nPyj9n`INOqw$~ zis-A~<`7?=_4swo@2>VUOVb~Vff9D=cYdRU>ATSV?1HV!-S~%Q^$!>@gpEcfRL-!F zdKmO&Xr3X2h41E|)@gfmt6j)@>l~qAO;PwV)>_gTB_y>4F%Xfg-OJ`Qil7AwyY8@3 zHRF&Z+^3aaj_%+4S zhVIzT4q1J$z_Dn?jwDU%4VIhL@G0Hi@eLNJ6enoE^v5f) zN-p5mit?LVU5}7*{-LEq@EP6tT@`svd4s!GH1d#!(~C3lpaX(9mviU#L7O%+awDdt zD|T}{ts#QHehfXyil#7;Qs_L_SY}bmf<|G-SrziNNzuG5hpH4#W4%1hwgH$|D>aT#k}tX>^<4lH!ozMfCbt)kq_FkkCD)d*}6HMjXHpW+C~C zK%B-*ljnm4!A!8pp%3Sxj|oHl74gI;uVl{*{(Q}AAN)a6!PP&ON{Qk4@MomNnG`Li z1q*0`mLAPt^F8Xfn<}5`?AArrJ>{<>Pb)K>xTd~P{QbKG$0I!lApEMn4Y1t%6A2Z>RgC3u9!x?PQXvxRiE`In8)grD1iDv`z%H)oHV#g^3&<8sD0~sKYFs zfihE~4x3;YnBK5jC_9Sh3reyu+3g)jk*Z~iCx@%oG@7!UqGAb%ViJ91!aj4w4l`w4 z>AgyHx()tG0yW<|s{zuIHM`>G>6`ze%*=+RC#QI5llukSS z1$(;IH-fU5%R$43Bkhl+$ui1}5n+0iHPQ3bFNh$1bl48hI>dWaVUEBNv$ zBZ=XUF(*l><-aypN>sI4zX9(#X}Gy*IfmFF#~}~!NbfyobyQVYk~=CB|}7j z=;NP8j*(84q4NN?8zJLF5^JrVz`mi=!k);?LgBH?ro$EfLoQXagXEhIWC#+1aoAxs z%qdKPD+aMu@5`Q)d@OU@bU6=w(j_@`n`F3bQ%A)Ilin;aRZ7~85n+%wCM)|2T4UR` z8}QAv*b{!qL%l85eVg1~b*LsF;zO{VH@F&A7|m>fwdfCtJqdj|au-v{oORPmg7}Lc zYV|``PEhDu3u9ywiI$39(u9>(RmG=ZGifc6uFrFsQNX;lnCCBqn2wmkQVwC=a-wZF1bfuQ#^H$$bDRB!^l3gi^w1>n$x zs}D$Q%F{l2hi%K{MP6e3!f2t$3G8Ew(f-!QWi!J4Kz}l9zOsmd10qe7u$ZCYEmOE5 zlBl^pIdD|XvGBzLIB6B=eSdkOZoD_DcE&|s0YSJ)+1!@@iiu{1j6vB*{~qT`Z(%6@ zEF{~>7e36p$wB!8O-zyWaxU6(%ZvB8edE&?0DQi-lX{6~#pTG1+r zHi=i<1`{#~cCO&1=A%*$O{}`)qoFp0^Wso=_R5S&Wi*A5c?|95ys6a+r^C%w@QQFz zknV-f*U>II?raZpr2mHF?dxI8)&Hy?`oD#Pwx|!-#Ev8fP3vH_$NX)ZK)`5mX|pTr zUxd0K!@7@>6GkIqgAFHhWSvXdUDA^4j5!qkYA2+$oiG*%;VF#;Ve6A#+fsrQyCrlz zF9&-5J3H35*V97Y{AnOmR;*>IL z<(+3hvMzITMj=E&d&XiRPAcU}xog9J{bl_syK5$|YrF!j%=Jw5*RtO&FPf#mLU+p# zKT6a_W{dTj>?MdBxQ`Qwf&xX?NcM|B5Y1NdrIo-}iyrCTPLRVBE}Be(VVyu#BAz=*T~!Qr+=;}F#zARix5E0jL-8~rcw^MYTpZ|> zcFLYD#^le43e+QA=lT<}tMyZdezT--0W$&G8rKs0C7QCGc=N`{Xy5){6+Mr+Jq4ie zMm`$v4(7KE|K^IspM`N)A}&x%1Tk}jx^jQJB3b+ZS`aQ^WYD$>8`_@u(J@GUE3~3W z6OI%1D@fyaFRafZPC8>oQ2;vd61E1HV+RZNsl*YeF1 
zwQ{(Ml60xFWjWjIUzW+OMOsB{aNO%ZrKU*u`_&=R^?y%Ey8WY>HN8LJ-+N*947ZZ( zo|!8~W9g^eMLm8yac3w9GKXdU9v1fUtlg|5GsDgC7Lh{n^6EmE-TGsAo8*guP9Lh` zr#={VKY+3hP4L9~>~&Az|FB~X9U5iX9!ARSHjfL5<0>rArjwQ81O5~fByu+o%xRpF z<2Toe%#hxCa@Jl+e9m)f-S!*9cUQ3+wCjZumyi$OtJqM5*glZsYYp<OC;W-~oW zk50`v+2tZ{a5E5;wU-bcRl}bD5Yn3ahIH3!BJ1-P-jdi>lM3IsN{R|D6?Q+g$dsAw zi^oq9%2%x}B_eYyk{68YP-avp7#WID=NP&noxg@A9C*s{1<1u=)+x31M6!~5pY$9W zu<~y0l{VmTn2j&fAd(W-8h<$Wg=72c%>tWW1?zF*wPS z%zj!q0Fhe%k`g9?9eScq`qOF^sBa1%R)rKA*;Q-63{=2)yT+LDb6qZTVsRumk_Bhk zIOGc{ZEl^oIZ!luR|&9%CiS3;#(pq>7fn4x)>{cI1*AgAcf#{Q&HWyX5za8}96J$O zjJ5^e+(aS+FratJoAb5{eK#tEAR52n2X2z>*CD%2C1bOOB}X)3YH$#+addK@)%@IW zqVRB4kIBBmx8##V=^5cJ@54~!t{^rO+~HM+Sh*^9)#EroT&ACpP&~`G<2MDR*P2hR zBw`JAoO1t&V*GHMwCwz5Cdg)#6ZvZi=B*UwO^*`Nau+mh!{;tRJ zyD@3FKlS78R7c_PxJ`1z__F+$wS*>E#3A2_x&Y^LR3jQ!eB6B*3z9ZsHN$UcC~k{u zA-*QvVdpTyCZ=w5)mcc%-6<7-JueAT)cWH;MI@VxL*MN19p>@vye&fLF#l6eS~!LN zfAIU8`xDn@t^IE=D2#pY@X=|Y)2hb9r;nB1%pqJrs*V=?P=5JX%B<|yq|RwvC!{p$ zcPjs(+ceb{6%4y(X>3X)wlphV&BT{$RU0j+8Tvz-s|% zxGv}>{Zd={=YsPJM8#j@Pe9XcrTr*YpIBtDyI;KbO8cS~WMiU8YE=>Bxd~SBAI^j+ z@PO=ejCgGaa`j!IvO!q-_Fem@2sHTe+7K#Qt1@9gJfcv#SGLv~z;~I=2&piM@?1qw zrz9!_nL~9gG%Z)cw^TdDEfo7(I|)@Q4*fG~c`yi5b3GBS3ydMeL(QQ5 zImccmmJtUIRQWVO2+L&ul;U{P(~aI$zBtI^zbcZ!ZaeW5Wwr~a&O!VYmqX&*s^$e| zl=(1CcNUvXCE~|RbaaIuCH`k|LCR%%N&+jESSv{xE$fW4OE74U>rVx6_(Iz_K!%ok z2lk)n+z#q;60Cq2Ye-G+@?n6!>o!krmKL5nA0TfdW&}342qjEr1su0s=n~64z)Ot* zZ!$etf1 z35q%dOC=+oSm-F(2}nb0j2<>-v0!7=F)%u0x}>~+6hpEV#1+~)!RkLm7ort;nK**A zUnvSR?P7lk1CBk5vT*HsMxX0y5@Q2cdJ1$C?S(#C`Du2tJfSn%+_}NNN+UhjlA3=k zxc*mP|FLd+AKvXAGt9AGGqS(=n|HrkQ&Zbj)4j8^)LwZi+CGl=e4U(N4lhMa1@vT( zCdbCMK#C6rAyH6>-Y%qoUF6i%V3^b!OLQfWIFL9Kfi9F-LQM(dT&e3^huH<^=LLPs z4*`4kua76g#(wXG%@~xJ6bOZEe$w8YatoaZCVGy4b@Q4phpbl`Py8G+&fpR|9bOJbdWYU`$t(k9yHR> z1mSeR_TksNL;BE#H;F`_kBt0J_6@wf_;*E!p?~%<`vbhA6=AQ3Zxic@Yn!zwMRqid z?Ln(`Qb*!G)e|azaT+MxXEr~PE{KGGMb*Q8V9*kj;vNe7%E9oc$JhzmKM4fMxbt?y zI1J(`jKlLfcLJQWFD28mf)8rWS~V17S>dgo20>Qvsiq>MzoU5bzvFHmeKAdyB zyNwNxms~%h(p^(Ey=?QR@*Sb1!54L$fC>``KKo|{b<=?Qg;DCsKlo?+EEdO?ib5%r z>ShgIMlS8&Y%V!Ra8f*)0PI!rfmcw{RvKONp9H=5$wjIU3~IP&mQSzTM zSW20y-O3>vc}R>)41Iqp`Z`Ae*(J$964cFu)O&%6xwHdI@2-v~RWRTywz6mVKrFnr zAgt@WrAYnPlWZ|;Ax81pb%i0}dpD)fL~?%V?bs)NlMwd9Q@jn?a2D&~4c({AGN6gbsXGK+^EQsb|Q?=XBM>hQ#l;?Z+hZjO~=i zBm35Njg!)+YoyBrDd}bes3k;7^dFZ0%F|TTQn#D!$#^#?d})x0KX0_3ut4d>4jrl- zE_ouF9m|t@l+jEZ+wfg{RFTannJ3V_Mcqvyw3~jhfg0|+Gh5ZgdQoilVBc+l-sKVB z6r~JFRsF^4n2Aj*Zx1IuKBMHuf`{~2 zh+A$xI>vEBb-oH`eO_Z0i4)_e90Psl&X<;HzrpFIjJZ@@HW*LwoG&z=4;{AXKXV#N zdI(BjyY3Pi?Wo zPtOAnfdManIE?#(;t)k{OnU~p8V|4t)J{rU++@kiAp>Ul#9s*KPfmX`oHBne!8Nh< zOyr=Jw(K^W1MRFAYZuTND^U|8(ao{&G={^>&9jnbPh)6{@go}&{q?pFBh86xX%nOz zeFE3>8IdUCOH#DgRbXyCLJ*JO$c4LN>IMDrOoF}eua`NuI>^|`qpkYI?}d{vRU&A= zViZ9@tT}Fe=6sAU(1a`P#$2i#;f;DDN#rr%xevd1G{mx9N95?7=s72y z{P_GK(wDOtqKW!o^{+HLzVYVEFR|FH{un|VH|0c#+&R$%v_@M5Y+xzVl1%?9uVZa zV%jsa0N)wGE8IpaeJS~ch*eu{_bd}hVYtf4X^%kKJcRVAZZ|u6wciOiH#+QidtZVZAAiV%8j;9 zekE;(rf4Bt$T3+kjhWFIa0wv>p>rb429u|(SkXin@#ZKd$4@VDA%2Y~`N@YlX6;{a zLH5PpX%#_1rxf?MDj(_)pIBpDz87Xkaq8S)mOhOzEDsQgit8V;R$Oa< zP4o2qY+x}NR)0l7<+!De2D2z>1SNoRebSEp0&i|K%qtb7Xv&b|I0dT+_sH5&4dK1I z-t%`7iMr+s#$z*$((uk5M(^_cc~}w}hGcKhlLG@aT#1155>c{y-Q=KUergCT_V0 znjcfJyj5U7eL@%8!Z=Kgxp7CIk@_>Vvs%Lzy)sXzL_t-ElA7FJ)1!b1jXjHOsf!Kc zdJ}}#!2S!a*Ewo8_UD+9_$n48@846xOFU~~hq|1^%y~?@lTJ{$0@$YvI@1Koj;k#H z^aV=7RhR8d)Y`tsoyq9K6Mmw1QrL%E&xaP7p`0PWPSGol(+^P&#aqaA%CTJuuY@pD z({m8KQ02$~ptM$4R_%=lzVfHa=;Uqa zx74GDgNEYp=lQP|Kylk$%xg~0)HSQ|NX7+%JQzT>lv64uuL3Bao=qc#dIs<6Ap||X z6F-0ZR4{5^Oz^p|@LqL6t;F)qM|v$ujQ}$)JALi5g-j#v&iY)@i}A{_2e6WVcaYMv 
zf^D?WI~Cbk(cQKGIIEMlvZ+ebH*znKI?rq#EA>pf*fzn^J|!W50F-D%Cbq>=la*QIz*KM>;v;a#N(y`=aL% z`rhkrnYTUd#zU_SqtlZhTA#H`$uvnN(+4G z4u|1*<&OM6pnk-Y=#Bo^h8&*BAm9$3?`XOGLb3vG)ET`HM4p%)ihREoGK5_*ppV~p z&rA`2gt+HtIrO6hb`}xP5aBReJ zpgf-JykkD^QW9(^6Q$VG(Q1c=Bu|?tp_EQDAun72>uhqCK;uNN@5PP%1; zw;YU4rGUS|!j ztMibYQULQmGEw@>*9Z2h$iq*Q@H6pwj<2c1O*(}g&|DMaMtnyeH|3%MsKLARkTo4i zOe~N#|4GssN_Tbckti2r&NJHx3raep6!F8Uekm_bjk(1rbi5jPjT${RuCTlcX@|$< zNmH$C0E$!J{WZzXICa#88y0Ron&0_1Uwee+&vC zBRL1*CNvV%DX|Y>3XD;^F&DuZ!Z1bHJZjKDX)>h6GYRQoJYW9UpY8idmu(#+_VLby z{gEKoOxBtNPJx@dQY1@BI!3FYk!JU3#yoY|(srgZDCNVLU;a+p?8inrhm}s@F14lM zvAg8$_1AUR8Sq1@)e*Rs>C{y9{lUXnq4lHL#iC`4)`~UPbU~uGv^v5&hdy(+CNr-w zB_ukC?B*Z6P|zPABB7737`+_H$8PI>m+v-Pes6_(&*3UN4q@O9={hNoit4e{!u@1^ zJk)hNaM6%VW=WWX9MWXGz`F6T+^wn~ZMwYVi`E+TI5a7UnCv+^f3Kj|2m0Sq{K-v_ z@L;W0qlqjSx%jP12nxhRlCpCSVNh&BwtGGmmjiv|b|98u&JDAFKOioVaI&@>U_uGN z%9;7C2AfIU=RL(=o2f7*iyw})jZ6{UYKSW&wqnY>2qxVcz^+~~xWPPeFZr9rhmNU| zJVLo;$*=9ooU*H;cfp9sY*sTx^0ixw^0OF{!C`r-S6H1DN>t-Jy2l6nFhy@1<_g9m z0wsZ>;j4d|>1#H86wwo*Gx9NmUtPLISZ{5i$%Y|anJO;9hWiSLJ3e<%^}@c0&co2o zwV?Vj@Fz+>0DEBva{X_mCO|G9pZ;496(y~4C1%Pvc?SE4Ig1?Hicys85mj;2^qv?t zkx|hqc_zsSjqVSf1JFhjLq4twE_$LR)*z zO#|u1a~LE*uDI^di5phWdF15lvZp`5L7#<$DCrV*P8%vEfaYP-?adcO*Cs31Cb6`K zhOe6~>(*t+U95lo__K0}LV%i)sP@AW+fr#3Hy3kKe-k?7tg5*Yv7cY_k@yO6*N_$q}{Iy zF3O(M!6{*il;COKK?&9=>bQV6v+OmIVDMt7mR-~{BSp)ti!KiLv9ySaF-Pq$rgc)n zf_W2mfHO=nJ0QAYr%ARMols}^_4~=BHyDTN-cpqxvnl6}S)4aA zGqq_3Wo;|g+T!av3knI7hwk^pAGULD7RB)RZNR@mA>*a zcQctff(;SIc!RvNzR%TW-a1xzyNtq5o-aN!x5^XF$r;q%V;)rzw9#DjcYK|vm`{ni zp1x+5p4>~?n;Oocmu=NgGNL*E;T;UAn(Hv-c?*@6OsNLoy-nVgkPh$J3oS=UwaQ4p|2C{g{*n}x%d3w z$YA+^SSETMYI+gabX)HJ7&NICbET|{Z3+9TlaU}HVp$YeLi=%+Gi4H97(T83b--5m zfffI-VwZcjpC6)0PZwm;2H(dkIMe`;OB&LH*icTv$Mq2j5e@lr)|(ZNQIxtqmtIln zo7-kPk|M)}>gfze+h8neW6Y^^==TPo}$>cD< z{<|Bi&vE%e4Cc=C=OZ}vpR>P=pr-&(0fS}VJJt~h298rhhqYVemxzs#zLe;I2ps1b z#2Q}2Hjz@T-?|G*DNcwfS$Wq92fs;#EB{h(B@w!YrlvzIbZH*&($(X-`crKq381Fm zhcQC^e76gWuII*yA<&&RQ%;ddo%fGKSJ(v-;wg#fNu!;BFpLoYN769}w)1Pm+yV9p zO+lA~3p+S*e%8YFuQ4+3C}0p~lmH24dezl*p41YHWFBXa$Bt- z7PqUhyrY|0*BfQW2;Jx64!ht-0KT)a(Lw74q7O^{9ye+?2-gHU#bOZ^{nM6iicx8# zK^OefZ&Yt@8=tdFlKR|j$m#@IKs?l}F#2U3ry{kv3qjp77MxjCDTIVw4r*le=?3y$ z(2M%!c%4l2?+y3cYNBW?+a*`E$u`4^wT8S51;o~@#o?ESsOECDcq7K%NYiL1`c@|Q z@H%Np7_C%)^Mv0b`yxc2XDnJ%v-D@BU5NUcL)&qnse@FkT@Co1=wKikzJxX_y`F)< zr~Rl9uBww)ivh!WQB9D&WXMf2u2oNWStfm)?;8j8AasH#{GO=S6XK}k!T;Q;iCVF9 zsV*^v0dj0s8#Z`L9;c1m|ot;ld0+<(}Orlac;SZALdm+08Mdh2erRC3r%x2-6LSe^1#At4`giWVZ1eX%{zfqF z?1(nCwc(yGMaCqP#%HdPbAXLCnFmK-wSZ*O@W3NC{0>qsmpVl`tL^d)2!dqsun~|hEeGdC^oQ?LWJ71;iQGx zq^L@?2OLF$>98V9O}W^)Nv%Eq8OD;UKYjTrtjw@L^n&Kd_6gFQOHQ@E!85_g>kRFn3uu=+k%;`;pU5mI_(4A?H{QxK+Y5p`TuPL^qV`Ju&{+*4%5 z+Mn-7A)@z>6*unh1W!u3lf zuwWI{6`J3Fkoc%e;8k14?hnkmC5D*E+u9}TqnOISt3AIlf5obsMe=wbE5GBl9!zr< z7$|{EnxfX`Dt1@2S`-rppWbC%ZGROvZ>zMR%sYvmWx63pP3-d#V)@_Qx&}mgNhSK@ z?0ZjCeDS-fR=jIOBYo~OGQ8vWvUq(a?YW*?&Jk!KrKK53hUNyS0p!Zc%dC1ixdg#k z@mtbKrMJ1O%-h>Vo7;g_3($Zx2cI1-m+}v3GzwBI6yD~#yhV8T^{c8qdBsk(;>)L! 
z_H^}J&;+wah^^`;j2XMo!p)K@&Q@uBt~9o_Ld+EN5dVgUQ0?`mZ5T@fdi#Tp9`J;A zGi5_AZ8qL*9b&CQc$q&uv4ZXu1$WB>PgU4?K(`Rf^&XzewU4H{9QF-A zLFWO*_%guEU%b>7%TAZOq4Pk2WRqBxgr_)WSa) ziDi=HN{4txkdg>)nynLSqJfh|okeu{n&CKhu!ozwgpDCc`J?Fg>}y1-v#?F^am@q- z#8_N|rc$P~nySb z7@IeSj%igsBkGpJ7sQ{(#Ow8E_xXp+x0Z-iqyTTy_ea~hZy0u zrQZZn07Z%{FKj1VR?04^yyut5Huqp!31{j#&p`;sT^ zH{4w~0n5vw_miOQG5^adoLu|u(v<5_TGUQ-p%e()MBtmwlW1&Tt&R`P@3n9X7R01P z1t_(jPiLr9E%+ZC@UUzKaR)=FqKPX5tcCF2y&pu7vV-g5#1)01uFzVmuXD=H2?UW9 zd&vgi3<$1=56qrM%4=z)Im{qxu^1Pmcyt1hmP&Sy^ThV){|ny{||xjujE?&b&%?I_vBmHVkClxEeuf@@qCh= zw%Dd{(iRA7LJ3dyZq012kGvPKKxuPTCy!=* zHeUZ$JJj2qA>~&okCj%yt<2`xn&xZK-L--;yfwUO+^Ify8o|<}?1gp61_^^!su&nn z!8;-;DoJcXpoM=ZUQAY7ugaWVJ(*hFF?6-5EI5slAvLgG=~<{QfNVJ1~)W? zRD1aL9y|NZvc{v09j7k8mRWy(H#y3I`|CCh1MGY)etI)oSL2{rRRg%e6uRN~5crJ| zV|+09y=GnsK@!wJi+E+V=Z9oiWolJJ16}yyE)kMS=^za-{nM0eTdW;BujuqEqLuuHnMr4u)f%=4H1NaDx=Zf&dx}A4uE-EeC_as@nqOt z(@F6jd=!1^slOPkeZ;5&1k0F;pT^~i*vyqq7O5CWVi!vbZyF^$%d@I+;1o8Kh2E&Pb=m zUEto!`{ys-GW;av2!ud-?1_bhPu~hKCXTZZOgq{;sEOS5)GV>Bw=6f2BRVs&X7sg& zSyD8m(&}IBF21WGnKhG^FcZ;R=S!xHJZUr#Gg>l>W>e^()aO{~a692?&m9IU8 zu9hN&J^vQFYG=Ktl7obK^{-1RVS~nN)fa7e*?N|7S^$X5%hAfOwXl6LXQV{FkcVak zU+<1PjLjoaDDEu*(@CM3S|YBRyC74P{RPp5=~tE#744u@7kBn~=07$gy5esJI@2P^ z^P!$8?vyVlhCg_qlq4zbrcg_utE!PgVKj6Fm?D`8s$3R#(4%fK6(_GK8k8^z^gVOq zfB?)4M0Z|O2=-P5xQO4Q)9SZb}OBx{tCwt6){cT%cq*i9j z?Wy~e(>ex0lfq^NdgR$uX~n;w*8t)Ri554$jMmap$h{~LNpUg0Hh-7b22$ugS|+}m ziV(AFwptJv-wJAhgRBkYL%2N3R!in@4@AKw&hpCAAp*X*SWdYCquB@TNO%!Rr zItgAlDXFz=Sousy1&waL-$E6n|IUv_)C_FX8bauZdue#Q2p4^gKLWDl?m+w_4~{M% ziqtpB%?8h>XGYynVb9QHpk>lJSWrZ~{>rtImDWdCwg#_}awwYBdFfUB|ca&o!Uc}~l_9vA}uD|yhX)T^2adNT& zcc>qNb3(d5eXCe??|QSfLLta5WT5NK?V-e;8_Ku6ad)3vx1V)Esu?!eBAVB!ik`NE}NL^b#|6Ci0MAvrljB!h)6FmJ3jmYhz?H~dm;?xp4O z2d%&yw5mV*w>B;3`L&(=TF(v^9z8#(uz07xdw{ekZk~@UDt^V9u}G{Ui#AB`1{!uc zinomBL_P_l?i3w!f@k7h(K>&_smu__vIMXa@>N@QL{715{!5+J_uD_|NXMyDQ?bYW zACe8FR7A6kz9=O0RwZd<0m(9ZJWj@B{s3D6xF$0ylgzNNGvx8?wc!RBT`hJ^7u8X_ zP%Yd>f~|!ow!+hhDepm4Cpk0e7XdArb}5`Ueh+zbnHl+!xWaJ7rebBP z+S6D&5%Utp7lKb?&7v}`)==!V0c{O`hXoWknO@^+u-{2xj?-2y=12ev4g?+?+;NzC zvV;%Eg`1^2Tk|fP@hmy!c?`kMJNBA(v7&ryPQcz9gtDfgs4#aZriKAQn;qsHY{1%% zs!*!=OgMW}89(Y*S*+4sjSWd0lTi;|U`_L(5;uu#OoxZyPq-b`UI&zDj}KGqt2vA- zve~vkIALJb3QVXg=UvK^!=e=@^=u?LLlks@T{D9=@($>eC+ny@=gUVUC6 zu{m;tc7tmpOM?J*;*YnVF;M>ab=Gv9Y%|NZ-+A9QR7$)q;0L`H; zE657>lG@aNkQ(i6WnW~6Lw<^x1`eqc3quZ^YSFmzMb6k6y+u-ne& zdF&Uuh?g62{fATH8JUnNvR=2@J>$vd_r23}v_cWXK)D#b)?Wj18<;ZA@^wy30iu4} z*<(%?>jzgiH{Buw)Z~@e(tp8Yi`UR!+rb{M@kxT}Z{Pqs3k?ls7N;pM_S~qwf@KH> z__QA30A*C;=zpP|^90PdijX%ZMvjQ64Y@n^oCp7fFe&tkQ&I@;($x%CAT}n&>P?s2`vS#L4p? 
z`{RPVn(p>ooV_`et6Vn0@{M9kA;}V7tadN_ne4eEydFu;xm?|{cl#=2M(h{(lMbNc zICA#bLp%)l00m(!lNB2T3kQxp9W~q5j=xXXKTYdELA8^(peXRA)uBM=;YEUgk|f~t z`X1GO%;}!T8JEJWqk%RY)o!t2#pju-)kw=427s=Sg??g~@8gWP#%~<5Ci^uN7pmpY zlLV2YK=dddWbSNYzj}YDK4UxLIQ7ckhZSt?=HE!EFLv~^N3z1^$u*gwau zNk{3NhRAh)cqTxt+fy~X90yAz?u<52SFX+xsIGDWh4AWhhld%K{nVf1tc|^vd+;5kIZ2J|xcMNZ# zqC*$#sHs2`A%32f6=+awKle59yk5LRkFAUO4-~PdS-REvS@)jYNqE@^9z6lL^3Py+ z04x4*4%YM5vLFFv$I}#PZZf3>0U;KgUQ66rE}V;faA>`u@j?4vr2H(3rN)whnB%bj zY5|&A6m(IoE}4!$<&|&J5Xn-nFv0G51s{0=7NnCIic0=m4q4P|jB?iYVi~SznG&uq zwk2_c;2vU4f*0>=a5Wl|ac(ETr1;s+C6N&R#fZIDfa|-yf%3`F3=92&G-h1*k||rV zfz0>84fUiZhyDPM9P1pyW$rEO{?Zl`;;*_}QDa0)}>jwimVn)**KliyN^L*QEEG44{F z(A+01!I;z5P@O>%KvqJ~IBa=xg!nUcjUt5rn(1L>qZU!s2UdUjMXx>x3CZV**d`--&H~CVYX2MoAZ;{F$y&w zM&kXdf?*y-nR5)ttu42w8o;E@y=_F9+|>y{&!&ZfD8k7h#Nc)nhSNJONG~)thZ29G z$r+x|JJQ&q??vWnKfsmGF87@kW!o_oWztwubgAPhlVCEI;(^H;)qxw}vOw5EMda-O z`=+Y8YDb;_{9!vHl?%0c14Q&F?SgGddc8@|i~t^hsuD)y@?9&dje@q1O~+P%$gBfq zb9u~|)Xy$v-sdBA#tP=pj)K@D#QAqY2aUcn76KjNv;Ign;`!42^X3im)W{^2_xW4; zC67tpDQlwg3uN^htPstS0C<;FruN!Sc5pvkAmh4?mX+rbo3Dty1jjh=_}{9i~Ef7Puim<%=; z_QoLnXl8$mZWEOAR;-xG!vBDBg6HZ~%e`>Eo$)%|D`985`?IVT)8MCecm}n&YbeNh zIF!8W6*?d%e?I`;5T`0Vgw67ZbemWbuE{dqMpLQ;I^zwuMv}mX@$)R~_l|opm; z>vwu(DTxANQ6Aj_kKcFWCl1|ox+H?0?ffeek9scmVu&$EDlgHvCVNYIkG;5A-ZGo9 zl=ZK5=&C^0)16;2`u$0m`-bd>6WoRaWWBfE|puHV)J}`M@<{b%Mb4EqrPA2@o(& zUJECA#X2t@O;yUaO9O^sf+5em0o#t?f}yKd!%_@=a-O@|78E)`Pmbdd$=nh~b$zwm zB2j22TpsO}r5RX!Y6+)#3H`RdnJcBEv7Om%L8J~qwH0@HrKVL!4WW*-_=G?r6j8^~ z;Ip@csEpMiBG}J1WbyX%q7T$ZbbX+thN{)1eT>{w`-)A9PrtnUXV%KSD|6bWzbOU@ zs1YV=e;fV0zR_SK@U7}yeL*?Jn-aW?K1{v6zZevuBJ7Lu$wsidMRar=a4939+~4QR z6OxB<>6~}?Znl?t2wgX%ZjGtxk%y1~Oi-_68c_~%Dt9(V9QD5>GCcSW8QksAdF?J=WfdX5fS>5it@+PCN#2_&aU3F5zy4=R|9l;S2+(02={y_E=ck zKJX(JLN{>|?{Z@AM3~MbDmniPY>q$lV+6_V`rOC&F;ng8=IC0%ig&u(yVsJa&J_#| zC#%<%Y)+XzW;k)WC>n5-Q2KrQyM;KW`2WbH<^csMp z3UG}E*v9n4C}Buc8C!Q4=yL+K<1BdLP3N(j)iV_e?kH|@l~rtQh6ESflOtsA&9UHc z%y3zJ1*bDY7F(ho)i@+EvoqaF#x8K9%Vt~azm;qC>cXTDRP+R``}IxAZ*5v;u;JyB zVl6Ulr+-?bN!%`w#>F8vHo~9c-NfN?w zL{*MV!k%Ozvf6Xy)@>+y9jO?(CM&2VnH1P>+FI1sPeP@LL2S@Na8XEi6ozSA(CDsV z6H@Ar04p1S8ClYX9^*n08LEu+wurk`p>`>Mr>*E4(G4y z4Qftd(s#UG0=b&Z)iH`o*z)s~^z|Hr^$!B2lIHlx)!9~XQ?NGqGikhD3IFzQGjBAU z|G~Z)^$bWh18apqneaAwsDPx#!Yq9hVf88vafZmT5z3W%lV08i=w4*ePhq%CVlhFq ze)Id><~zL4ugi`%(|mjPi95sbi#a~7-k|$xTtS!^uLhu?P{ET`-=4b;#*H+IOXZr8 zLvltIYYX=BnP0UR*a%?UA9`{LEN5u>vmX+>%>dP}TfC{7VIO5w*Kw9`atL=V>Wi@J zb^#wC=6nz_3HgpNM2g6V-ksPF{=qm6s|VJt2oZsT=-M(}&B zQyVlzXQQ;>Bw>tV@o<-Rm4%xjG9}vylwk3k|AtjBAfn}l0T}sR8Ot(fu(z9xxux^B}m0bgVy`6w&HDVXS1ct>a176z}J-^Eur?tHZz23KVFRl z$&!RIQZfz|At!-w>EfFsJZ(D2R4lzy=jN8Rs3Tk(tinAMgFJE*$2x3zCf3bl+toMP z!_U3%S)jrr)z4Ix>nc{I9*LAdSX(j>xDtUJyh8sjIl3C4f3C=fRKc#G=HPCR(fb20 z*<)NBlXWd-Z=@gVQ>4ly_xnMY2}H#t5W24@!QOO*N~f%XX&4vWDYE~Ab|(A_y&W$b zfLfg@CkyFW1d3YVVSQva?vLU2kUh*oLLv;2j=Pe0UPNQP>hNEJ=eJbvo4BjnyBDQg zas)4?0&kJ}*L}X-t6K^L5pJ@55MHG&??Dalu7ZNzC<;k~fE0W7g7DWbm4=Q%Lsu%W z6;#D54s*F~(btsHhW0S=705cJ%c0vaQ|D-uazTIlwhub_#_quIL&yj%48vGG(7(vw zQL+V?S673?i@S`sddDr4Cx2y0KdN~9scj%j`G)P8zz2zt^>0n7ed(_-Ak~-g&qSmz zjAjZ*;%akT`m=5uf=*%wQCdIiWrr?CQ&0XM3>m%{uJ^m$Bp32|UZk>H{5<1yH@CA^ z^nJOpkqBo`b2%{(3NkWT>4DSm9=rZ~3e~Bze8=@qzj*g{eOp7!g(N zuu@Wk(J&IRwIpJDGOBX>_Dttum(p4) za=DKiOcms3VS%49s??k8v}&cGv|rN+RvcuRr(h~N_Hw590cE+ipY1`T$&vx5$DZO{ zy(=q!nZz)43TTgYqDBv|mgPkH*>kmEl6%_;7Oq-JJ z#`ud){BnDC{Y>HMcZD??^4yv;X^cF)h1N{973Xzy7Ebll<-&Q~LFL`xP&r~#jaY9n zH~LJ>wWf>J9J^K<_8F^}orgj(hEUJvt>+X)3pAUbfpy`SBD@^MwK&+wqhq5^3 zJ-1)q;(`ea43d78x46*!E7`3O4+@!`7A#NEdpZG9Sc`gkGf_}9vLnkrAV41?CV+Z| 
zvI~4?E70nIHxIzuePe5ykgR@A&!@>v&yc=klu6eRZ!|S8UCkFs#2lM(yor6 zbgH_$dvWxHdnl*T(E9a)Lks+;o5pR@?p2kWTzcgZ2!s;eR8f59;$6lj@0P5tDJDm{ z4@|Im`GvZKSE<-3bFT(B?|2}~V@D6k@FX`YnL7q)h*d{DO<|yKw><>xq(pHz6C zM7Ys*m_cQJ-z?o#heUj+Dn)N>;8{i_-n61>IfA}0W8Bad*W0P183s#%VUNZ1`ms>M z_C|C%nez4qUC1s8gXs^t%eI3>yl}`igrC%N6vLRm4*w!?Jh`z6tace=*oVG2#xypE z10P}rO4UuV(LEM1{hL^$cyMo~{4gy~^tUqaauX|Hkk4e zt)%W2_S`|47N+HFey`~-0rKa8%Ywy>hP8h9{HO^#j|+bqK)XkW*5<-XRqVvV-HxYw z83yCFa%(}*kG2LC)l?@$Wt^1dOofqSf64}m6pK%MXfnMk%#iR3T-}5>EH3MTT6w?luaz#@US&#%}yOm#s=D^`%h_E z(>%TN5)H1z@|sdi5#rUWb_`Hsc5^OZCi-^lKr}KBE4~Qnk!i;F;_wUsfD5izrseY- zQYLB<3m^7h&e+9eie%45(wFh*sqRcA8Mx;)|2EAOxsC>nzw@2hX-kbpl==Fb21Cir_mldEF;^ULmFh&SeQ zl*&%Oqu?kLuhYSsn7=kJPutzfN>jAAB^V9NTpTaJgrx$OB!*;2{6`G zcBKaTYjw84MGb=Q7n||+>yQeB=^Mb*TsdK2!V)GIzpD=3kWA0Sr-H44_AMXu-R{1K z%sLoB@{T)PonN}uum%-3+FDx=?+T!$CT!w6&9~xbDcr$iyV#VSM$;g!w7v4?9dGI{ z3eq{}?u18MMLr|!dJcWoe+?Er!oG@@*-$-WMgLU_4}SN-7XGj6;oc>L z-5q1uH>H|lImFA#a5y*(2gN=5jP z%X{!C29U#fV;hg|E1SU~97FAIT`H}}IVR+hBv%zr?YeuwAH>-`N8`~=2nc;8HokCd zvo7q&hwQRu=E{N6J`q#(cyEYCoqxXm=8!V)Ra~}aCUo41WO$!%_X?vY zz}3Z6751%wa3i=p$5t1qh4P+LAdesY)*-=SWOU_JIc4r!d>uK;6#AD~SrWhoblM$;-Rya6J*+M%uXg`E;$C&*V3e~^6h^`Bpw1b8KGtA7l%?U~yoL#UEhzU%4N`z8jN(A!hBwxi&Ilpa2cxehV> zVWh_VUeAv&Q++@<6ThMI9}>s?RU?(w^6;Dx!V7lJt#}B=ZUEN5JI?yOWl0|F8tp+| zjI{D*Xs8Wb$C*Fe4xf*OxfB~=O81Uh3>0lPdoja*ge_Rt;8o3dZwIU^Nh4YaY z)OH*aygUFQV^@u7@IbiRbiR2E9*U+zo}?T>eTRO)7c@U80lL(vk}5;12Xl9Jh*HYq zes{KvYQu0A-rp^bd&^OlN(m0+?jO*Y$j$fb0Ewy}X8Ko?*hV?yYnLZ>3OG?i3pt+O zl{|iUXTOR!Rv_wGl@-lgIgEK2$w4C_4Wz{%<{9kN(bi0N;JI#ZUa8ufFIuX8bgBs` zw=_==K)K$q6Kz$>59*`u(O9p2iN)bd+ zaQ8}gK;MKghxcl=RBg&xBFn>HE)so?|Cb>--I#bbq=#xt|Qu#z4y*V4N-%k+D7dB z3dqVp&->ThzOP&a_pE@E~)IxbSmp%`#lb;JdC z^OQIpj8Y0gof?yq*8@6+&BlIsea;wl3;7~U`w;48(1I1o_doSM+dsn_9_*V(C%b+- z_roP6g^jJ+*=|w(Kc>Ecy|QTAGPZ42DzXfwryJ#+qP|<*vUy!QAM5mb-(-i zKkT{Z8kl2DPbONnNdSpd5K$%tRchXT8?MRm3uP$b5D0O}BL!LvIype(c?`&)wqy2h zXci_073y5fkwTUuHu`$bQMDDBvzMxxK#4~(8uSKh$lLFy?r^(bJEG@Q)7mqZd#(}W z>+cP0p*0-wxiV3sA`i?2WC(H|V&xbMf!ju%H;*veDPZ z14!bS2Il)QZ&NGbaFMi@2=54@ceY$o=C9i)nj0z32ZwFq28AkQLdREQr z1xawp-8HXEG?C&MZ5LtOHQp57U@R0PDyY4rEVQbM-`^@P+mkglqcD>Pv%L0TbTn_M zLZM#qA4sbHy%igFAXX^-FVpB}phx`m;^99;>Hn}KSR8#7rL4bkEp$vM^C5|(-#nup z)@@$C$@Mp_i8FNpKk#W-9w1mV7sIPcAdHEPmLRY$y=A6KBM{fHhjPQ=d<5&>fZxp1DT^;d#Y zB*k-HfywGFnQ!)HmTx^yEB5p;!*-h}5Y%m8GPcB%gbq4KJoOIZhUoIf+kf%URK89)@JkQ}qrT%mFfO(G6I` znU@HKpeeAU0#BI&pFCf~E=~I6ll#Yae1@FfNZ_J#K0WRMdD}6KZ5LoP4Kqm1><$-Jm+T9^tE}<ZoBcS7?gMuMq3GI$+2}M2UID2-U_>l>xEsp`;2{83|EYoY=LzI`I+Kf2JK}K zA)KADlA%(`Q~@4C<;jv5^XkA8jBG({SwAkTPhQ>3_~{IdUXdwsJp(6P*~EGp@~(!+ zKnltp`EE-B>(wyLVUhs{0eT_1NTqv=EzyW003}^eRM-a*H7@w{mf8q0f!@=s?6O&p zh$3r6aU-r7%lh)xAs}wZ2qwq7*99A-iAhZQ|ajzwzR}3L7A8i#v>n7 z9(R)?te*OF^#oAvMULOU8Gw&R&{{3=0js7H^-?BtI_e_GihcjVPa|PKUJl^+7~(V- z^+yyrN5DR<{gr&Z4qUx#n*2zOSODRneq%rOfDyDVxg#Q=9%mW*N@FbcvcCr$7=)w0 zp$8ahyV(yu^>AX82Yf>G)^Y_L$VK|P<HNQVZV!8{@0s#*cx3ML z%HR_p)w5xfe9By(q_H_+4(u`H#YwSdrtDw!iR~IhIG9dVIK=!Kqfeos#A|?D+euZOx6BIH-NWQ!quP$ovk8Q&)$=u?WcZAFaZLM zMA9if(oplkrusU{(UIX|<||AgYtZT!OoX=Rjt~+j>BIkc0c_e_;Fn_Yr~gG$pKr4q zGU5w;yA+WM5rxHC(s0@aelsoEx06T~ap}gW z-t=6M22Uddn2#a$24($|qWVN@!GcE}!7AraL5U4OB8#qb5Clh_iDV zh9s;1ErHhlbBbLpkTd%cQiv6($C{)l$K(c*ZXND8$XgmtSR4N4rV6Hj$RKaisVwJM zZaI3`$fHNGGd}oT;>%^KgPW0<4e%bPvKKmWp(4XULQwBl?H0Gep9R&)vOM!)Lll6~ z549sxe@HDFb+Zq&x}4`n>(+8B5MS6!VK5XqmiU^UCpkkS^l(D#UDb@V!3kx2sW>#B7pAo@kdmkvK*h!%$w!iNL0A;O{n!P z%I#hi$^A2SVx<_-lI()j_j$!5a4uvz4a`8KeWF>wcWHzN4+ zKMhol10oyOpHUKhhewmFZ^l&f>9Wi1<8r!Y;m4ZT#O65mvFnd0 zl3|hy`WLk&HTFy~wNyCJ`QsOas~A8>x!vE 
zUNPsAV<3*zMaGmLhHi}s;&z>rqwsJT&n!me?7PhvXnSH!Y>xFwxIsie@{SLxpz9sj z4gYM#p!M(hU8BGUM}|sYM|U1PK4Z~o3Dlb3Fs=3^QieK6u2!_}_Hw1K@D#eH@|ooZ zM~*+tR2aJiEA5-s?)iUk9xWBKUZ^QRIYUMfTu8xWF6!SNXPwE_KG$#Lg+W=W`Sqv^ zyHwS&K)=5`Ot)zKENFV&rs5^JfML#SEA^klOR8rwo+;E!yh$@>{2ToyX=wJVAKhS!-_Ir(UG{a_Zu0|oh_s^|zTZ!iSdZa9O zF9uR8x)kadg;+?ii7{G#zl{F zmimrd45h=)rX@x4Q2U3QeQ=dgV+c5e2}}#c>h>a|3=b}p#E`5S9rQ?42e3z@>dMhB zB~_Fv^aqTtgvCTGJ%N0=qzd4-P;G@;phQ!rl4fCqIq&jbD!7~7x@33$td%Ac1+>|qzh z<(%L{_a#_*5F>V1Sz(6A57@ya=K`uu+#jLT>e}kDOa^0OBXE7wg6Ub*#12OHE zFCm5^i!}ukc~oH{0xl1;gnu|nu*2tMiCxF)*&$F`Ykn{(cn|UK`*b>yOkeR4+Ltlu zWi^nhC&*8?BUEZZKSmx|@IWH#-QOxUyl2GpFJ6lX#TNhMG2yxfn_O1>N={Ya zpY%7-%$kMs4grG*_KUk*d=CC{==1hQZh;Wp5(E4YuO9UxJM^Gir^6v&4gT!ccq07v ziHzUv4&L(aFB;Y!Yq$ZXaryc34T>Ty5th;&d!SR&QI&)P!stAIbB|^6VxeZ>u_?d1 zKtE~FV(si-d8Xgc$)sztYLBLDqi z@6G>{`0LF;&%};dgcGFMsxVc{4kl^#cmItp`_D>k5tI{Fi|&DII~#c3a-&C$hjrLBaq za&v)hBn*^YuiJ_y56@4eKVkkZopoOvVaZB&PF6d^LrljY!NZZ6$vnlVgv<_&_^xoPS|Oagb$ymn+WKu27L}t78p;xi+`HiFI`db7j=V zaG>mrA~u79|K>VE!57^WR^HMoglzS*przqAZwe^_WU=T`_N<)Pp|DCcs_m9=EP|d5 zMof-(Dx1iTQ8W!iwrZ~TgLSHn@27`!iBLi)5-29E^=$rV4pp2N5tS86b)`gc@jNg_nHj5KBp{*RnCFlygSjFq zWH~_3Et4z+-~K`*eC;Gtjy~eVNA-Pz17y7ZV1C$?PIbES!gGs0FZ*y}_OD!y2z3#X zdJ$AL=jvkzjWN=sXaae~`DjDJ9H`Atr2WkGWp%7)(aDd|U>qBt%ikOc@`*uPL2=<; ze#kuH7u==tVd5jtl~`Pj9~F6$>Ty(&=bBZobJ`8V9DlL*UvXQMn=wxU$$7Hl2ES9P zxwL^*<=|hhx9>EcpDWof{d7C%pw$`mi?GS5Ho;}fbL{)f@A11LSj#$=dF!T~;N%0< z#O?zMc>>X2fK}R;-eMh3X7_fSAhQ{4Z<2azxHInsjJ3?ns*JU}T_;Wi#%uYzh9*iN zDR1x%<6W=8fw1qXm2(WoF$DytcbDLe3;@wTAlrMSiZic&Vo5AMpihb}ZMRa#iQip!sl30S6OVdaBIeu@7Dc6IxAL_uP}O8@IkkkC zIk~I{TR@99{pp`fv$u+GllQU?-WD;Z^tQv5GZlGhRrRc2Z1Q77?LQIbq&)hz8P9n! zKb!#yL*hbP1E2^ow)?r7PLzujKu@d<@3-ZD_SZSTVv=v%%-JIS0)(Xlm1=5SxlM!c z**8$`xJwwDmw9+sR+wqk_&0B8Z+^d&kCQ7_=f$O0%EU&2oKMHE&CXp4&@@1?$0U0) zZrB+Fx7c3Dx&YLr$(ESOw=>d0yp*$juIhJXGnHpOs^nCt9stkO_3_`bAC8<(B( z--~x?qTc>ju9cNANy9$fE~QR9tCkPjuBwoMfuIPb-OomG#4iAX&Pw`JAX ziKI#Ouf6Fz-8_%?-tT0-Oub+}O3G5yDplv7Z3vZ(>&GYNYnD@36duMJ**0_c1(epM zbvBReCp2Z95l`!x{e8EfHF~YFtI~@(Nu9pIvB>}@P}BZ;h&2eUNnL$HD$eAzpMQP>g-LnG1paC)_(`pNrXr)1E>&VwOsB(^76pfhkpO3RR5^_J}>y&yPl>9YN1q( zn%=R=8njqNsRas-8fCO>Ptx?YCFzW3OZZ{%e3&7H{Js!kL0=-q8%WBxempRZp%r5s z_+s@I2kPfpO5^9XmaOL+%2VDlhAVNBpJulB#jt(*-3j|Pz+=YG6K^WNC=ov z3aaOK^oG&r?TJ(%*crWaifix>33m#WfeurhRiqZNII0^CK=_=fF08#PNu#9EL+pK?z_JJW%7(BoJdR?THKv^5iC3W*}3l0N!ZwR!ZagCG)|MUci`J9gV0~7{lY)@wA_U@*O69> zfg4EPI`hkC<0gULqLh}bAg!YLw9X*h-o?Z!*wndTH6p7YF;5u7O$>C^5pPLi>u006 z&H0MUl4~eO>c;FEfYQVXs4B{W$t5em^yFy7lc2AM_@~cUDADni3N~}!q%s%^ESpy{ zxe5?XZYtVT%Zxjx`ZJdim4v*HGoIstvbpAiKFT8-b%(0?B21`!lXniW7(w89Uh60k zB$n=u%6{d(^)eI&ADPLxHiOJdwpV_F6Y>;hV8pGuLPfcORrC|_C09rJJR*kNup%wn zCzm5sh@gx+r~c26W?te(bM=}|emsl~_AsB%5^#&c07@Jw;etmcdOsvWYE)dE%C(5O z$uu`S9`35-VCaro<#cz1zX!*~x8f_`hIl*z0Zuii4}H{Ryi;Na5Ah5S(xVidUSV z;RT$%;*fK70Skv?o|FuC^?9_#o)!Sa9 zJF3=hS|y>+6ro;-VL|R3{r_(uJ9_V8+rB0*H1ex!Zv?0`u8IQHT_Pf=_lm_3jz^m? 
zqI&y9(oH%vs)}24EzJkG{H9r}VLGl*#cqJfFts55JXZ&H&N+5m{P~)H--g8NyAUy- zSLaQe&ncEyf|{xl?M8L4)@GISM=A2?iJ3$q&xNQc=pio$5rUO`Lc2`etpyOUHSpt+ zlgh#FkRon_8Ee(N9#wAPEMdYN(H6NdKCZE=^b|i=8AzR1_=@iTfQO-NEp2}p(DR~5 z6SzRq#m=aID6{-4>0)p3c}tVK&E}0&R%oh?`mV=FG)0ENCS2}~v&QUOZXA|!svgQ% zGYlZ;LiQ9A+xC1Dd0*lUL@OK^O#b6jgBrFF=#ICgMfpeg_B4CSvz802pHwTY913$3O9)ZQb!}9nyC_i5W6u8wv1hP(v3MF6BnFjYF+pC9}!uw#;$+yivo`tfkil zMR>C@op47Zlo2}KX2%h*`b13m#lm1RTseN#E=rC%(iMtLE-~65rONyEtI+2h4N}Id zZ5(Y^F=f|R-3oOJ`b`)Po>?R~6d~b)r~;0p)vf#JmAyMWjse?~Dx)8kgWUfefxcmM z=}Rz=(D#K_aY6k%rOR@j-|`Wu37&9Q#@PUV&G-M-+8ziX!b$T>NAE$`y73a<$~I4> zhx`KQ>en+_RXy~OyAP3QonG+;!o1L%2NPW2bT|FckaCq0bZ9-j3=N1?UEIaRCAlVO z97=oWd5D0d>l^a=#?|;+=Jo#&j->Ci@NNqI5>`?sjfx)R*a`czgP;xEk<81j0dC}q zjdN9sGpOvbq8x*zp}J!po}y4BNZebiO*JicXpT6o#J^dd@nTY<{q(2xe6w0*JY)qBcSSatm>1>rNSTU2DBba?nRh7vCbM;2z7z+5Rn$_8)Pjy69 z_(Xe9v5VX2L7-=|s7(pjiHTil<4$mY){6On^i~KQFdPm+a2Nl%AW}OphpfaKUQ4M` zuzdXY*DBS@R4ZM0N$lZEe;)}Ze?~GV+)mrKVq6_{2>as>nygpZRXgpK!1tSx!{-3m2=s;0Uf8O9JL$rIi* z$-twrNDSe61bSa`D#mKi3embtsU1D20!||wne8ZJi2xrxqR-lrnrq=|MNqgH+i7Ba z$%)Qru{~HC&A9|J`($lY>GAS)>2e$EgKa0HoW_?q;dUU{jB}XPP1U$2+`SZ=i7Q)q z+_OaIS74f20@~!2O2frAY40nuL3UpQZ`CdT8G)?v$M2k6q!wE>{Ad&m9hYW?li!Py3za|Xqz=p<5SQ>{V zPACX0({N5J$25d(R#7;<5=(BQj}vKR2w1Of2}#K)%0}YqBXiZ0sTlLPbZgN-3wPbc83a%DIDfmvn2ekrp=~C+cIK#Xg0T@h@jie!( zH){Zg)Gy?gL%m5erZ~Q_*FDlW@=if8;=7{?B6ZUnV3S?-iTS+(pZ}AUu8T>$DyMzm zp6syn8vU~@r5v2ouAYidLLD+U!Qw6CXq{DI{TM%yW*XJ;;lGtiUi3S~)M-nCRRy&< z>6lG%mObH0mF7BFXw-OabTDywbooRC6XNk+SMc^CmPAYt@?Hg_1)zWBKDcB;F9Ur^ zi;!XclN9pTfST*C7%wyNc7SxY6!46{S!Euc` z$;L`u3vJxCPN_S9fA&GWec()FX?D-v=73lZBJ9=Wn!S1A?+xK%*pogU@(BHM9lXZM zssQa4;$=-6*j)!lOogsqEwD)j;bS(OiI~P32mOvYEovHTOc5Lfta;&U4w^%zR77)K z@z*OE3td$^K=&wm`P-9qICcdwGeCZpEC%NsA#1zz1a>r;J@qInen}KxN-$VS-{LP4 zkJBtYXS{5VfRDmROD2`nUWR9`vcQi@%>GuyjL}`T8a`j}T`#^-Xf%yH>3{m%WNI_xAScYDL3%Ow5Aq2Z!sV`36SS8RNPkfl5eN)+X; z4uWj-o!0gbPKwK1J6D3S|+g5UhCJg9T19QJ`cXJ*LCev`Z>>q9*24xnGY~Z z>Z@S$O{PNbuoY%w$OF43L`*33sqH%5mqsWF#k-%WJggvyJsP6LDO1X*C=}Vl7M~B~a$1b5>LH>) zlKkWw;qRhHL+>2lun9v%g%z*@6cs!mR<@{Ty}BXvWz$asLFh_X4pM}f|3a&A!I%oc z%koY95p+b)lld4~CjJ@>B1gMENV1s67A= zd2XY*z8oJpY9!FR=Jmhv+*wwGXBtjkntBLZWeQvG8bDFlm3n~*2FKdLEu#bU@()__` z(+^)o_#T@A%GrBj-s|7V_x?`ySv4Gb4LZB^L8=&ei%#5agvkrb4wHCWhUqisc89}9%f~$B2KZi4X`2fFP%$?7 z9<^}*bs(A0%(deAZC-NGl+{iAhT`*Dl5|?BAR#yPAg7~%HK zs8n#;5KwnLoGGXVjXDM2=(96O-uM<=4Dl!+@}(hS!A7ec<4%Z@rvL+}bA!x(tB>#n z{y(P9!K>1+-NRvL+xD&|+qP|+Q_W0GO}1@L)?{mE+nCzfZZb~aS?fFN{14CjKKFC4 z-}Srl2*ye@5g;W?p~<0e5p}s?f?$5x4s~d+p?jj>xz1aZh~%9iq*<1R3n0Ui@bx7> zzhr8~hrG>f+__tVlzs@mW&=iGUWo*_ylkM=MGYS;1PI;a-(4levA@Wo4QUs0r=ny!I2kq9WAV$dHa zlb3yALo{DVdClWjd(E6&LD~Hr{j008dC#gKWSu&*{(n%Eso5{L z^>-=1Yjn{+m7pkxmm?? 
z9DzKe0T{j?Q`q@NjfJ&#QY(36OfvgrcZ`JVC__LiKhuMtUdXIv=C+DA5gtC64 zj%dcLeEBgm&>+M?u&xgVORnOU<|cyZn>H#rMCr zLZoQK`R3YSREqun+70G7)z^lM2t=Lb##l3$084WyKH~8st>Ikjh}0pHr3A*A?Rzah zm+mD;Un9w5SR5>=9fjaQe9|}%I~hMjpk0YZ$(0?M4qP@{2dcx!`hwteJa>t5`|=(~ z@<19czK9m|2bNeAu}1WG*;2pI?|5-MzjwrKeMTiO2DqBr#F|m4AYw$_u23);m~m!k z<4N2qfSMU~Bn)s5a$Isxl6DHF#7mYPX-yW-ef@2Vc=ZTA{j5^D&&rkbm~zMMng1j- zVh+D5bN4k-thN?JEN)2Ry1SF=Fo0GDEX^<=4&-)OJo7aYAaQ}vsXg;|j*4xQ^TLNX z>&F?7kZwqZi9O)PJdZ?3Owq%nfxCTz>GUR#Y>C8b3|Z9zJFb|vZT0xLiGYjb_Cng?a)8>=MbiDN0r8QDizh#eR}?>!&?|Z{$CKHZ=sO^Y*R6q zi;*T=YkZGy(xAiCNiV6d^=q3xCA2}pDK&jL(-G z)Ge#&j?eY1|MUh+uHUCzWMU2@QQ{D(IHQnCV(NF}iRnExE%JKeWBhfwsb}9W zDeD<~@;*x=0fr>$SUaP%qkTr;TAVtW)IcdyR}QqaqH0zID_AcgOCKgcFqWVApC~{d z)94%EXI~EcLuU&|pss-yH)F3fBD;)~Z2vm1C~W?i_dwVDSNnYGg)dRixI`tez9zAc zZ5<}PX>1whb;%g}b!hu-cQ@ZIcYHI70+g+RBnHkrEF_QN!hvyre^QZVJ%L8ftADtK zU)keR^3J>a@?Z_OnMrW}Mw#+3L7z@{u)T<8+T6a#q!9ZiaOkMvq@l-um|7_r$+KdDXuZRxQW`^uAZfrf9PwVnpJB zAA9n=aZOUG#9^o`$?C+sM7FlZl)U$vs&)OBL5{cqqRRw_8 zV$Sw#ReF9{YAj#MOnquTlr)c+(XGCSR+31w?ypoV_Vzm@Jodii8Hc*fAA zeO-U^ll9#y(&`fQ<&Wlf=$8LzmNZ$ZlS(6}6QJEt7M#xBYn_adXz@QRTW`gz>>ruhfx>By3@2|ba8FAzg(>A1>zd?< z9P_7xr15vJ?>40;A0FATn5DZOow2c^8)^!2=bPUv-)Jkv#TCFEb96^tX`B4=DqVkP z5Ml4BxrQ%x*vwnnZfGu97@_~>I;hE$NwGutC_G^9q&5^W`eFG;b(t>&~dPwbL<>Pam^=1!mC@m^FLIdcE55nVe;!aJ0$w zxCrSlYSCM{Te7Kp?czrDsv!CTE*f@})j%_-W$!QMY9Bifq@bnnw0Crq6WGGJCT*C9 z1b(S0z9NR0Y`XMEE~a*ytxC(oMZ&e3i}5Jv8grl^>Ti;=Cv&X16{tWQG~%i;plqaz{J{ap+eClYCy2X2u~^!qya> zyDrk#!*2XH$3|PR)ku#E1==P94t3vXS?F`?ICJjfB@Ve%hnsvx8I}}I>q^3 zq0hMA;(=CcdSYd}SdTH>hSK>4$S)NC)b-x}{sOXfm1RKOM%|g{O{&QiPSbp8A7)w-#67b?`7y)j%LVWM-a@l@2+nZ1s z`%T9ipNf^Lh!?L@B)TrqNyWUt0= zgPP_!@gWW&=qFMzx`65OvuKTe*Uc~ zZ0V4K$W;Z|KluBoy&zdM^O7*G@d%LI?;+giBj&AEZeC_4xO7$6CYj*61w|iK5<6w9 z=N=YEY;V#E?hi9~^!){o=8ZaLZp*?_mQ|GV#i`_}A-mS~opzA5paO{4PWtpWKlfS` zt#btS;0F4mJG|?70W$l)oM?twcA?r};ljW|= z^a1Xt?jy}}eSI@>d_2qZHDdU4%@@$nPtdCjvwx#=&C*GAt|9yjFJPmm%z+m#pd=I1I(fPQmu%Ia7+VwGa(7Ae z{@K?*)VPA>vS;pD-5qtsme=lvc1Eg(^`;V5rebzEvW(461wSr>2q_Iwfd{v>0SRU? zkX0Mi%j3(hb^gY=FGmzS{n0R(fYc@^3RSY&-d1^n)mLTCE}tn&W8Po?xHjTP29iO+ zs7`VA3CT4tLA4wv{z5uLZfrJbZ;v=Y z)YP#DW%Ub;W+k;J`l^q%`{B_5BFE?_s!*m{;>pXidG&gkiQi8OdYqVMbqE?xm%c7e zRlRvB#zwIpT|PQ@L{-UVx5IN9>%i4Q!P#0P;QsZYMeL?abBCQGf^={^i`yC)Bc@11DowvNYQkqg!OV26?|$XwNhkqt|1uZX)+ zn^0MNMl>@kGC$koQb-Y?HK9^af($89PahrR>GO<=vy&PD*yxmaGpjShe@3@c68vnW zSZlmG&O)QaQm1W1ZV5hZ*dH015njFzTYq-l^InJ%pR2T8kN@qY#@0>n?i1p+WPsoq zN}MRpMPtziqo7_76sC9EM(k3oM&yNUY}+&ruNcSP>74#@I(axVsv!G+Zs#J>^j*)u zT!oq>q6Nl+L?tapk#(Vruuv}i9RYM`c=)ouBVBxK0%D^D^|RJ#{THu%our;Bz(lbk zS7HzF0e&lFKloQz&Z~(z%Eixki9xaCJP9n>jM)@R6;IQ#GaCte#*|T=-Uayj&=GD? 
zrhPk9NDUfH%^XC3HNlx=|tNoNJ%Vea zO3k6~7rIPQRw(gNI5djeUs0zj?m3vx=vX&m!T5L>&0Y(!m zZ&*HXI;rQ~tbymDz-Di?g?u^E>e+g*3PhF!8Z_G0PwY6Mcl^!!)RrBROHaBl&Ksf} zD{^sYdJ&}m^eSb0Fu{6WFh6$S*+P!}rNOedR?<717$sRc<7-#v<+((5h*P zr3Mx?t(L6Lb>kFd_NH-)CO&<`()j#jpg=`9gLli9xMSMrGsk#}mbqyADBDDGi2iKx zSTScS2#`#Cc6Zq}#xkkmi7i- z727qYr)FAD^mghkZuLcSrmG3q(BDKZG0+Da{>S)1^4az~l_LnwKpNcZZ1R>b$o;1L zZu#}MR;3?nH{N^y27KhA&1S^y9I?z(wL=hJZ-Y(#Oxo%*WJA|7Q?bsSX!MsK{{`#J z6OLFLbaO94n)5 zLjBX~3yn@##wM0P&_9jg<<(uo*-KpBsB#b75Pk-{^3GG$WXcog*}*_ymgp=Im3|Y0 zg0sRDm2^|9n9i9Jv#S6}#-P|&1<=mJMZzRZ*F^>!vA=HGr~MkSDsLg!D($^>TpY0( za4z;;;0nqi`Eps%=@P?DokhUVnS=5gYq;&wa~1YT*>%oG{x7y$k_)LIXJmG#?fLC= zR>@wu^tBa)?%9Fwdo|X<9UxI6IW^U}SiZiWjV8lh)eX z``K1uo)iuGtTE4*6u%gCw9T5jivJc+(;B8H4~O#+NFILQEFhac0>zO(+chYxxr+UWWG#6iVwo4nErk&A#?= z`KOt${Vyz}LozOcv2^xdDF^RYDhahYYv7j1>lUyE<0<@P_~MiP)+XpD8|N1NE(~M; zh&X<{R5k+rUmhNrn)KJ*_i=|_CB(N#4YyMU44ijRdn~3y+&!(li*@^bAz9b!!zR)C zdz!Tu)4T@rK1Gd|)SEXGE;|ghHa7PCMn|$0EY#(%t%v^vZeZa^^pG{HlA)|aB_JUs zzFI@0iQKWCxq^6lo=`S?FAUJFiV6X381>r0>TJas(&*NY)`0F;FrtpNPe9twPo*

    Svp+TLYUIys`86;Rbr>hWT`Ge+EP30WD2IDo%>tNlGBK zva{8R51JAeNbK=J(z&|9jD-=Qh2o%E`K!vuo22NfwdFL#n^!Gy$S@V*hNrBg6M)jbgdYz+v2#(Ud%dgnn6f~(l3NfF zM;D|ri>qYrA%VdbQvLuZWX$4-X4Aw@tU(2WiWX91A?qR&oUIf~%V+?aX(Dnm)y)or zu1z0ZfNGE&_k_L#9<$p^cZB73?t74tqCM)mCW*Q9skrf_yTz@=`B)&p&qb|FC4AOg zby>hdfaC4}0?Wz8do!sJ(sM1r-=X#Qj-qH?@WrGi-KR~Sn);u&?*Z7q0ya&1z{dzT zU5wY9T#u4Hr&ir8;bgC)st64ty?Z}j9EO{TC#JJSJw`m)QL!v`!}`V9n|W1Hu_ibS znYxIMdR}8A3NH7=9 z@9>jDUVg(dH0b*xwuoVya)K;c&fMny5f#E5{;m3KhFuk~i4E#;d$RU1-2e@;rRGFj zS77P3g>IvN(}!TDO6q5ma-{(Y<#Y^j<*Ag5X67UZnY{{sV{jZBQ(wx7x3VxuAo@E5 z=xs*lp1dNhl7ldL?ZdmC7kQh199r%{^eo|f`%33533#7Vc`LSZKhm@7La2Uz-eZO* zY&QHldiIwelWRn8Hlsh}5d63(U^2X{R2u;X^TGcxE;yD|T{EgjAZ|3*OjHZLzXF;N zJ$3PiwG|f|KG>pWAMplw1@ep|oIpCRfK!2KL>HbxIolY(tv^sJ!Sa&zGKjwE&~p&Y z84uh)MG_%oanko{AJtLPe!}kFQUY;3d6IJ#-_jE|%Ia0~E%;7fO9GYPe`peB*~fEw zxgfzESg|m-T6Y)D6P8=p#ZV@W30CRER_@*2%b3qBKg|645as7&Vf zILKm*ut&5SjZKb-;$v?Qg+*XrsVh~s01G#p;9;tvRa*_p%{o)*qLoe_G&p ziBY3j3{aZy4kDN$b-^_P=&ze^9iG=r<3$qiq252$Bygn+8b&JJX$16`D5Dx8MgRd{ z+VQREQZ2vA11*pyoD^V=0o)JPRI&r+M&t;GC6Q)KOUw!-N4kQ}G%)GGu_dn}+dfHL znV5S=4L*y=itv%G{oCt`R^sEoyU1oJ`GO&J!59hJ*$8nU+@yrUaenLzA^-A8<$aJh zz{h>0ieEusHJ`9BpP2#)w-CifVYCzb%zfERPC5-CYm>4dD~8F{rm<={nE_4T&_6lR zx^Q}W%q+CF;CnsbAUc@bek@}?Fy0ZyLQ{|Hq1UNH(fOoN?Rn#;*NrS?&sN|(%p#og z+^pMbb_u75#B-`6&n!)o-PnP67(4r121G&n3VP}mQQ1a9LMr<1DjP}pO1K;@qHWzs zQQ}p!Mw6@pT9ck;6269P=loXSM;EpN0eF|eVrWgf9gcgA%@PlVuVwWl*7k@AT66_J z%D#`b?Ko~lufGhD-pYURZD7wIm}tQmf(e;XfG|`@1|m32+rpA*)^A1*yA}>w`lqL2 z77}&%Bq2%-XA5lX*Tmu^2$zeJ#(Y+o&+bolAwlkOdoXzH`6SJvh}#(s(TA$N8E93t z%jOJSRa=8ExT?Px=x)lS!TVTl^q~pgU^@@`8=x*O6ykjE`a?5=)j-Kihn}Ps4>i#V z7hA`*!ShtFjxg3}xz{kbsL&htOOrg4BZvfmdHQQEA>-&lK`7{@!5)10alr!id#&_C z8Qz7S;Hz`Gjr-T-bGa!56u_r2D&_iJZAP9YS(%h(1h*+lA97@CtihekH`2`lsxjK? zEj95XeVA&p_#V(W&8v}`I^>6`ZkLUQMAQod9lGbQp}6jhCtq85#)NI|NZUk_`9!R+ zNMrq;;ZC0`*b$KRq=5m!ji$Tjm-&RT)X<<9Q0si8@HGi*GB`OC#V~VQ92Pafuf+;o z70sKmLQ{e*l0#Dm`LjRnKKCh*=*5Es%E&>Kk1;pXZ#eo$B#i~l#kGC{pnb?8{`7kv zy;~85IG#_47_+y4c~K!{srN1^MRic);4RRST@_I%-9?c|>ispn*L}>a=in`M`Z4j{ z>a*m{l+EC+?H!t}dyCj5GxD;-2TEnG-r&1b9hf)-PdH3Yc~iB~BRb(s_rW0<5e1=U z^3q_E!4wF+@?{6pRxL*RQNy!Y zJ8w>e$rXo49sWvlgO<;K3h6$jZA^APaqRW6CW*o(10UmAJl6$iK4kercCGGF#9frR zN!?Nqx4d6{cQHSwetZ?-cl-(Db?(u)vT!vsfUYX(h~H42A9LtdM|+e@Iu&X_LjgX9 zTs!=a{kEGt3ohr068UbG;+Qxd&*DBn(MY9Gs=Pa)JcwY}9bi@fVxqLLoTH z*wO92z&WcNs`!Yp0>`o(*6!N+1CnTJ8cEhD`d>H&#Pm~eB2;jCv1QkVmTF4ZVXvfIyzoEQ$f|&)uy=cidib7V*)(ba?s{4CTtXP#Ed#U!=N6 z@eYYZGyw)~NXcJ|rvQjRJ$EccuLk6C3I*4ZnOyVTe-t^G5YADUxvBRwEh7|b3~rrVGDIZeURxj5u)lg0EKGRH<=dB0^obMH5quWPOdz_ChgjwVM%(*GkSzWb3dxR*MFcM6pGhU%Qv@an^9^!wiTF#S{l-@g@e(7RY zQWYX%>FaZo7fl8sFw6H2ZP4*+Qf;Sl^Xog{9wk(G#w!%?MX|!6YFr`u`w5!#O?O>M z`2#B%$vMaK4$stx`17|YbVd+{^h*3P5+98*deass+xF4s&=U8a$>EYf z8U7_LQ^B8gSp{8{MnoxFf}9~i`w9JU2nM>B%v-k4pqq-4Yxk%7Pjqrm23OG$)&+k) zfqA!C!HO_(q^O=FsJwtBN~;^Mv+@d}V2?;yv_JO!#%f9BGvtRspmq7!@aMN8`y#7T zLU{MeZ|1+1 z?0@?+ngffUGY7YG74~~qBLkTzC<}k*u1Dkis8A&P#d-~m7YC*a8+M)7Kp^Zh!_KqI z$y7TQ3l6z=8rL8%fJi|PAUQ3y;j1%9R2!XBKoj$PrOyBVw>>HJF3(`0StN#cgbBIn z-*kA(UJORN00tyIIvab*&r*3~DCH;40G&V_6`DC?d$eO|6V*wp(V9;WZcdt!6oUrC z`ySdmy6|(INCeR#-m%wKwwkk8XHGFwirz!gJgTw6Ktut zA$45OzWP7gk3Jj2HZ>PJy=I_;P_=HMVR=1qzdL zE9i<*|{<dV^t~MSc;K_C@zWwbseG>a&csL=mPmXFgZ?{o{@1;px6#SX zubvujp<&&5oi6-u*p#|;&Vt>-R4jvF#+K~_zoliz=a}fo8wDOvt>hH3yd7(=dA`sC zLhSf~8Odp5fowDGKN8qN4GBjmLSAI4?(kV2 z^bLYrg9tLd=%{XMVra&Dbggtd#&7D``8m2~jZux+-e=df~! zC3SK}V6Fx^`3mjMP)25*QlUu!sqwGC*x_jxFvkz)ohfC496|m7gcNbA2n>(wSY=$b zyYER2jp7d7qIuZWIA`n;$c2(h|>MzbEt8z-tJU) z!XSXG$CG7~0O@8|#^~AuM(qG~3L6x7s%qMf*rR8Ze#a zV*DR}`Tkw-DDp

    0-Dw!EpQ?_TJ^!)-B2I8WHnbYwvUNKJs-lE3=$w7cC2ydHxF^1Q^TMh-z3! zbPN_?WYgv!poQE*asz}=VO0a6av95_BoJ-@Lc%r@0tv~Mv9qc&>t^0p-t*XdeKUea z#E6J>%0eK~AuAg$XJy^H&)I8z-<t1Td-VmjsZPl+Xh%9fgqn<%K><~jo8$q3S$w#&KTCjxw2!XmZv|r zqDMX}PvSTQ*s_xQtGF%4r?)rw8~j$rZGH~mQy4x6@J1u`Vv~2l zdy8AtO-rNK!bY3`vutFz+4sztcgyC%i~UhHy-brK44TZPvQT9FDN_jg-jw!%=O7`J zabAk1T9_Kb>gH{N;*F(6MvnoG7i%EH-aOI+LBW|N}FFjM=@|*X_u*%Hvxm>ozPuslGr&D0lkRYm4eJZ}U>FRCD6 zrEU*cr}dP1Ser=8_d!l+F-*s_iR9qD64S;IyS^Lga~GYgW`q7e`4f zlHsi=&|L7uptg%SpEg%%&%a57WM!xh$>uUmMMyaUn#7m8S_Q{%a}+H)x*wY`nU9Dr1OW-=uT za;1)AUGd)g@8R(pK+JD_=R5eN@Bcx3`2Gj@#z$Ynx;)?z;OXhP)P8zgasZP`I=6m@ zG93#UbN~4G0+-7LU%vVhzx$&f0;0HF4m`bi3)_yLeEt(0IB*;bfA{bG9en)pCpd2> zKK$ATc>n#c;)~Bd!4H1xckt@D#u`C>Gpqqj?xe+pU(%?a_MLPdMi86Nyq zNe2X3E;(vorFx`t`^572&x{g1)&W^Z$?J7#L3dO_k)f26Duo6)WT0BYJeoZ>9GREC z_ZiWZ*-Y*8Ob$gzyN^o@NS>c>Ve^m}PT09|=I^@CUEXC8#pPIV1D?(&cAzns@Q%n6 z>nPKBudM{tma;_2zrR&=E?5%Zwb?G^O(UVCnFZo^TRM3AXM1Qd;tIy_X{v!4`?m@3 z@;J-l!8`8YNGdy4-i!C}z3Y4U?(v9^eBtF* ztPMPx<6=8rqHQn@dHw0(nRG0yoZK6LGdw=5r?YBi~5i|30hxofQ5Y+7Y}wR|xpy>NC=rEbkMHp#$SC z`MF3rl`5eCBN}zEpbHzw@6EE{TqOf4TMx){s*U@|ug`Z?dFG3R3Q$Hv=Q%vkdk}F9 zp0#a=##D-Ida3+FH)C2!G*X|(=6&KLm4~UP84+p;Z}$Ao#dg&huBaiFc_=by=TbP% zRHu1@H>X|0vfdUn&>6`;*o>j38b{MNPf-`&8$KJ!TPWZrIu`E^{a(PfRo~5|%exP7 zGd!bf8DWY7Jqvgb?K{Y=nAcFL#=2>7q#WJZ>{Gp(102brIPE<^M+##MnOddMkZm1F zQc-dZHV8)KI~SD5A?$3#xY)Y!D3@kMtXMJ#)e%ecJj@NVX*YA$Op7{ZM5`8yA-Pq3 zTdh5r5DB@Z!b3EMeQxLk6=_SPI4v#1>gvsi;O2{)~q}bOCpV^AjAYwi48_p z{h^>~ipwq9UrHX~Q+*jKI5%3O;@@3)R#ZedqCSwy8kM$X^Lv7|RchA4VyW~hqf=Q- z;#rP&-O%ccsJHWM$a>!-(&vz>K^ZyT-aN3yN+0WTb}q{ndLV*oB2j z(%Lh?!JMjD)EKR}=&@R*57u2FW8jzPST5(xG?fHA({gaNvIk6MyLu1!3jWR1z}0Yc zo@0t;6VV1GsjQDhWeyazVd5lk``&k|J1TE{L=TgjB!qub@vad*Fb6%NgO}xP0Z}CQ zcH939^}hs{|IE$a{Cpts|BNB=*WK~YF~va8l4}{tA?m(r(2xfhAtRO%?*)fS`681Y zz>9Los0B^_jSI?ZyX0DtJI+3GX+e=0aosQp&g(feHoXA=7GZa|9no2z^ zDFO3JucySHedfkUGMz+iNJwXt!81MK6yeI(%peK|srxGaC zAOTQa0FK=5Yy(aa93DfOn;2dlj@JS_$;M~58-9F7+Qn}@zsB$E*Z6Im_!JvIhT$~~ z&rE7Sr^%Fue-0ogo5sgxQ-^DMq+*mToeII22uC{8Y+T_1UCsNd*I#m%V1qj$(6|r0 z(+WC`$!nXS!jbQh>adR2)+)3b@+m39F0gMgY*5KkVfpt|yC|6cYsz<|rQCOCxeae` zuW}QHoY25ZJLo|S(*xAUX*XiPMGU(lx>5Eoe!k*4B(gpih%2Nt6LB$JKf=eX?H1C7rOlKmR z4MIioo*)a3vrlbnwa1c3g9u;Yu&?DAu0rKzJh#~SjAWe}e^0{Hoy+x#ZpMtQb^?Wc zu#g!2nzfEHqBq_jV@iN?ksp>xE2RiO1+5`U!TO{VLn;A}cX~dL%Ng}(1@|M+l3oWH z0Mu>PLwGdon>9fV9|Tq3dc_0Bk_O%pk`*hN4XqaIq7BQ8WoNA|GNLK-T`kU2#R@Z7 zOVYBqGVT&>)ne^Elf3ffRSjj%=6T7cIIVv#zw#cw@y&1ITOWN3?|tPxJY27M|AY7O z;^m7_FdmAR*9YkJ0k2=b!g+hfl(GnQhfcZU&9w)eu>ww-r#y!cz(X&)vGV? z-uoZmP{r^3=!bZEd&4K6{1`v^~bK_qo>L5IauJ`mXr-*huVa#Y8KOxN5cJ$J|Aqa#+bE zDMu+HM1~6K?(D@~x?=yi9m(u9`O|x|k8y~RXM4(KTQS$r^RhGZte9Sr zz8G-rsCjlP#L-H+)R$&Iw{ut`zYExZ0>^)3=Ktr<2NM6c0}1_WaQyS-S;tPVZ*K9W zH=YD7$sPxpbj`+CnrbvX8fe}9Qq|_G8M0N2?S$EwrL;@)#VQ$LY?!#&pA#*()!${! 
zBnncj$t=J;v#bkjPMI^uTVwzsc{p-2UeTiY0;>++dpATO%eCNC*vNcWDW4oA>};qh zttFMVChPoX%06Qah8dR$53Ra@6UH<{D_p$da(EgkRV0k?<7aH0&F#5C>B&I;{?;*{ zE}e#=LuS86B$d$%$037FDQYvk=M=M~pfy8D*{Ti+J7rni#4+W=yIrhYan0X{9%k_Y z`D20DK`0evRMw!~LydH~E9H%fvE7s=ZKsmQEWm!?rZEqHJ+#sWbRAGEtV>+nYb~fM zjup%aSxl7)Nj(98JCzKo8Yv-~YKqj*?VTRe9MRbzl>J2qC^qL8@VcD>WV z<~y-%S+t-qgE{$yam?Ciu5CQ3xMWHn;qF_e(x$S4fw_BLV(Su|Dy zw7MLXL$BRqHfgQ`)#ByFwvp_^^!O>sQ@`iAxI-s^Dit~l+|DJfg^CSdz>4O3x!?lB z0|XZcUIO?A7QUe`@X_&z?>@Z1cdrll77qM^KH|gUzLz&R5v3+qWlJT8cx zfJU^itbppC3Bxq8RK#Sg@H+5Z%#k4Kqh!=ESYrg)Y{Z#`x7x;9!RxqKs5q{SuT;l2$36r%09%0##cMHqCMUk!&-e_6AMR)T zgSTJcclI-WbUxu@yv1kO_)>sp0B06LCVW@({K`l{v)ycw5mh-&d#3l2YbpMtQ*@U_ zwiS30?!7ff0!@ntfl4Fr5gEe&s0!>8?HM*I{7`ok*EE7A=t~asB)L6QpPAiT@@S(P z&iC?qOAen3Z=C~^{jNe|q*3&pC1sy6Zh@)h+QW8G|3nkzMYQfetNS`Xv-P!Rp4oWG8y^DnBZy(XeZ0Te!-=CNLIaSc@6w7ex8<5 z$#)ipQ1%VJR48ZWkPo#;LBb8)yyIR68={zI?t)!!&J9M6mRL`Ob4V zA&^-+m;smT1s}Zs73^&|pKtj3*S?1D{h?pN*T4QXh&Vp_=v(;cqiER_0V5gm16mxv-Bbr#oa!H1@ z33tKZZKKt7~d6P!qUzVC}F!U{hy_)_R@=NJkLi0+0Zn<$_iQ1puwwC_%j?^d9?eK@HQv3sNK3_)+$o60@-m0Bvb_f& z;~a*YxypYdz+V)T|7tTL>E{E9!I03)Up3gj$f+?E3-!fk?`0_oLo3dr2w5FT7dheD znb_!;0(H2xP?2E0wBA>=6M;RFiiMh*Q;Co?c`FL10(+N#iWpld>1#@R7CMaqeZh(v z_NFW+w=~-EO5#46$&B&L`7SIE9Z6ncrm`EcO4UZJ_+k<5YD zBS1WBt&A{>XXCg$U@0IM#UUC$Ll@5TmXg|p)27Yg$ZddT2ei*nvaD3MSTaAD9I_b} zU^}=Iw;P`J4MY<^Kc9Gde#(aaj0oC&HYklv6o*hbEuUG{9S3EwZ8$AGt6K-8Y?SmP zN)M8eTC#hvl{TUjYnAj9^Q^N0XO*$#9#cKB9kwAhKso5__ZKxp8*{t3tW@hAjz%gj z(GLyystCRX!MEfA-_)1*`okl>b$!IwE)V#Hb-_pafUlm47w))v3@UVIs!Gzl4`>a) zY$(7^ZN-PPkj*A#*I!a$vY{z;Kf?VTHmJSB`h(_kI0s5Ao=-zu;znXTISN@oSM2fF z2AkdT9gXLAi{ez$zzwfuqgBG>ZAmkhc9!Wp@C*QiTF4GCJ618!%g5n1`@ zJ2TzUdNOI2kY>UTOU8rJ{(M z8{)Yho&^7q(xDFc(pI)%0dfvtFN-{fVdc3tK-p>GI62-PsTY-Mzy^`OOTEXm?d33g z?>GaER)vtoo82o=_&wB}Htv{mzF=;0v!GoFTT#l$YojgRc8W}LDphB)8Kmx;F(Ars zw4zhf{^`^_ILGF#9^~*bSSOZKL9H0{*ze>3#@ffv;(4WwE>x!J>z0{x&@ycLK+;-=v3N=bTBPK0i(q|8Y$R=!Pcoy%z25QvGbu^V^Em3 zBJY|EVyUiVR4Lgre5Z8m!$(6eQ%LY`jC~J$c%H}vfK?=3L}VW5?YlTr_vn9$J2-4m z*^tsw1?we&&kTp*SE6-i0sAx@x++};*29`(aqf-V zEo95@edR0o;DZnF;Rhe!y${~Qhwpue_da+JYh7UO_~NtA@O*oV@BYFs;)4(0$J28t zAdbrg0>Mvy@?-quC!gZ=n^*Yp?|zKu+bxzv#|3X*zrxeg6ZTF^JAWU3Y{sOauHVh) z_-uSe@T}YCEs}#dPn(!TGF$`>62#~Yh}Us=S}YcJLRl-_WU|wo)@TfM%CM5G97z+R zUMHFkMOqJdEft;QOjSdORvv@3pc~|1>Y{g;j|UO}Bi33AR4ElX%lcEDvC0hzZQE{q zo@DoEDz0MY-+!h7r2^~pwp8MAxxOt^yr?v-(ewWy(Er#4f631C)1MC{Y9OJ%0hd1p z@w+%o`ZUY0#fj{U$I3~gNqfNQyQ4&bKr<8oPm3B|OJ->RiJ%+k{pNzHG|nuh74(6v^r_z zd&IbFZPc8QEz^pfnqV27HX9PPMTa7zhX*p&50V3!mZ~@Ff*U2zUqHC+b$XiFNO*0% zIrQS+r7pwOY75y>#Oy&Uu#D5BrGf*_DsEQ%d-3O=5Iu4OmxC6Ss*{f-?n}nVa_i4c zmLwqU&=uM0SWrd4R1ZyiMK)Ig9LG@+Q7XXYcnDc1JoC8~*W-%Q&IE#9VvvvrClF~U zVv6-}jqLbr0xSzgZpNbzUyvgTynQ<(sl&XI65Ji9J8n-;xa|!)4Y%`FF_8NVLbcf~ z)L70@7c@Lsr@mBsWg!TNQ+yQ5Iy!_ObM%hr#^7rc`-Fe6OkDSp-ioJSp&`f=~PzpV-E4 z-(KOj&bRo%`39fh#24ocpYJz#3&$&Qyh%nxsLod?K{Cb+JJnAj;U7*=n-s?<{*Fu( z5ofA|!F9Fdbv!c_14*^SYT%<-`sL9L7-giL(kBI89<^C+ephW<&8$>8dPr9`&(N$; zr3KG+OZ60?zDntR$ah7GlvK}DR=$}^*GfR8#F}c;e4p4{ud*)ClCw$~AbKZEY&B)o z5u^avfM*7iMYT7gUWAfV3=Q@b6-~8av=n`mt^RmrlmO^%_h0#D_jAyWJC{c*)R6NK? 
z%V6B?Pz4sqGu!%z^p0P3gh%WQ%WxfK0dLJ%=;Z5;PcDMmX*?dlimEyuXhTxksL# zjDckVFda2Kq4hzTVLCoD5yMW^jTR`}VN+eoF>%37z1a;_{p}j~lW*jzz$x&xOKIfe z8FKXb<0BxZ(r+qspc5gfh*S^jiKn`?&HQ7GkTOP3JCIi$kck)O~XB+ zN9}}d;5=_JsBpvWcEX-F3>&^T&drSTxn=XBhPwmXZeQ=cbEkz#Zkd%cfn~#d6TWuxD=0-*WP{bXMAs-r$JDEC`jaO{KcODt>Y(6M( zY2Y|s0(gYtnmS4=x8@y5jM2!J}XB2;e~kU%6bcOmGG8fKWqSR>YI4 zD0E%1WI_C_24Z{0)I_^WklBL@q0~cu(7~H8uuM=f<;d+-&2?H(u|J=N+KM zr@is&e8L;wc=LS2E8OrF&-fhA_!Nd8;lyVGymrK}B46Vz+SP?wr7E~Ld7TLdOomcr zd?@>d+)VP!&6!21M4acm--%@~>?h!vSFp(Rbc=Umk^ks&EtLU_tSXqji^2vb4;7jZ zvNwe~#sPhkCM63Z;~0Ce5$ez3AeN=EXXCO4s-2~{ksD-1zE@vxIrEh3l%H1INm&47 zK%^Pni`K=o#C7G6g=~hqB(7H35tQeWjFAM0Rv@MiWr%cyVHM9K-l8^kBYMusNHW}| zk<5y=Ji<1tB9e9h8}&+=liJ3r#dVS%2RMfO0t22sV1k;=Co&oGxn-Te&o=JbabH*J ziPi!sHlaJ$8ae!4nRY|Fs&vE7JCWezig&E&P1=-FWjAE?=AKcAj$U|5OS=b?A=p3s zy;Chmi1+}ZveqMlTzs_xX(baURZbNXEvOiES_;_2SgKtZpBr#gLH^hTMSECwcJg(k zpXmTEj=1z%GUw;3Orau6B8LWKDk3%Kq?QH6WN)D_&`7p)pbs1LVEWXqtwzIM2EqND7m5KF?yWs8}{~Zg(j2|Di zOD1y>cI!FJXY4(Pm=$y@9l;tO_kg@tI!Y?1cPn8h;*&#YVPB#>v|Y)hd;h%;@XLSn zkKn5xeigs{J3qv4{q_%FwsBn#xZQ5}^3@l3`+URR8<)$%!hzd<&gU1kP(mtn&7zJu zHyds*)lfI)RyGY<^oR*c6jWP=`uFNJLKZI8xUpT^Cv^He|wm1*a(feLMGm z@Njm60!e=0NGxocQcOk>Id_%dr?r2F$QLS^>CFS7oh(4;q zD-ZbEauaUCOFEC~8TBWg595e)swgqWGJ+)Odu9NJOy6fAFx|xUvUX-|^u1CSCYaG& z$3_xpp`9J?e04Zp7|e(ym=iGLrjVr&J8M!8G7Yp3Q8T-n{)KFPhx&v15#f-7iD+R~ z4oIYAj`b=Kq?ew~tI#S>*`0@~rwHZ#E0_$*8^{=)S&U3-595w(u9tPhUHzU1IJXt1 zScD=Cvb1I7Rd0q9id1yw)k$FV{`xo!llX5rVlUv9SW|ihDNnaqcGvQQFjRx*fn0m0 zu6biP7(mE|eFXF^A!%V(Fz7mJqP--3wwl6@kXj!rJ-=&V<#)M`14|b4LU^U(SeJy1 zMY6{+elBc74~=w?%YvP;VG2&css`W9acFG4j}@s4BHEj-Ov$NCbpYpi;$|oIJ~QFL zux}gtjQ?)Ku%9+=xM4r8d->_Wpa5-?( zfb&Vl5pFk}0N$RrX#Cg08{7ElzTwqvL9PlUjj!XiPrvEq4Z*YrJEv0VMu*LJXQUOx% zRx8d^z>nkkw_%?^KL1-Vr21Aq@i-&A)q_JBM9RJS8N3_mNJz{fqKveO`jM7^J}H|h z85^3PD^oMLWFSz~d}HZRTdUwv8QfDj$?tUy&J*}^rX>gU-S%BnRcS|gqE#ne31En1 z5_Bwd+bocb?eD)KV8Gic37x?n0 zufsl?n|8URnlSRMOZ{t`Y#XoiCO_l%WDURpk&#)YUI{o}HZAMKqX7$ELS+bs6~33B zr4%lOIcNMm2++GHL0Y|33Ol9{wu zPoG~$laBP~2h-w%8*iW9zW)7d*^u>LsNzp@s_0ofmk-%HF~N!~N=wC{vd(5P z^Ld@1tx`P*&Pr488fI9=M5?SLn4>eYdBIg)Zc5g?Edt2Nws^;MHV#pSQ$Ie?PyhfR z07*naRHFYbon2dWtQv|`(uMmyw$NI3uRh^1tOj&M+#$*A*{mvaXyRbOJ1Ui1`N{}pW_wEHtek~jFBPPV z#;8T)+3)AJ3U0CYf$ zztN+`YZvN?i;`tg~RYPuvM&fP^h#wh87eZ&RRxm}&WxEwpCkGB9u^R%OBQWQ zqB+0L)FWLL%e<*TbIrt&xNI%3!f*+4`9^>v7l5sbCm2oxPR+r#w4AwD&qF57C2?*f z1g&le`g#MYssR;RspQTcIE9s6>~>>oS>20qQ5Vo4d+o7hb0XEqfvF*y&_*`) z#gK{1u>L%ZL<0g=>4C0DWvk~BjyocWbF*6$+Er{DG8-Uc^Wq~Gn@wjvZijjb+Q!ea zp)R1+M2A`}hwPdR5uhdV1x(Fsc}m|c@6Y8!?%r`5)kK#!xg%Xt4asJ>IVd{$EIyKV z$gL`;yyl&_&WkOp2;I%gw@pM-*-EFF+g;tEqEs_)vh>w(>2IxbtuoR zTF%@x3w!`A?mC&0^!)sCIJ!qe$0najlTI(o=bVz~L9{O$izF?+<_DG@e;oio8c1_} z!mW2Qq_|jea8QiOU@JNO7|vt5;}+m&q#iJ(3(~a>2gNnMCqGLXN!j&Mr^@8f&l|2}^Flb_(TPd~+{pL~ipuip$f zyx=&ZAvU`Kx)Lz#%`$L{?;9&Y-ce^OgJLGj&GUjM!SzYHYS|F;YY{Hq51ql_wveC;*Jq2yOu7Es!U zWa)OYP}Q~@=*T)p!6v10a7of;Y$V}=sx1Lxj7)sGO+I#?OADTdQZl^UCE z*cp|0?hV^kJcj_tAy^K#6Sp0Q>HN>Uyywe3%S}GR9g%tad^~D|x3qz6ABH0r{n8UI-v`!o1QNVbQEmox^CyY4KB?ssbIGKTjqP zWxiUnR!J{?Pjqt7EF3J4IAcn?Nvx;xhj)fH2Sw5%z-@SG0>}Nv$TpsH4PT3)w6e$? zcDvNf$62AB_Ehf_%kHWbRt>8KBPoY+X<1_&)=E!hH=bpZnZ~K24%z)v%vLYNqR}h4U>=`wU$8gE^Ss-n@FruEyx6F_HS~7 zl9k1%@Q8p-ftL2-#1e{Xp3|fy#WA7%tz%^QtY{^K(H=t;02P$dntJpq5k2f?yr0J5 zq5~uyDGE9gOrl{<2Rhc>H#t-u(%^9_dieQSDUu>7O^W%`7SbA?6_d)El`zh6(Fxk? 
zA)3^gwmLN=VZzmiska|J-|Ll~y?NI*+`;%7(I2T2CVz~c6JK{yyjtc^mae(kWeurW zl$Tm*>_w$GBU$f_5G<5^MI)6V`OrS#;XeGiRAT60IUP9!k$fkGDRiTWMAZ;d!?sdD z<}t!S6k}I?pQcc594;62Y{Y3#5mpHio6@Sv`d3G|16K22oIlst z(|yNm|7-8P{L0^adVc=9zYhlz@~;Twk9cihc@w09fK5IhNX77w=F7pu*~WB~up)TN zPMP+QB>^u^6(3)omWR?Yn@P%@Df3gtE7wcOgaDV8CN`;a{48qBbUtut!5DFFGHfX- z-9+gC61R+rlf(AFip$6=lJahfwj@)=VWHwhJLg8O7*)bUpe+IArK4oP?gs`mc*3`D~Cl|P6{s=BgxUIIciTd>mGH* z;L@aols78RvOssz5X_``j>z3l_*2(IcgJ7D|wHM6=04&i2?|3xXw44Ox7r zm$iarAPp^BmkTbJh3j#_!@~m}A0P4f;sstjTyb0u$Xd8u4_q%-T#koeWE=r<)TQ}Z z$Fp{8QP8jg-Z0 z40ucZGtxaUH{@~`@3ks1E+vq(a#%n!7lCi-$F1Uj+pwWoeFh=}oC}Bh%EkmRiQ>jw zLIK^A=76bsLLT$nTi$;MamSIHA9WU<7S!y7q0(k16@i&&m982S@(U1}HK{z|z%iTk zl6{s1aKOT37REB<)>@VwRVnzJPdvR#MNi9AiU~#xZG$N9pkgXDYz>HRuwLrF8$e-6 zwN|AO)L2>j&&h4o9^l}T?dsSOu}ROE$l9=S$WF!Mh|%@Y9IChXOf%Tt8J);?gWhLt z%Zs$Sx3}_S5$J4Mx*=7yY9J+*5lDLuqYahtzp`fL89Ii}o;I&q%+I@4WF(_#s~E`K zsKlsFIGJfne2=QoFUh^-u$2(Hg}dlWjA2-nO;&Y>4(?3>63JD-kXREoTC<^%E`aIY z$>t&LUS8}Q^k?AB*&(-piCy(`^DJAmLP%RAXj3h^d|z#~)vx9yxf3c~XDYX>ltsIY zxNtUgUc(YTlbIWkn|Ln|IPDOU8`PckUt2C3;HTsFA6lTis{J0e=uGp~yM&6ezOZhU z29OX8E8ewE515BDEcHK1&QaNbv}`fXWRQ^(QPT3XXLb~uS9*ij`GT_E2J^$Kap$w& ziD$dtqnXTUQtxxAvXVVeX=9~c=E85+nWf&!1X#u{m)f{_C_F>WCML}bUsrX!$l#vD zd5=_elVig;N-5>mP~p^&mR6K}f<*3?4?3brRbq+r^2JNMd3uG9fBfUv2eFxD9nNRw z+&yHo9uzxcm^nPG8d8RBXFQNCLa4MD<#KT#X=eIE<8|0^JXb98yuKOMnx1@u;P6A-jf) zTQ2;}vj6ak@$R4Rqo(IEmW!sH1G*aa4Tx?IcGy>_j40!|bj~x%sPva!aBH3?u6TJn zZ-4rmAARHLSAOMJ{`)tA6j1ksM9^o&0QRTA7djR`VAPo9KXPFX8%3r)Qj7C&xxDhH|=BKFj3 zODc@qLHp4#E*|9cP`!r*eDD~T!zp{%T^vh&_P(u-z<7ysYE2*$Z73o=?&H8=#;R6f zGV;PzJ2!d)Dj7pnS8h^^6`jzLF$;HvE++VM+CB;hCUDcTp7?+yC^r##;PEFoLs6jj@+|>74BchVgyGl>Ny*IC-mgJ*(6UC@Hri z;IJA^g4=}~hR((=)sJ+(Hwa53xJbJYDn$gncP8~7hEKRB>t67q@!Tg|jD9#m&fFc_ zP=g20;lp{}aNcgPbL06I;K^+}@6#AWkQUCETlcU}plI>=bF(l~UAcL-pn2zm6i~-V zuGo90o+i04wsE~4xLmLKE(%_}e1XS@2dFN*eEEQf>lMf4iWe`gP!(J+4_NDf9>K#< zrAk7o5KvJZN>E^{Gc*-4oZ{<>?{bkbkTw@eL_1YS>T|E5#n?LdhfXVRc{u#`J|HRm@<(sd}`*?R0}xoe)Q-eAaQ~|Gqpt@XK;wprbjkSbOpPQ~){rR_f0}2_NY=?l0)|d}zHMfS2`xD-SZ0@|h z)PfNY=%p{rfqqFBTd`m^*a`#bkoW6inhF05U0qC;U_pl}fiwDUJUbP@>n_C@{*>nz zdIy1){OoB9ntX@!0FA^RNhmDmklIZiBU8O$$~D-^`kBg{Xz~a-%&5;C;?;$gTXV7g zi*f{m;m1}4B#6JMa;k+EVeE_~HXPC?if2q}`V~ z?cVoMkZNf?VJll1+=#nmmO}D`EP04s8+FsB1zPW@amisi)lN$pze|sfjKOLAT~#e;W2+_e zRB&N=Kzey|$*$N#j)SZQWql7i6>~Fcb&V#FYd&fI02Koj_e#VrYksf)CKe0k37Mu^ zXYx3*=}w*vftO6;@i8bgzt6CjJRg>1oMm!V&f5*!47cYKXOdw>)6#wJk<{UXA(iK` zO{?<#`AUV?ogGg*LK-^M0@BL>)DW~e?v~068Qc0&T{(IvC-)=(AGpsLHY*s}NrjtC zV>uVZc;~&TnncR9QSZL=pR`2ER)v|^l0zn1nio@r8Mn<|NVy!+ReA|l7}y2`no9A2e-9|{gr~9_xl7wYD-l^MY2NEb#fBxgUiJC;>7Thh z-~RBQ`8WT}U;7*X-QT$V{!mB={*;UUa%VyJ1zStJrFT=O-W0TA*+(R09m?97EA_MJ z$)PTWn~uSmDA-cPD$3pEkA%Ir3rsCwhSuo^^xIS3<_xmVKZh5b&*^NQemW#jlG4N_ z4Faa0vo_h49)jV$r3|}^P|9!XFf5Ov4ry?OHsC&?z6r}(FDK=jA-fD&{sb_Vab=W8 zJha0S}dfMuaEtRFCXY zEIF{Qm-zj}alKrENdaIj!HdV2c=6&TmM$EZ1M65=$AO2-73*@~dK}P99Jw5qfLCcY zhj56b?PFcA9%7z2<#6?_Y5v0>bHKMINEf*Y4tF$$x>|abz zNa&`~&rZGz3)O{6hNx&nP5I+cAjsY4m&I2p_q>}1A8IVeI5QH&n;TRPzC-bqTVrXB zg3IWtXCWrT=9UZ;e#i9*@%&FKn;wi4oLP)R#!84GA>Z_DYcrC{lP-iuPWw*2=Q&{1 zvBA}8X{UtNwF^z}LEIkv5syR*TR8c3!J>g@hA|*|iApz%rB_p0yt9aI0o82dd z#JjpWfM8_O>y(LbqCSY2_c5WKVuWD)K)s_&t6`H$2Qv6O3Yc*paj1y~DT2{-BTk!7 zGa;eAy??}neAk@EOOaVao1r|=cX%;^J~!D&=~(7)gK7D!th(DYBQ^0(r9V8Hj4h<4 z%G*27=Oo+^dM86~n5VXimx~YF>M$&5xi!qbPdbSw7pVQV z%A|b`9#{^-s!z^(eBhosP;x}O5EPpOcdBK&%aM% zzxnCU{6GS-{-itpXis~+pkBTC8`JE3fi$fVDuR=jgL5MjMLV`PqXEoqJrj#&WdV?x zzsm>GP+nDWZY}cSSDktSG*?nnPAMfP4bDOiD14Q#YZjK0sRQxS=d|0~sp$jyVwv~s zHn!m-5=lg$+j?@JO@wShV`g4%f~S&tUS#7znitBiX3j6V8DO5hrWcU~1vl}lLX=Rk 
zSISLFO^UQOX7uwik%P^MOUocRtTFMS%8*QZ@Vf?MCTz42Oz-NnmH~3G(DJeN#O4%K z6Z*8IJZ!UhuPG#dOm4yp@-Dp@B_z@a8B3-`7qh!IO`;8l;@w_vM#}!YSdSt8TQdz3 zQ+}uu)7wxTAGUh8rY+~^E`wyx2GuyhmI=2)u?$q2u^~*Xq3wt;P`6B>F>G(!0T5B0 zd!N|niSyieKA-XY_8GT*!`rvdIL{Me2J;*2bm+mMP!?;|2*l6QvF&qGq;v7)0FG+H zs$~xaMZSY;X>%M!VRqtJip#?Tt~r>vTn=0xF1S8i@%Ru{jv%I5+v5ps%RyVCXx${VO@sLo;ur7gc zVXD!}o6LclcGkRg4y-AYHHeLcz{=--^(NlydG3hl!?oCILS4M{pa>aRTj!Owa?R&+$>C_u%J>~& zNuA}jP0q8tkRv1cOG@eH!{X${r_eQQgFtPJgJ_?_MeD6OnpKTaxdrUtJ&fJ<)*mZr*#l>r@$ncCmo_ zxiqM{0(fOc_YR99huo^QTr0kY=MdJXim?0uWmzd@u+Bk}G$?;Wxyy(ot>2aL6Xa*` zyO)_{WYt9W`;_-)e#Yj4bsVts#C9vLmW<|{;C&8yYdb!Aof@*|`5Qc`xuo>up^Wc@ z$|i3XM%7cS24tu}rmY>F0ystly*L>Q5%xi}#F+S`GOUX}I}qfE_$wKWT(!b?Dh=sIKpMu?piCcW zB-RWE@roFOuK2TjNR@`7pyhPv&2nu(sc=|XAIr#PW{qxRfg&k=k|bgGq?BMLM%DNp z9@XCYM{$r*L@VN)Ytv8var}|e0~Jp5(V4^q;xwRaWRmOLI?ip9HjFd>Hf9!fggZ~Y z^@ZvEJYo#IbbQ|27SvxgrbB&krRmiZnuXyP!x&^?9Qb9S88URO0n$5hw204gMzBfx zyJ0y5(vHQt^Mee?j~1t6*r)_|mZeFzpOT&`#*E8iOZoZ0qIpKY*@?X!CnEL2%<%kt z;`aQE=i3wBK84+bv4j@6OE9;H-%Q+kOn)ut(CLru2t}(bX*0(r*?yk|#H?wvIaI@% zajYvY*9(rzf$KW(`1pX!<%;Vih7!l+z~yqmak)TLaajj+1%F~`Y?XBhR;ntND3&UY zwII5n$C1O3eCLvpG3_AST4{dQ2iDjIr*AWx#FD`dfGdYmr3`3sY;W5~fkxGZy7s%y zVFw0huow~}IYz1JY_DiX!}V;Th8P7D^YSA5o$r{H4Q&qb^Yu>cQKqUIpmZ7!3&tc` z*YKg)KHSn%V%c1p`y>mVEjm6b6V$MJlyvQp9-V!7i&qXO$u%Ud?9vIwhXEX;>D(^&|HP2$xS3e}kcaUjmu%G*`^SeG9Lk;eS zm!Tt$)0a+IZx5flNL@L^E)2BJ!3pk^Il(g;K7Xo>l1Q&M(v2u>MtjI-f!jdY)qvS7 z)NOdR-9|SzV0!hOc!O!R{w%GcHqIgrlH(p&s&^zbNj?@ocgvT`>oE%Mg_#yFLdRyU zwx}Gc+kmlp#UkWqP>j=N4rMbHsjRVO6sD2o-P&A+R=jrc?gf>EHmgF$K$stWyr(wO zUXlYaltK`PFNON5T5Y)xB~kO3s%fluI%_%Y^NcukAAa*z>dP2Rdc#KR;v6c8YM{o# zU?_X}5qQC>C9Hh)2hPRKNN^GFRG1vT767)EMqWyr($$JrY8fi|Oj@BOxKmMjZHDs= z(SGkRZDcMryo2Arv!3Y-N~Kvs&V4LH$RFDtX(T8=eXFEt8r2*OYbFd7uSWQ}hl_rQ zY-))Fbh=(dzIJ*AJ^G>E>uBt(ZtMM{ZDh|{tfEp!(p(O$bTSSYHSZjBCx_y`-NyC$ z0E6S{cEjG*$rs)+wv28jR9|dF?eY$Mr^oQDd1ym(Z$$yqxU}kyQ|$l%AOJ~3K~x{_ zvb(EFM@r6&Nu5?=sijCkua9Iusnp+{54|Wdm4u3qS%aqH!*}!|Tw-hYZ1L}{{y-Tu z%X6VbH}Xz=WG9kIo+G>$DtdjWNJ^Q*47aRPIVV$PogtESnn1lJ)5u^P%zdS2E2WK4HzE2_3dBy(|_U5|AkL}?bm+o zfBM;FNT~h^QUAx`+J~}Mc2Qq^KFGp2_lAqNSl;ximWRlc^rCfS}2gxT2(S&IeH@oQ0QmOI!I3 zweo|*mf-Jh&jF&LO4?ny}a!Y;O~u=*UD54cpfb*V0&ZV zHlEKLo}Zp0@Lw9P~aZYsB^>K8)}#g>wn~?^J147<2%UNEEX8g z)f0J#R!xu)Ne&&{TfTZH^6bGTEfL~cT!mt_3{a1ZR3lBodq{z~;UYwhUT-Vg90#!X zTP__0;SQeRHvK-jOe%4N2F=P9FV2LQf~=a9nYfV<(v{Eu8q)GEJV z<;LvaIZW26b{O(s=@`sGH*DGPP!&dPF!=pU?cz37(dThm6U)Ncu_ckVRcH_C ziMQ|+WCR2d4Y}svUhjW3Pqn9SE#J6PrA14fjrwfDWhZ*Va$w`VZI%HsUuZCGC&_W% zrIk$M=!oyO-f3GXsQ4U91)Yo^mEXfiXcm?hKaArN$>m!E(T7c^eEM0Z(WaxVGo&)X zMk6ITM5>{+>fI2e(pAT~?yh42X|22>lS5l&;-lW}prVW&o@^jY#e%0wnaqG3UuVr8 zeBZX!RX3^@t>JnN)n(NE)`}=D z19fDpa2Yr36^wo70He}3W=yjs@E0Zs{e zN5b-Ed&Iq$wKe2^@W@)z{q8Q5InXDKpW%)5&$e2u5@oFURJ78?@oQz}s;J+zKGT-A z&GFNpeu3-tA|HMG+yBC!`*Z*9=fC=^zxqG^j4~wTPl)?JQL^7bGM;syWJ*Iv5=q7R z75s#mzP0Vu_Vt?IZIQ}DDIJef39ix-7nGDMJ(%VdXM=pPDCzCt?~d)OH(i)p6y+4J zUWmbRxQngoi5EzR#-Oe?k*12zXathQ(A3I0e7X7TVNJ)XEkVP-YVeUHRvV<*yXZH+ zgE*EZ&j&Ytrdn&Fa6Fz zm^37B!es8NnwC(RH^I9&9jo)V2|T5hM#kT*JZ&77jOZR2ikOP%Y^#bMT8S7T30)(G zb;Lx{>&9koC~hMDcJW190<%2*$P1hq&fRzh?9#9kfcgA%!?~YvJ8w8o!|mxA&$k=QcMcmQhmV#jE2efh z_C5n_CkR_bUX0s3ixI)zyL`_QRc$w7c~0q4=vr8^a5)xqDXx!?5D{E2k9c_TfQRD& zmt)~_y~a@D@_^%5c(@!`3C6rWTyec#A#y+#a2yAY%LT`wSjPc5LZN{blIXRL{QOo+ ztyQr?{~qu1p}AGkjJwTfNA1LhPK{}~;#8sag*)sC`L6f#D!|x46ktxIR6ja|4K+X= zMS@Hll+P{8Y&QkayFj>5FIAiuH^pPqFXKJVv#&v*C^dqQVVk6?%cVC>!fg@+w)74b z=Q;-IEO4Z~z-{{P;ZCdIyzF+!Cgu(nIaPCWV>CVPNz(3f7^R@;CeerNc2vVA9WUq| zRZAP6(~78}>T{t|E$-PRJrHol;%YN=s=-QFooEBVWWbj>0Og>>tJy*)G*CyrhYc+G 
z0ZT?A)MO^e!XZsQ?qsf(VKYTa2DKpuVx4R)t3SIvDd{5#0KW&q#u{Z+@|R0*oCSV2 zk`wyROXkmix3~CJJJn~_o8Z_Uwpysv9a(o60mstKF*n|wERt*t@$%VYq?XOpvuc{9 zmlqWkNJV8b5qK~2=Pd~^;bOWDTK-%ObXt3Bc3v!`94hRgrtlG8sU7*Mtb@kz-17|7 z6?~7$C5&fE+e$IxM4AzteTmg@P44a*>7lIjuyc)Y@2K|-Jwj;plT*6yR_kA7Gne+{ zhD*lpM%$~j+pf~TSi87;#ko3W8q9#6OQ)8&bOMdi6KY=1wXYmXpc~?P?uwz}BgWCg zvNdd1p7J^wsVQ<75+cKTIus_kP%0Xn8b;e%WVJiMy>wJ(r4$t8leU6hfHfdmI^|EL z0OMt=^YQiFJ!h%pVL!t`7RKQCj`QfH^w5l7Wr9v8J9yavWMYc_{tQA!p3n4%N_r3< zJf6{OP(?AJ_Dq{YgJ3bG|1~Vm+SjrVStD>o94!a2RuI0+KN`zPk^3|kRc-L^82%KV z+%cAaJv^W#GTXzjzo=l_P;v^v+TLuGJ40`OcGVB2y{VM`m;z~QySQ+0r=@0D6i|rA z;3v5_txEFZl2`$Wa|oA4_M@s;MSSo+qfq#-V1X?4sHg7#m<2te$RcqRQNt?WH2NhuF&!PuyF;W4o zny78s_TQ7;I_cz5h_r9Yg%^imkuOm)a~5$VBWu%(=^Hky2C}Egonx!D1<@*+MYJQ6 zSXi3hduLnII5K~=A)xtLsA@@cwWPo;lh@qVM?4&*tXi_T4#+}nF3L-(y#*z;U$wVM zRXeYasZ#!)jHko0D zJjn#^aVT9LP%o%_<2@bsUa`t1?`b>++^j%%#uN~{D=skO)A`&Xr`laZ)F(`MXYdca zCCXgR*2bbCZQP2guihkyJ;EXu+4Il)01?SF%8bcl2UoT^Un0CJb+7X3~s-A zh~nHd-b2xh+b%HI|jv`pb~nk^mPa>bzlQPrnbjaky|;~?=N*SI0fgmQ?A2J;aEjZb)tY~2WRrLXNoX;GmT1cR zJGb5vPHEPOMD;xt^C<>+kUWocHeQ)PV+o+fgmcTd_hVtOt&d`}5!)B2g!OjyH6%9S9!^+OH`D#|qqGujr`74|!5G%MJC(-N#)6q%87Gn(JV_C$FI*0j(wb@AqG| zfz34p7)%NssZmHc{w^Da-ZN`_V_T102;z73yTlg9P%$!Any1PD9=7$W6>=T%K9&`% zTUW=&PfMO+-6oHCRRwY^o7JHnV^9^3K{v6`-o(x$RT z@3;vu=+MNWWh$eOw2O+a9(S>lRiifbb!YV}}v z@#ShJT0buuRy>{Tjrc6voo>w|LJmOYo#LwoA3!KDO&o9Dyn%?|Ti^WFSO2}g@aO;2 z|NJli< z0lli0L|89*u8#*VvzkW?3maEuL2C_)t8D@+mO+pXxzF&jqU{!J6bT!PKWk;d1}xVR zqd7K=tina#*;Ht4B#=-qM#hzNt`0|S)`>GE%=+)DqtFXgWb7RO!_>}DNSILJ%Wr8$2|A`U0-fYPh!Ds`mbjUkPX=AqOa znKq$Ce2#y|_QuT|uoJiEjptiHF`v$7oKMeqKA$Uobem%xE1!9ViopyqxER|!+o7mo zw)D0Qc>KFnn0O|^xMQ2wRUDiMjln{yAl6!V@%Vu2#T)Q<7WR4Wu0cSjHCgMtQutyluQ%_8@F>xG|3fx!a9J6%M$W|D zdZeP;0$SXX?;@Qxe0(MyX{vWR6&7_mlp~hs{$S=L6QiV?-0We{@M(vW1~hwlf32B&+GndNYV(5n zcV&n*y5XiOLp(58`rKqBBcMFzakjcyCuEgRUYgg05vz^~ltkOcGLHOSLDtI)BLk}f3#V72oP(f3!IlKfqNC+& zmgQk#6V%A%kaMIdTwy}7@d?UD`QUFigp{e#$w83s6oN8W2v>ut~210gWIGSi%>47 zYgjjHxn!i>F>!b&+x@V9imT=tT&AR+&X=M#rD^2SkW6rhq-tK;G5ho&~l%fglhsF!sPP7-`W&R0bY@J=?^)jrz)q>su>5D1D)wzS1p*GrZd8RcBY!t8oYC+ zzVb6E^1P*wM39*F#U-HAkj2zrm9NaClV5Ujqo&`_0IB6owseySALM|VMtmCAfJwMK zy&Qh>xR5k8CVsC-Z82N%K%QAk!rl_wgWT%)gg6Q^DMmoZS&PCPGc3*F3}VYRZI6pK z%_-lYl%9ATtSf)0*4`=7uj#XG+e{{{1ulx=Tevkd4WfqIA&JCv@E6ceb1P21lksAi1)`5(3oR(06a0CU3-`*&Njp~tc;;AO$m7A&U~%hGW=ozXq1qc10{ zi?Jm5{03_mwBB((op4&+-m#u>I-PJ{O?k2OW-+6E!MZSdKVj*X{?R>Flv_hzY$&oU z3z!JKv%xc&K^`|s9*JkMRW`KX<~cQuO=_>~dGlN_xOr0svj#=SS@DD|CC)9R;1jef z8(rrlz@ykg?r9sVqH9MR%Z;9;>EM~%bKc?lxhPX2L=hSdG)B+Q#+k7g=@B6-oiwY- z11N9x1OukprXf=lYUomccS@OTCUU^G!Ifk{5tQzQnq*=TW=>9w_F~JL9$a9|`jBZ8 zR2DZI-Y4VNeae`&$oe1Zsiz@CZF%d>EZ#=kGvq2dYmyxZ$y$&t1<)iL32qD>!3h!H zmbV@fu|TdNW0Bv|dYHC=oW1!FLS0QUo1;iW z2sP^^GnqOD3&Dxe90XPb9bCT8lRR4kaGUnomWqCO6GgeKlF3SBg*tOY3-SbycI)oa z&{$ltz|bM+479f3{(Og5FJIt`FTTL**ROF}PPiRAbnF&itZ{d_6-fov+j>K*nrDM0 z<+?qMIh7A{x={{&89qq2_q>O_$5pJzrfq7*Xj(GNoAa4nrYMW&RC#a7{n zr+?1lPFqb3e5lzEBwFf=;z4Ml*fi@>$)Rv665JX{;&ILKMoh^vxzHr`=3k%L(nRq* zhB=4U&TnxwPVxgng*__eC(=TUr4~R%prQF>?Lt_@ni{#Gn2IH!#F#P$5fz9E}G6WtET$UEf%724&?jhl3{MO zI90c%E1T*^r=g)v`rnN4)RBX~z&2Q^U-rOBv*Ks`)Q{w1P79{#EF+};9FrYsQpt&{ zN}ZI=eugn=F55G{HWPl-fw4MqNnGXn467SwYmNO$0qW#@9>um<-0YV>`SQDe`M>?k z|K%UUK%)On|1~9*uAssB0fK5n!aVYW4rx=6@oCDfRhi&<5lJ%!s1sx$z0_zUL5At; zb)RK4>O-xN6+ehL0W~g}SI!4y@IX^a&zX`P%0fUpe9M!m7YYsdQlwoFWq6ZJO`}Q1 zEX^Sb2ywqR9BvadU_0DKdK^PGZG~iPc?1WQgi4&CTZQwXeYAP7Mx%11`lS|@DWKl>o`I-|Q&l|2I zRdq}>^|;7=y6l#vkhfdbvx{YQQj=t+yM2`swjQ z(~#e@g8)@s7TN7PZXg^odFWy|))W}?H2ZmC4bQash8(aE3&sG@6fzie3@bt*a#ex?;$Nr7h@p 
z7o3(AqJnMT(P)?@!iP84?dF>l8@Bbeqfz(izkMHL9}ZG8TS;#XtuI!{vc1Q>-C^i~ z#Q{)bKOi`u%N;}q#^`wDPjTtLhRW4Jrvs~;a2x_{f|ta&-8SQEH~@4+{OPe{KX!;J z?RDa(fbpL#tZY7`#%G1oisXh6E7vx-+ZE;v+FZ3EjOR zrrhULFq9%HB91)~(9bjK01gL(X)$mxZKGnam~va<=f(OM_HVG?BTX$E&$H&ol&SC> zlo$_RY#eloDGLbhNV|kgX+P{rEhU|JIYl`yjawvK43oa=bT0(dIkp!f-h4{BtVHbd zt{67Y6%SgxZx_h{YlEtgPmq_#8xsj2oLkuBIQeijN~btWtey>)6Y?AnC5gC2B^4U+ z86Ckm;^e{vEH#s_0#@AoNo}>Evtm9x)&SEmTZ?OMnYFzAYJ(Wt^n1ZhIdb(#X@NE` z^u4$t}sq_Lxhg^j~CB3ov`^ty^5=-siaT~yB2Z4tLG!g@-TYU0xWk=l8 zsxv*8-TSP$5E`P-x4>H4JoUKVI7?$W184ul^2+Xg0Uex8t@`K3XjqpO%j&AL^BJGK z`2?SR`WByl_7*Q+y~dkQ-r#OMqb&{Z-+#cr`q%#ofBt{`Io{uHwbhLV(qZOSlN|Pr zj0xPFfKA^-KTGbK9Ov78rB}=Fb?1yS@op46X%>9WQeps_^jF1{CX~Gao9U8+Oky4$Q%3U(9J2u7~ zutNnz4j_xLsH-n@dXB+!)b{oeu?){0GNj_u$rIc ziZ;cU=H}i{2tff8}R8e^b_0&vG(@|Jd%#J z9-M5C<(Ut#k$s8cFLM%Zxq)b;KQ-*{@U6Zm(t8zI*hny$CPN`&^E9J&*(cdi1d26W z7RgXxIFJ0as>B-!7T_?iIbfULtr?(}>PYd8l&IRlD>l&a9u_a|AMpD1 z8?@drWXEwkaEL&TVZKaaEZlL3fn6RSA8|W&j9uK%81gmSgsbM-$1PYB89(*vhOIO`{FrCK+FVH(iyZaR3JCIPDuH!wNz;=g%oV{iJ8I;!yuY99O7| z#3$}8Ha4084OE5(2gas`BK5J-VBt~kaHdU`}}4crK)(-|U!_RztLr+fswZ6 z#r04~JFx8p)=^xK<8ZJhW7C^`AKYE_KxlnI<1_Z%R2lnnM0ywYZO6VJSmoE)Mu!{( zq0h2bx;uU2P1=FedBw2{_G3#JnH<)*h#U}Cc!&<{LvW0NP+RWX&3)j6V;nZjnWcpy zIEFQ4Bbp(xXWSYd@tEF$h;$Y&Hjw`C*eDDar{e$sAOJ~3K~&rOV`sHTwR!r6ssKHP z0#cj*PHAOG;W+_gxU3h^n0lcUL&@}Oxpg4fIstmLhOKe=c7*^X8+4I(4;p7Ml{zmZ zy*4n{6mUW;JxL(x2c{e>+q18vR)=R1F0N>R4-kq9uenhg<|)o1kT62_I&`9n%X1eb zPZ0muNYMm_ex|cUs>;G%BI1^)7-TYeV-%ZJuH>nytQM*!-Oe^|SVF&zfnnt#h{$K1 zGBsNSJ)V>PbEx5;tuVkN3V2G`n}|-yQb?PnEhoh8OMkp2z8WzlkEoR2!TB*lIz^Rg z*-mDam+uWMWy#`>N_t;mL~WMTLKa;xXILQyQ$Uvfy*B>^kTH>`-)OPf994W4uqnS5XoBjD1MO zaek*FN~V;CxcSr*r(%O|&pkYKA{q=+r#27?-n@Q`Km6)H!jHfF2|oYyQ@sA<4PL)~ zgHK<-!TrMv+`qiTX*~mA+yL1?Ccm~PeE;3|_|>n!#?OBKGc;GF*kX#3;}n}*)guA4 z3D}hi5vf)RX@jk#t7=5$S@Otpn55^EC9`FaqLEZCTD7QB0ZfDQgCOY#bdW}V z=lTpxHJv%(1PL8TL<@)afP66On{=I3=wkPrhJ{W_Inn&*>eNhu$j)x?vo-!3Q9G&I zWZ}vJ%}803l#8o1)286>sdds0RFt{3^@W=-4vdV0j%Jf45nI#@&!FH`mH>8EPNa;{ zY=zQNI~6ZLGYK!?i&75bQoHc9I}~qGQ|ULH=~h20MV*+oq=_v8^@dq6{N8LV7+TO= z#L~{sefH)$k6W+FZGc%8GHarz#5Ctn$g{M-5YKS-L}9OurC5dx#95~W$JVLFO$*oz zN_N{GIlm^9apGqkF=4p+TLK)Yg$hWBEfbov81iSzD~OB1;4f4Qz8!*9HKqlI4-bd; zt%IOl{fG!_|IDE&#R_q$!(5Z(iF!N%sZo|xnymg(pFib-xGZ}G+vI0zbiusTt>4|$ z9|~%U@4kPBzx>O8^VR?OKm1Q${dfQUfBW~pR|APykf{1rm&%(seA?C64o)5VEQj5^ zoiv>h&8GJ0EU6R(&D{I=LVSwFn$4|0^}a!u${mg=@+7Gjhu}y`?J3s101<``&kZkX zbr5BVI5!5wHslWqJSjT_2`SSPRD_dB)M+A*YvyU2I>Xc=&2I{I4`XXC0rLOF)|>FF{?VoiJHu zYm#l@o*V6$sONAZ3U+6vs8?QQ)3QOyoNNMWHdI4HPEV@O&J77^Y>H~Y(Yy(i1d>S( zDU?wP2~VJD;8Y=!Z%4)b=}joN^>)@-(Ub#n4)7aKH1r_Fx7zsl**;<$ucdeq)q+fh zYGQ&`CRsv4@-03}qFVFc zruto`KKS!ggr93>q{L=9k5Gm6A_@LOyMx97V{7e*6bt;>{;-ak)Dccn^wQ6*LsLW5XDN zec!Qd8@_+{9iASaN{JY}hG(#@DPbvVz}PldGC^#%BkDT5{&p5kgF~)7Dh&<_GlmFA z40=jpgF*Aa+Y+WBis@z>asc9YFx<|X;J@Yzk}@XLHX;(2d^1dxQ9m~J7qJNFsIRmV zFR0=!iXjjwHBxp|_LL6$MNN?^hoCjLEr_OkUefNB8%TRDB80?$k4SI`b@R-A5Yh?! 
zjpAGQze8yGt4AFrC~h1{+=B%00HH&5RH{>OQkc`~X0}r6!X;AnKt6c#UnMLkq!S)Q z%l?Tes-pvPv|`J|06uJQ8j5c&6_2>8JGdq)Q(Bz3I!=>cVc`tTUp8YZ#E`@N9Tm%- zX?ha?dg6;9BHKNhJ|<#9_3S82MZs>ZC0E99*dB>6R9t}(_CN91d;51e6o4@Z9+rMa zBcd;PQq{-8W8z7;dX4>A6Mt9W>Vsg3h(~M-fYxf*%3j}!Gm}WHc8ye~iqNK1E(bVN zM?rX1b%OM3*0?kceFR*#Aeicuvi7LtNwe6951&IJ<;q2MrFCqxnd?3BJ8;C3!dq_k zen6EWI`FrD^Z(%Q{`PPG8G!%!_sg;YXxI}}Dyj-e47Q>|)tW7B|D`?J{%auQ((6LH^xBY1yVxy)HIaa1)7Mw zN-$8^817jOFl6}oB#QxYV=HOpQ3pIz;!-1P=t(Ai#3a)rzu+xAjWboCfaO6@Z$98F zC||9dD`u1myx|)a!H5fV1`}y%L&DD*1CW?J1^lPh4M?eQsC(cEDn*%-%-Po1gfVRM zC1#s~?ijM+>FE*cvZD0`y*E7Fo^V;uI9&)24|mv(fw67CK^UFU*Tn{UV_=ZmdL;b5 z%?G85gH~WX;n>f3yb*A{8OV}5#&#H&qIF#EF6mi34#Csy3F5YjvDtub9Gf>M_8A@W z7OVaS#}IUG*v7EPP-0IS@!{_=Fm~Ja5&LF!m`dRgMqj)$IBK}F1_)CKLk{f6Ko5v6 zGseE#MLNU^3zs~0|KYlS_s0-W?@&=3-QK&s`O)gzPt zonaOwmI(sq2~e?Z&6qV#xppu=NF)B*8{OdGaNQA{_7E<cqpx=!Eg;hd@w2u?Y4EbqIVd#0#jnf5Wm)VQjxf-#;rWT zfssgmig%tTD}YG(%+>L2-q{EZs~c23#?+EZVn7@~6P+LA zzDfG{6VMjgFK?b~P-a85CR7_nCLbn-U4x3+9s}PwI@ULVO!W*~<2(&{8`bbHd0oC3 zm=uaJ+9noP`k8uxHO^X>LgxV3<<45eb5rv)2#(~7q+egdDyUPlFUhuo*C;9EoTB!) zHGJ{;5AfxWe~eFFe}eVAKoJ(U4Z|%tv8CVl1CQ57+>RT*{`zbDyZ`rRxa~I^`miZQ zB5qrK36;vjA*u4OXH!#J4RV`XG`2phFiVY0kR`u^hd*B^-h#5(ggCw*6|aM7+OS*W z5b(&9^TQS^&v|WjHjDvlv?q(H22eHC1eEM370G8MUO z0%ybl<^}mog`961M5K5R@%)b%m?K+x@>Glw=`Lymg5c(f7N0NGY&s#?_mZyW z5?A3jX6R9Nj2L{7e>OuEo$Sn0DD)7sUte zwZ;5QE@v6Tr(liTZ;c7Q;_3)uV93<)Pk@jFvJ{n71mNzHDW{QW5PgGX(cH*}W8z7O z`&)rF#czopavV^<%)WKnGs;#$Uf5nyH4JE36zC}h%)`UC1}&n) z?GQ=fwOVXb#qDrqrjL+UIylGWhB4_38CnjUhW(N)ZI8w1h{<`%Nwpw0sk~6%vhf5K zqfrxUgaw4N*b9JIXozNcolWK|nlGC;#OAK>KIZ-P=8Po|!vfuvdoYd*buI?;bAKYU zrCL}b7L^T)*!2OF2o?>WoJ^@aX|Ia!bNKVdI5x$g!$FptQOh42zHh+Y~;(8y@Kstojkhen1u2_Z|JVfmp$9!4qEK zhp!H_WdVtTyIp8R3>kvkc7=%GI6C&RLlszB#{~!W{T{>Z8LSs$qHIrSr+Z)k`}P4h z95&F9VPMVPF+?%O4i>^O%pTMD}bYu1#(Qb{^5*68&H zG{rK28L!jW+i>9sg z9E9o&mXH=pH2ML$mB~k&o34;c8snv@S^c7c&jgxjP=<-Hp9`!sJxelQ;JUXc5=g6X z(P(NkX^1JKH9xj(OGLznhBc6|dFm*oU_caACf->zFjCvV#DY_*oswB8BfZNBB^E%t z7kemci2WJQQ;&&`L{!^=Xsb=3|2q{&Uu4R#c#3k`8$z`fsXZn?SKy$^bq{~Acw^E9 z(YHyYoW%VecREd}F*;>P2Tvtp%tdV&=Fb%C6eN~Xd`b%oYgtNi_!i6jg)fM=-}c8NTSP%o~|2@Vx|5&}uUS;LM|jnbG5f*M=7<_(1} zlrRtJL%uBvZIS@GNIs#EIBBR83Z7}dkIR_@UP|if4*^rhvUc1*+~NN20nOu*ZLnj@ z4{q$BY#aKGW5eUqBlhbJU;pZt`1RLcLk~3zlalFqw9*OG)it)9;$&K-b^8+d^ClpGcVR5WKvv=9sQA+wK(6K!B0ep4>HPQ<_Z?>@6&C_uCt;6)#l(eMF^TH;IEAl$l|&1bMy zikTBlu(-M{hf7Q#Y0&0pMDTY4@Tip*5zlJfAxJfvW?cGY-3>UVPD?V1cV@DT`bC@1 zhBJQhmWtp2(Fq(c#gUG55v|@ZER!P0^eG*lCdHHW4gd}Zuaa3ipqzXt3IXMW8DKx4 zhcDtx^=I6iMv5WM`{5~}Mam{0@MfK%5M17Rje&a%Et(I9q6Y{tHTlyNaA8l%QW-WJ z8JY}V1xMm{I*?$9%)a|*j1yLslU+W2P^ueCEh%% zCu*2{6G^G1h%d^t5plU7yZp+FP(M~{8i~qlCnJM1G^L%rHN1TB0{yh&xZSYtJCovW za>8)%P{1w^&u=zB;{dM)HQTlgs*3%#nfK4PY?(P6em?Woc!B1{qr?D4!-12k0!@;) z-5al7y~3A2`VoHggFnD^+i-onVaPG%um_Am@b29^e7HWMe}4k;Eu)E4%krassFqeY zfKZWp&E*%*ta1m*xO^0c7qZReUON1cbM^(uX}|*XW|Q~WsKii&?H7y!UHv^ug-~Ko z<=IX+00QdrtyxYSzOzAeBkVDbNukf@3ZF#`54D`0(@$m@* zjJ0ogcu-uQPFPfM>Z{3kTLVPV7Q%^E94c6j1*@Dej@>t(;VD0PgsI?A!C1~#&^5)n zoUk;;)9r>Prl9CccUDA)9A;zEVRot>(A?2*97odndoy{YcuXU6x9#?D=oGsuLxZVO z+22QRSO{2DAmoDwkGnH?lof#8p9%3shFm7ei+RbLntE%DDUwdhL$4#U*qb*329RSn zOA|)Molyd`eBj_)XbpuBX{u{Pb&9&AeEfn#K+iyy$V3I3iU)*el+vdaFqE6u0kSd;V?&Vcie8$7wJ?>w;z~ywtvYc^RR&QQ_?h^hn6#Lk5eSE_8@e%Ldy~8j6zpwG(;|FuG z4)J?X(>7!k;CPm{K>j-&qwdx@<0YE1_#P=e6vzbz)M}`>OVK#4y5uL37Dr7`4J^op zI^wX?0S6BHB|W0S^D|e*1TGj6-C92OvZlNJCSiu2QWVyjutso$4)G+O7=lqg>IN`{|WLiD*`IjZa`f<=8Ytk?T1pW zA*lid>fE=17EN~W|_sCq9FwjPvHB66HH2QQS;c~eEfl)51`jPRXGbSEW)AE z2ql7O@R{5K#BtucB{~utMy1Z?_wp>B*3RGHk15Q8DQsuwfl7MFBrH%Zp2!vmVMUAn 
zOH!2;KFffcYAUcQCB_E=zD%NsfA?lNxF6cAehWohSoadv@7x!Pw5SO0r~o5ZHTmbm zai2`V2BD&WEn6}jiyW#dl{r*5)?9Dgp4yKa9UCgwCXi;Fz#sB*E zzc&L3AqcrNRHa}bLA@5wndcOmy4*#)QShl)NZqY^b*T+E2MyYWL8iD#$tiA2|7(!a z?Y}VKW{GD&zkmd_sc?%adBFEHniw)!^Xf#4^857)5WKse2QHK$9~w);RW%+ z3kDrHZUf7@;@98&rZR^mHny5Ea}Dekj`+p|-6x^i>{1|~%E$L-cO4rQ-W3djD5i%? z(A+9i;yV{txTMfG3pt^7DRPd&NQ+V$+U6d`o6G$g*M*=lypVi%0(#W^GcIDXzk?)3 z_wRW!=Z4!nt6ot}1%ghADpt4--m}wXsd(JT*;3i7^69(#$ra7ra0Ki-{KLx{%nWrmkHqB zFb?ynA71z|Te01uNhN1@MrL3oehxD}qm zJ(jV6^uk*wFqzR)^SKrO-b3r1@wN^2ERcN;VF{7gkW+KI`IJQ-7DTNGR#65mrXI;b zZK#&KelZ0L+j~-VwI2u7RA_934zbTHcuiV=wz2_TiPi%2==lvAS;;;`mXI-Pt+1ThyV|!Wk9^q;wiQ+ zC5a|S)+9(2DSVbq0W=kUV=UfkW` zbUvfgG_;C1XELtVzTNQn_=pehKj5c7{V9I_i+^u??##9<1_5O*TFVAPMp#ERIU=?^ znte&$NcplWlX0|66aoevp)BC63o0$#|LgvFa3Js89?5ZTNx=r<-pghtlCE0DZ@4Vq z2SNVt$t_8F=#2f~>M5{QYzY}I$24dCK|WmMZ1hK(U1o()B*BCt%MB@^ zV)Gh5VfL+i)s@iP9rMZhZ(V8^FA-?JIOFNXY2)7S|=-3M8={Lt{CLtQkcqwRH5_48nH1;io_SyMJ(w#P60NF<8ysXd8Zs!@xbOCFU;zxpgmx zeb_o>V8_cH$R*K~kR!m3+8m05Q4Qa2)PgZ)xse-JGmsCWT7)D>+Oc?zTxi09qu3QvGU?jI)lZmmlJsHc(}XA+FMSu@9*zSb|eOGTw2F+ zTFoPW95#?hs8NHc9qrs1a_rc)8?KL6lX0^GdL#fW^0Ac^2CV><6}O$I?1JoFDOccgFd0M(YcfegJw(=;t;Ltj!b&eeHk(*L}xF zd5M*t&{{`pHeB7e10Q#TuB~gsX+7h7KBM==ppvkLyAy-XCtNz?7w_NV^Yc631_{>l z3Weh7=?M%0_XVm0OTPhh!P3mLe;a3vb_X6G(c2@qbqCM7XS*xLei#ErfMYZ?RcyMW zuV#hV_ATvb3A9tfK!frCH$v+Tx({$uEYbjO7_Pb)5VY3ae&zNMWh@Ijd=O<7Q#DU= zJYCe#*;qp|M$HikY4c)-Q3Z$G;v*~CS!~^H2~G}3g*UsEibLhK;Mm-H=^Bcxh=pbW z(CzGUo1MB>e=dmaaS|WKQ>NWmylMAF*Coja32IrF%;Vg=utkD8(_J8y1+UEcJZD60 z>9%LGfr%ExrvAKDnh}G_(1}l<9{a-3gLjD{+WKF4{ zORq4y$DZ=Me?~437QwU_z~qH4y)0^ppwXPSDX++eLc+0$qDjpX433^PgUyY(?sl!= zO^E%%U8WS!w#A>jS&ddnG=yPksURE9`=AOs#f?TYnxD5i40T$ACsCk6mp~Z3w&Zc+}+{x&%VIx z*KcrHPw4B4*33h}&3>oj7&!I~*XtFJkB`{58{U2VfbF(ty=TLBfxzLB-e~FsOC(RC zFw2lVnbHg{{G5s(4W#4%H5@-WG>3SQQt}`p>Z3UqG;iofa1%Vzo5JqKGHpjS0hYv? 
zO`9nPksJAl+ibxc5zVOWv5g)MsmjkxuK0^8cWB)0K8_kG`~NCN-!t_qKqO?dhi11nCA8voO9W1 z$$L9$uAGucA!<1|#l(&%OlS-*VtMu4P>efRWHv zRD5d$1H^*Z7mZzPfnzK+w`)zDq{fkoQo4Imsu<~r0oRDakz^_b116L-;))(Qyyiu1 z6V&@96EM4F4s(RKv-%<_(YZrkfBl<3`~4b7u!^O*#alfRbR-$?C>ALbayN{8&)L^e z!dy*%w8;D%ZbPn0U?Chn8PDu`A)~yb+jdkn!7Wi?sf;4j=JdNz2a^53G1+2Ft+Or-+ccqZr3Zsc_HLr!I0up3}##k$O-i< zfMa~P`TCMULlO2ze+FK?dV@dt<3GWV{@@Sr;`K|cZLP_>Z;IGAXZyC{>2}5Wa=~re zupgUA;8a{v+@{h!CjHjPv(~jbQI=dirWqPTo{1e#cko4GdrvfP2yM7A#9?WeL5`kG zF%>ewupwx9$A+ebC1l5Wv8in}URzi3$-1Bt zR1$*gmpg04{sH8w6~BmR$2fM}wi|9wk64zDep>Nxe}}s}8%DHY7tFRg(#FwtMPDSGBu>U}w?6UZ6XGRU{$3(BU5S%0f3TP%FP{4Eg|QW9OLIWgYQv)zDd~ z&zTMtz}H~ypk%h5jF5$-#}8<#{7_s9vP6TB;YTC_r{y*>-wm{;PQ@pWS0MLDN8?fh zq)=H*kU?iAsm6O*anu3kBI3j`PUr_828H0>RTDC7aFTH-GFdQ&vus>N(RhN0VoKhi zgyl8gFvstnmI4e{IEAF$2NBJFmoeEKvnb7Cj*=0P1*Nb<9DUR3=Wkf!A|-}AR?jTx zBc0gEvJ$GOdY2YAkFQg*)EY}c3SkMSFP=PZnnJY|L->jAkMxBb&xNK{Ayb3WQ5$Yo z7^u3ng2@z-jnSx~wT8>x1?Tnb>aR0S=M{H%_jvv4C0@RIgNM5d*7F&keDVgb-@L}d z@vdzKltmYTi7$$5`Ih!0z3MA5I2}XY7GTUG@(mceu zEvDL-(oLp+i(_&a5`7W3O)#__9s%i*nh_Q1Hd#xZiGc&xf`wg~W1G3Gh3x}~)DDG6 z^XFL-_zNO$G^0WENKJ==G8tbk6JI79OxE~et#Bn0vne;jlaPu_p-kP;9Kz1()=;iW z+GMPJVrPm_!rF-@m?C01R7wh{Lc9G40W;6^3JrK+#eXta)Ow&Y5CDHDq_tQbB_Asr zRLVe58#4OftTN&K_t4;&80eG+*ZCZPV+Y2E&1j^V3YlaiQKM-^M+UngttzoV5?2;@ zQ(IBQVT~U?Q1xh0ru;p1vP4yKlv{#*1FMnZ=R^GAJdCK(4n*9ozq^Wz9OaBDcUDeV zc_fw&{?K`Jm(uV-T@7*A`>RaTd4>*%b~hl?RLe4dn8W^p2w`BG0+xL;Zs1YLV3A-I zN;pt44XKzk5KA=5K~#w3s8kYRF)}+$CEnRJY*{8MH2~yB`1tYtKR8F?cRG+Dh1knB z^1&3fcsv#jGp$RbCs|VzJ+Uzvno&Yj$zndy3DDH=D55Eecn%8IW^pZw^zD#nPMn2o zdX<32Y_ez~kXvCFg-sI4do(pBT^(Ba(=*{8L2Fhj&!oB~DZFYGzMJ{etQBl%XDJnT zAvTD<0Xr=V?l1Rv`^6V{dH;a*bOJTtlQ*AY-#5H_{{cgeOk)t3rD6=1xA{R~vUb~O zo!W}W+Y{EM|jjP;V)FYptR6mPVXkef=w3|KcxkeSE_E_m8-38xG4# zbdcEd3~f;P4AXj|hQN%DEc=!FGA z8Yc32OiTwz+N4(G^q0juLr(4$o#L{3s87OkSaWqGZP+*x>BXZ0K*F~tA^>$%;!Mq9 zyjY&pPE61vX!U-esufq9LqAUVXIO56J0nENAyF#mWi3F-hbYB6t!zZ!LqszuBb826 zfOq%oPXh=iC6kK5sB{8gP7#FR5mpggZyUC416{AUJ>Kwmcf;kpfP2H`e8Jim+hT49 z_Wi(lVek@#;4kq+23I_t&mblo$35hDk9}+yPdoPghSpcC%VK56I54QdO=i_|!yztucfMz4-t_B|WVTz~Rxr zObl9VpgZI+aF%b&L?xk)q6YI793Wwj{E7xn(zk2e19n-hx<}t$>6Rq=y4WKH#Do-FqMD4~RD?QnKLY!XA^BQo5VUg|mtVk@f9w4xok zMdUntLRBXQe2yUx3U)`+W=>LGEh%OP!=IkLD&^)ZYH$A$GYwz(Jg z%KF$Iq+WbusspHE-5|DAL~h^=g6*i}RAO3s0@vm#&^j^9SMoE*?^aPu;}~q)@EA;T zSux0mPR$n(8Bm;ssyEclHdzBjqb~-LXTirNHsbS86nkdLfseZ1oFaiKT32y8MzBL@ zL0`IAruu^W%L%8;Jwz1lsc?1cK)krE4A52V)r+lv`2fFGMo2v;W6|&1*)tgkR zWU7QcaFtt%JrZk|$ zRQ8VP9LN;0$+B!UN^{k7shMJ%4^KT%aQfgWa+W8ZeDJj}Svf6AHPS0eO0OKTjgnN8 zNDhKbklf$`Df{On$vH_vf}isVj4aV$GQe0+^ixzsbEtPEz61ZF#v*wjLj2eV{z zuK}C)`YNa1(U_~*?*_^`Qi*0Er}NKB%$^4CktD#mZVjR-CNd-0!#?Z`u;yyKvgQPE zGHJ-J_MR;_?86+ml`$5z?9l15q{rZhTtOU}#uT>te6DnMQ6<2UG={~T3r>X^T$IJF z|5A|mY1)v3@JKggpbf_EoWOJ!3j=#aRnN#?(BKqFH1OagrjkvGpk%qkKd} zw)KisgV~!Cesc0jpZJSvLy%S;qHt}*#WodW8gLP*_^$w{#H2s{SAq#n2Y)FzK_eP- zJ#VsTgdEPBP=`yg0;kIfm%9r-dHE7w{*#~J2S5BF*3*j9xkIVp_VMaqN|QS45RV=lJWt`YTHq7=mLr zz?D54{n;ib0s?48WP>0%WNJppA#35i4JvhPhz*`50VLz*TnNu-hD^#OS2e`elu=tc za^Gnj%0U&@lADQxJmIPrWO__Z`7JgT2@k4iu>VG$#u14i5|i+NYdwn@b&s}alJf}` z!C$!d} zMrME-Egp}C6?usXWO=SjQzk+vDh^4M*3DZOan_y+jTbzN7QwJrNw2b4hu${LrAY_UZl_|*n>cFKxs;D z$}?O+y-hKGI@N2w`Hz=LL#AuRsafaj{=p#$g$_`2Y}nJQ3jw5A@kSA!$+=+@w->4p zoLMk2p0pS_sMx$>>G}X#;yK{*y33Tl06@ene-aINqyT8yS7SM6|9`%m`49L5Rov60 z4x_Y@D*CeE<>k(-73T}?9$w)7?t+IG54gYFgsyGgdSbuuDV%x4*`U+9OZM)$( zOa;ZAQchgDc>n#+Am;sOFPQt_d#{$tD*ZFQE90E07@m=f^miVt!X35mg-DFG<<{c`!z zV?Y#xnM_e~r7WP%vg7Q9WM~3>lQokZ3V00h6o26sGiCw~KUIR zR%(;wBO8vc*DD7`5rhH@WX)_vbr=NkRaQkl#vHZS^K?Fyg9}Elser%iHDv2aw 
z+!OMziRW>Tv4fX=N#PL89I*S4MrMd+s0<^G3-PlSn<~Kj2cd?~F`)4~Fq9Or;AOu* z!b+LkqL@OF`$YXR45Dnju;B^Ki_xP|xLI^e%0VO^etW-C0@ zA0P2_+#oWX`(dQvfCpPa>IazA_a5u`C=3($Nq*>(g1UX@=XJ$rZ$HE7w1T*ybuUt| z03;ZP04~QJ7`W{lzW?rfy!-BZeEjgy;y{r^N>{^_S6Oqt5CTqe7>^AN)TLq|^+MGe z_(lUfUWtAH#~BF>DYa9^EIPX;bUD(tbHI1rSmEU2iC}o;P~Fhcg);C76p~RC>hF zl>~AKh79b-f$etSak=8d=?UlainewzbsR&iL8XQjt3^Z!pz>(3iu^UM!Z-mqHOAv+ z(X_X1!TsGm9`0Xw_IO0U*_N`mj{EyFWSl_6SWhd40Q_c!`7C?ZtH5~b_65c(X!SXA+uo`3GSLP2s0poZAZs8+cAifdgM(BOD!c}bB z*>r%8u%t!CI)S=?Jwd}D!kp;=B)~!J%DBkkCdfFf|MciDb{%&3qFtM8vA>b(5V_>= zBnX|NHS?U!@l+N9lpF8{)&cS18jB{pFw{pp#kUzQn?3H35@Pn8k)^NMqh&3e2;PNxha3ELZW7`C<`#n%vY# z0wycN%otHNAduoiMMLkf*^=o?Z5Iy$x+m_$#yJ+&UL`OgW%Xg3p@_%yR4R2DntFV+ zI7oTO4BAWmCK_4FOB#4dQyoFtJdGga@grrAxdle~vS;diSal?5MGuHEp&p!JcKmO4V@q;gZh|k`Bh8HjHaXMXaI-Sx*3)ZA*vU7Z`$m;yl|Rz_uT_K0V>_ z`h@-Iio>I8?d)t}rp%NUdptOw_kc&UH|$_Zg%kMzgP1_Wo6`g{RMUyv)D9ak#Ku0< ze^bg_F4SD9B$Gs&DVS7s+z7-2=2-p^?uo00EPuV>=g#BLEJ@HlOARP_^ue;}<9zVPAdVd$CWJAO}B zz4^eT(vCQXr7SfVNIM5pLY2ux6lP;|&iTN|rGTT8(q^u%z-GT-M9umx;l)kGissVG zo^POX2kkCnxAfTit$wHAx43#9Yw4CHCiXdqK1+rRKxGUQMoUwJ1r2c}Oz4CTO0HSl z)|CgEil?ys2gAu8!%F)h=PyueY5mc#{Hdn853^FnRrd-uYm(3eAJ%8-a+08C$~4C0 zMa8ovuCT(CvL(|J7ucKAwPBC2qQg1kt>^WS8#1}{R#H?>AF(yEB^@?9SLF@kR`)~2>ci-Z6yW!Y&`&_!SG&lfOPPxzcILen0 z>CKD7#{PYFivvi_U+j^FpMUWMPUn*qFLB_8#F5$l6JpUwAKrh&ufF~TzW?rfY`2@q ztw3HF+GReHNm8+7;;W3=eH?=+!n`>w7J|kwi%`;Lx0#r1&w)|?8Tewq)C5pSc*0#8 zN*QKZaHskCz!(miLdMfNwJQ%%z~8G>eT+26F;!s{LxvUuB9BRoUV(iRK^n1s0c(oQ zhI`pv6Gz=Q+)llZJ6u$3a6w^IlCpHfP>QuSwOKGmqhV~3fC+~3A-CFOl3w=4OTwp_ z{-8+lXc&{blI9%xq&8clGknmRFT)`xtsN?7w7x=R!_pgM zKX9ch7>d?9)@21i*au^m2Xy`b-L4)(NmzTcf}F(RoR*BUtSiPC=zT#W!XXDp8;(X; z)&)b=&bdM01?XMDYlDhl+cqp-Y`4`E5sNP{_ie|E(<46MGc5ECsGR{}@S@#)&?2^<=x&%F);GNc10cgW^4|Z zB9DC}@HCw}u?h4-(fqp%@?7pHs2rA&cYwIx*<{f!%~r`omWl**0_RD%ILB2~5=!N2N9^xPz>_OIz~WjZ6(d1d*OW7p zOa6?x&xZWPVuI8DBk?Mv z;gn#oi8rM2S@^KYiR1!FzdtZ}#yv?P6DBYzsV4P;Daf1&;DxG)KVJl}G_!j6d&=Hq z4X`UF!}@F7k1<@9OXa7Y)-(R+C0SDu%N$$6x1YYnfAwGf7x<6==#TK_kG{m`Z{K3Q zoO2+%_KxLrLZ^=27xd161FBo+i~#K!^-1~Nby|ERBxpLn$7%|JP@ZYo*E#Q zy?c*im`Ua7@rI}C72_DWKt_`8{T~oY&wh@fqy~J=&)nWR9$q})&F61%e=+XGX+0IV z&<7mHb+fxOHh}T=d;0<;qMG+#*x^*w6mMDUST{_geCb0B)=AFgvgbW&o_O{$TPFtz z&o}In0~bpHu^|IjHlq(6r$C#C!*z9YI`Ja;%%9z+!BQwFc=WOXu+3c+RqaunFUrA_ zs!l9yi^Dd^Pem82P$|Mm3UKdi4H3<#(x`=FkYn+vHZ&O?*NMVuIM2cdF)Yc%sMI2h zNj24hzgzzJ=q(-q03ZNKL_t)Ub+#m`2p@(6#~<`%qA2ZcW**-XyYfDiB-YXu4ARiY6o zVY?bCEs0#h+4;GJ>%i! 
z3w-kS6Wrh5W9&=gzlc4gotqcD&46SvvxwZ?u&0nU6FSW2-(y1( z90S*zG8FILy~EFb{xiJ)_z}lAux|&3Xl4#GRdH_z9p}rQKHKnDDrI~RO?QC}YY5ys z{_qdK!Vf?H94bI>D=-At%_`*6vL-Z@8>4B%w%zc{U;PU2-oL})&GGP|L{W&FA8@WJ zaqXII;3PAVkp~5GU?U;x3&O5y`nPAAPdMbg-)|8d)S{ z#IKPSBu*9Dkb{)4T5#m7vqDFt5;);JJXM)5ZU=Yk_J213+(0A0az@9%lnj!K^a3tT zzTMd1g-6z!8!KTrV<5f2@M+?)yKRI4xdHoi!|i4==IwIe?*5EbPk?r`rQz5G!Hf#VohmJT!q_Zw91oQ!X_ufB9wAON)7;k**g z>%et4Mbvg|9{ua^N{3+D=9vN7{T;Su**nUt)v!u6>FWOY93E=( zS4SYt3gL{F^m9=nI+8qo!=;r(C1Q?dp4pq05h}i!g&v!G+vEp#hN3VIPc{{(IeLh) zOfeXTNta_wBvW2Sd~P6v!f_S1xhX{@>%=BL#n^(0jwva3SEvuUE6pNf{fha95tQ`w zZQFL5JNtIBgddm-KAloJ$bk~!t)SvVNR7E%-e0nNQFcC!rN)?gji15WRNV6)i2w=a ziL+C3h#gi1$(4mElbT?PdbYyx5W(fV;FC{2#ZP|nPw>Zo^vC$Y7eBz=`OdC(Y8d+g zD6p(2EK7F=j1AXl5s%`V)G$+njmh5Aw3@8&X)Bv`j96_WN{ZvyvEMd)|J{4M`~G_< z2>ZU{!}||-db(OP@-T{ij=_j?2u%M!cW<*KOOjma9aVFW$jtj+e@!$nGXRMZjz9#$ z*85A<&M z`t>oQnK^h$$g53W@wDecW6?2+H?MeYkzpUmhuhh_djb;q9k)_`p)zyN;f+ifhg!*nnyYMBplxaR>d_Lu$>xvtRrhVeIp<+g}gHJ78 z)Igc8<)!*d&W7<0C^vs*ddelYEgc}qb*Ndy+OdS)U4XFnYB+nhQQuy&AhAr&{IkC^!$MmjegJpaWl?P&OHZx5jEg+G^;FVXN-dUSA z%;~$8lxl;`hqO8$)r_jvQ>E#AI)jpcM2($)lls?=a8EfPi_eoE+s@kz#wLl-Nu zt`_2aa0ddk#aPMQ7MkJX$Di@jk3ZLjdAEZ@C#~syt)LQ`BYCkJvzDizy2|KnB{>R( zcE_vpJ^uE8`1g4K?Kil;yF*(P-FpDIq{U@Ia;XZiTgT(`6E05|JYTORKb+$|F?r61 z#LowhBz-kKBLq>hW%t~SkwnaaM{2)r}z$BA%nINYakgY7J%`;ueE4onf z`EFs!5CSzlAz0-R%mgN@9O6)+_KJcM@hJRjqlM`ro6$iEXQZja?2OXAMVY za)(?$$;WP^3W^-~ImQy&Kr{o2sZ`|2HrG!;{n3qd- z*G2+N<&>pKVR{1cZxH*A8a&$ zQ;Vy>IGl3==S~f{JDsAa=Z5BX99UF=li=d-(Dpy#OaBh{?K8C9F@jH^dY~T%+8)K| z+Ln-2_X`$VYM5r-;w;k@)`A&O{@7`=o`AsC;@^#^yzq^pNu+Od#%>)w-bu46_?!(# zLM;O_CdA5C6)POt z{f^9K*HY)%#9_$d*<@_?GlyMe0JqBaS7pFqD>aaJQ!R5Ah~<=s|0yRuvuT}zr4pNz*|#{H4e+)06#dXDe{ zMqC?E)&RUs!H7_RC^Li4R!ZfTxsSJvjtc%Bwg7M!Jo> z;D@9_A%>R44D}FSYR=NGmCOnmd?okhDW8s`Kwe(LZ5mld1LeK6~W12 z1n-mFtZUUFnTnC;A6`Uk>!6`j1L#br$>m*cfq}4N7$=xA4TH@yrm+)X>2)-DCin3D ziDxcV*F!^XJ);1rhMD^^&1DV6EVI`J|ZdQ#g`^4p~p>hovB$UKW z6j|FQilyfLJVdl1Z4U7S3b|qiXO41e%F7 z5L9Y`BP{pA!5bu&w#qcewCUFXBgr}t)6CQYO#k>N zGDI0LF3r-!{`9)RC<^W+Kr+$?i8>!IoQg=WW7><(9c{V{7(xX6Nk@8A|mvbsr ztOOoP&9k_iCQm5?Bdo{BhyWX@^daTYqaHRSsl;AHG0u(LXl-?Du?v=$F>>SZA5kJe zCwbKjHAw@L0OImu{3Hv8<-Fj-H}CM~)$8zvQh;U$B|L$tj-ie5kdWbtZxvI9+727Z z^ddgx@`fa=rA$PFSDXxY>j&VDpMLx!zWnkLkB^VI-nPnpFK?fCUCN#yAku}WS22-n z7=m7#1{Lwi-re8fumA1e;M;c}aJs+4vMj;NsJ!5%(jCkW9M*C9`h@H4!tOa=<~|^- z)v!)cO-Z}eY4ulfC_1(*oABJ4P;!XjPj-u#y;A%n!F@RPYExTT)U{c(k%Y=Atuq}X zQb%NXRD1U9q#REN+EAKEsSgC)3~5f^Gl3v3@Xlu~;ioBU&P~#Y7^SkogSHg_>y#D_ zuvdBe`jGZTQ<$5Q_?^T?Hnk|XJQ;DCgy#t|T1MDD;!#J&^hA12Xc?kVi$O<5ZjAv{ z#wOJ!WCB}*{?L#{_MpP&)I4JwxeXS^eD%$7=^gvF;d;H|{{9Z2`*leg!;7gS`z@r4wr_7 zgjy#+D>n;G8Vn$Dca-)1X*cUmRH8m%N3tLf5x`$*tQK{zkJ2h z^A#I`vqD>-sTANXrQP82C1Gy@!Qv=6xC6L9MZEIqd_r5+f(;5%aS#m+Xo=bTj_d7$ zPoICm_40(BES+pPie$uk5d}y_Y^BwP&s!okyHs|r5o&>mN_1)PqnC2G3Rc<;fL1l9 zWK=I%Rgy*v9VrpYp_IE#@JbH4b5LR9I~*~_9K4y9n%8>|)es|brE_>@U_x9ge3Ese zGD0Uy9ZJisby-WP<2}52ihVQ zktPP|RH<@jBpp~aqUuE)%_mO7OUgn#uRWA+LU24Lt8CAoRdp=OEba@ZipUctkLYR4 zC+Mfp;+3|JnhNo5u#Cjc7;Qc$X^o2cb9H9h{WXw!<%8Ui&WQSF~R!~FOTX+WOP-G~1cg7?+DPBujqX}`BNJp%6yiB>Q zrJ?=wXQPl9SdExknhuDEOqA{cMv*CFbtGa$K6xMVsIuO~X{r_}9X@6owe2)+epNDF zsnNSBC66sXP);dc{QaT6fO;)!gkM~;5)?e-=jL&S?P6FxFu8LpU)#rvl#E>@JdbG& zDn5K^h+wV2-k*{C~VG*%;=~w5TEj{K&tvE-P{L zZusy2=CAR0fA=4-oX%Lap)CtQ0&PJ$xk`Dy-mo7#e*fc7_`^>>;kMu6N%hW%r`W6- zqDjeUr8qnyvb1x%my?0G(s@u(+UN<+%a~2ZP$3ls;xMm&y0$O*@bqFApblAQ=6yh{ zI0e_T?iKmhQz4MH1N&1@4*hjXSQJ8`B-}nLL0NEtdauNbXk=2g!^yu_m^%T4UP_Dv z2c`5)6DT*?auPsP5xbW1NhJZ8GRI6+@zLPrCRRAQmaSaPw1kn(K=(Xi&*b zGcdW5=d84=7_>Velq3@NLT};FE_y 
zUbC&y(}2w@mFPSMe&gl^{&wDA=e3|_wuj^uPtvT5QWMd(BLQBQ?%6CJ)0rl^Tk3J+ z>X-hnyXxU{Fpzlk_)!km1ba!!JSbHY;V~9%RtK8X{!rUq!+xg6JSJw4^qh-un}_59 z%*mYdeKkM=@AY<;xrw>_`rc82EAHnYdZMSw zo;-eph{U#?Fl-YavL2@9$eo{YBtH81d-&#WezW{)8HhOvsQ|%1KEFATvRRyFkPM0uVawP1_AQ|pcxQ0 zxs>4bn2GRcz-GgkIh-pclm-FXE@Uq!T7D@FYBN}P46^7M91>9{)#}*Mq<5@*ZvsnO zkHd|C*;1i&0UEUp0EZS-POfW06LJn5L$rGi*cz19F~g+GuIsuu#L}W zdCHB4jS@FgoSES7NWXA-wmDR+%)MG!$Y+jh1)FSD@EUSYDj^zbMy8B=F`Y7eJyRXs zh-FK6E27yB177*n`|m`(pTr@inNoQ{KJQ(g@XlLr<6^rgt_{zY^iv)CR$FF86EF2& z5JorGb{+u6c33{-V)vvtS=sTEQqWCqcyV?9JE#Fpm$p-TA?j7mykBawvARt`e3XkQ>`)#BNLmA#j8Qd{7X%| z+%VHbrSX-hGnW+`oqB?mq$2ev|I_JSgd0j3VMBGg27N~L)_?$?YBTa+wMl_uxnvO< zB7=%>=O8jyDiJm&`ex06+XIfs zNkAu>^)^1QC_!Rj1>!-NRxfXtc8KaX~0SjV7#a;<2 zY7WCaTk5364GJ#Ol*nSNhwZt;@01arhk?Z7hmZ5>@*azJG{7Y&VH=`UJF!p#7n&h` zz{g?JKAAKS>SDMtjv$C~Ur}I`8~LaMt|VpAe^&dyB%xZ|-SzWEsx8nLH_Q<7F?Hw2 zTv}h7qLTFt9H6Z=P0c0W?%yR$TS>gU(oX7x8MuD+8sGW$hj{+-C8pE{%9TE(eWb;Lm)e4V z)wXIucS4ON(^VFHbl|N=Zvg=Ixi9vB5Aqm%Wz)|ZK79K8Ic~0R>hqH-r62Iig;IdQ z`d}=UyuJ=~YYsz2utgViAsLs%y0)&D24Eox7}CW3K$0gY(q)>Y^jHMxh)GwGEmsg$ zGaa19`(T;pjBf3z{jvxzoc0%)>A-Sgi6MH7swP z1!=|Z^mzBWromn?!%Xko?1O@o&;6DEu8pHoJ`m9!%0wXOag}968Q-#)q@wg%vl1+z zRvx;F?^e-pFd2?6NG0c!LaI`D5-znBD-6p*+|`D}5kLOp7=PgxadXFvxO;t`US`Oj z`B{v?A@z_&%?UYHhr-=s7%%?}c)#U=l+KTz< zdlZ-)Y}a#Myr^_}e{;#1jn57ZWmD>jm0Dyhj#r_Y>)8~gyiIGh1Uz3wmLz{qAoaRouqLv`m(#K_s z(Esa8sXAy2-7X#X@883t`wtrwsA-K$Jx7ZIIgZ%pj_cR2a6X?!MPJrSsu%~G=C+5x zsqR2?x6DRyBNYFL)%?r;A(?sf^<0fqaGBz)BcpYCu@py3yuf9W;zxa@MzQ9SEd;to z)ClSP>@%8i><1fiZ-H}tw{^Xgomdilai==j<~XzpUrA}5+?H4&;A)r|HGWt)^-{$Q zk)!@S-mXbrFXS~Ku~IR!!*bS;fc08$>sO+Nw68|cW*`xJ)OX05Gcp%ImS?r}_nu1b z@MeP4;0t}tY9YavctyJBWY0j0w#goYuJxe7sRmKnwyV}R0aRscMn}9?@i#aKsXz1n zxWE-Qc+`^1t$?>~qX%y@0~i~JL>04{WJaXzh@sd`z#}k2D)3-@?2cl=56+X5*07Z* zv#fUsJF}j3FPjv~s-(YXFxckx%t0|L%tk@I34h#`NV3=UG-G1Uy`Sgii8*&F5Tb{; z=A|f^89k`IIs^04@we?3znstUTIbL)$u4AZM-Q1)w_>(gjraZN`=FM6*~2_j!kW_B zYOe8DW^h_^_xiyeNc_}K{WJN&2OoSm#^6IAuo>&TIsp38B&p++G53B>?E7qq=;Soh z^CaK@eqhQvu)Cu&r)p_nt%BslW`{vxQ-4)7~m zi3zExGYchlbM+w1+8X&7xVSjsz4zb2JMX-M)9IocOLJdJ6RJW&GE2qfFk-0g%>wQD zR-Rv*gk(9EMMGOJSpp8;MKj=|k3YdL{k31h>sPNB%R^0CCTs*EszKCtJK6MiBw{Ro zUCOGUT;JhkJha3Zh9CXvSMcc3LyX}KUy3<-Anhxn)dp_oEv`8v*-n>yz~l;U=-h7= zEKvn1=oLkjb%!a=u2`m_Cf6wIV_Qoqw@t>D8l`Tr7!@%YFQ+108}w<#*g$!9aonD1 zJJE2&wRK2Y7;=dZSGYZQF`RG+wwVVjloCVQ2g>Ls93Yp(a<)HinYfkRo$G$J=*T#a zY}qetH2}$2(Vj9xaJz$KIM-%R)b72lpgIf>4+<@3bU5*vZ$*O+3c|hO%^RB@UnZs; zCD$rgV29$0I}v%aBNbJRCd?aCfHja2WYZ1y<6nXO{C|Fzrg8WBT)tj@?I&^Z(?7S^ z0vft0S~q7$vMq)~ozEfl5vl32CUgqZVc03@aO4`)ww(%K9Okl8*0X8gwcn@&|=P{FXE!(#Io&+Jb2;r}K;vUj)i=3rTghbEh7T1Uo#&(=;85w%?+ za(#A|%}y{gy#4kQJbd^Vn-5H@!gp{Z?Ow1>L3cqLCaC60*itQI^isic#cyg66B)Fs zQTY{BhlEXJPV95ycHi;oC!gWdr=Jy}zlThVH>Bhf@A>j<+5KX5p}~P>bp}7Uy22N~ z@MXO7*1I@eTokk#dVVMNx%cK?XY91V`T7R8H#Zf>s51wOOs~2q8IRi6oqj@=Kb@ah z^RF>zlbDBOG@Vtb2+MLv2A34I9P1RWSTWr`nhe$#jV_z|TxK=VQ|Vu_fLhm^7}@HF zCD@HOIAtaak7P>v0?6~2SymMNgcK|@mYTuI1@Krg?h4w@_KMDCR)JO_9PC`ZY{as4 z=%iHqZ`okEU8cg(ISdF6vAWSys+B%8K2My@wrkN4Tm~XJ^t8Tzz0)#mr8gLxTWhpo z*G*=O-^HPJ;_qU9c%K4;$Khvq&BwG=YplscTQ@SkZ)pS#T(ww=2I^fH@b0*WM}ES{ z#BPCe?!|Zk!M-!gDw{i5c2&o3W-!|ni_*_#vyE&FLi5LHFz29EJJR4A2fq;wFt5it zhj9qTT-LyFPaxWA1z?{mZkX1+7S_#ZE0O!aK4+)K(a2uUTrT*M(D#vnE!WwRR`&O+ zn7A1me(Cx-e$5iQjaJ-GTdKHX;b1hObDgeKsM4=vNY zceC70)^YkO%fiStT2$)w2T$fRw+ItcJ{W`EbVDUuUAMzaYt1r|BES2WKaI{`V%_g-?(Nv~je zvJxQSI}vl@e6yFYv*#%M9wj|Y=V6oUJql<^HCxLG`EYCZE(^2MIN|LlZ`J!{wt5XZ zK*mOW{(+=o26*xE6|Qe?DPQHpUP&ie8ODm(3RIInvJUc=E!v>`tYnHhnCcL*P7vvg zbyo_Va}ZYZ2D7s8SjVQ8Tv7^y8ZKxMQ2@v6BBA%m^=)!kW8@e?PA(kzbO9w_xj418(hFG=LB9&oH=0vU(7;#L*|IlX9yd8&hoCXyS 
z{9eV)HN%XudzC720LK_&0QMjJ$B4UO#NF$2`bsm*pZQr_Y~|0lSsvVBgN>Xvl3DUs zlG^n|r-->>#yQ1&0Wby!#@4olj?JdJDxzfY)j;IBD>*UU)2dPL0RdCD$haxAJBOI0 z&x@umX})+lK_*nBQsVOXz+*~7pto@XXthB-v%`Sx z!tvTuQv0h&>u`S#zU(!Mtrj9cvL$Nm)6w6H{U` z(tA$azP`q@=bz%$%a{BaZmYFnRNBzQvc%fxW>oS*<5{ZV1rT`n@F9NWOJBy@Z@pd7 z)@}{-+f*CQiP+DKxJ=xR7YISkm@>#Y8UvUa9hG z(+}|>)SYnf5mGh~G+@KoB>9TfJVbfMP;+GsR;+W6vCxEEVr0jTQmWcRuEnX~=9j~} z2-U$-Qh;`x>{OL`uoc0`E&v(|9as^+J(!0qEZ*#@QVdNhE<4NQV9P}lS?d2CWLBUz z(=|(w;xP(D-3v4hK10_`H2*^kj3A$(IA}rpB$**(WJMknnjI;eJrr7T^oD^2lut$i zH$>M6#(&pA`Z3|EIV$`akb!?D;XvP&Cb9*BtE#jhex-D5jA8? zTZ_W<8j@kFCESkU)CB~iEcQq>|7D?&;=CAp^~-Fhithw!*yvk5$7f8$AS-Lz$Q~hx zGz@#}IPcNdiis`bDCIVpG?B5M5w;Q!Z4@=a4wfziVd0UmS@yuqeGOKuRA}HepH{H* z=RWbv&!6I{7mwDmIB?<(`B+Q{Evy8As}vNT6}rat{8lWh0ut8cOE9t_E${ddpQnMu z{YUrznkcab$59^9!zq)*ogI#s5@OCBF#3mLV&7*;*cs!OYGEYRiXu!;G95b91Bxv~ zOBIc@!N7{@YzdVaS(BA0;h}6}O-;!#n2Hn&LS3R|uQdUbOKruL9;2qRF|LW&a>Zsj zm;vnAE;d|VUMzGmz(~)%AT#ue7;iikl)=c-3iwcwR5y6_$zGmZ1LHIYH#M!{!;C1B zbp*y~bY{Da(TyLxd~LNnVB;quvRh}7OH!XXU^WZ3qoGjL@Myc-@IycJg`Sd@+GDFB z4du3Tm2$Yfy}|Pr&lzvZ+~dRno2mz%HvP4A53T}&+cX_&^}r;9LPZoi(RtuIBx_zn zbc%@wS@A3YG)Jio-vV?g9mU{nb7)WR$dVj~EodAce3~*GbCic2uGct#HC+U4fnS}m zuq=XX9C$5k24dErr=$9~Jrp>NWad_3=$;yATZ3YJOIVO=3(AJSY%8@TbXyDvNtR@J z8N(O+)xpG;@_8pWJZe+I6FGg6l9dsjQBubskERDLi)QZ~3Ad!r+F?nSZXLC14Y72S zLS+D(C>^Itp!t{nB*P?+RDJFrlP75!H^Uo_lt8$9o;R!}1a#+}}t&vi50%pDnXsSVI9(N$N&q8OX@n9hYhzUpV!CIE?J&q2sYOnKe1i!88uL_Eag?yggJIzmpM6%qvtrL)TqVmG2@b%%4Q#f*gq-Y4KWS34*cfp=lDjx#I5;Z zKXg#+P}O8q#*|WzDmjck7n&KtvqE6smBL5fAX#`d+uQCG5}&7m#23Ht<-c~icMr&& zen5Gyz$Ts>;nw?QQE=%onrB8$(QjY1gLShd3bqjzkby(Np=rGJR9}AkYyo_%s7p%Z z*QO_=M5liX2fB4O%qzbyaaK)BG%zVB`%YM_ZITOkD9fvabaP_vXS{m-qEj)HteKe| zf9g2U$<1)6@oGS$4S&0&R98f7SD<%?*Qw1X=b5>*h$+J)K{Q0w5Dke*|NZi^05{iG>U^ji_Xo>4LFqh-%SOZ)_-?g%^^x{mF-Dc`?_f za2yJ2w~|moR)I~hN$UK9wgTs)l-3F7kVmGZWC}=ffRI#=i2trBNvD|PtiwIM$?$?Y za{Nq5TQv%SYQ&=^b|sfhrH~v%sN{dhKk4oJrkV*8bMp2gbT0Bh+4kkR&9;6xqoceN zLbOh(IJjuGBefA3wsyb`OYBbxcL5HXmy> zwx;ul8$p+1Yz%%UzJ>$Y8<`e?)U!S+nTw#h85Q%n-=6XOvrqBKcfW^MuU;Qw3i*uJ z5I5KAHxKFIm1UYed@w$j6To&FcyLwG-tL3Lh^3UN4fCw(A`<8G8Ie1#Z+Cq5*)v?f zzHR{N0x9uXZ?ZFRlYX!`bb*wzD`anXyJq5fs7OqZKQh=@w|}pqLJ0Vtt;__^NpslwRitzR| z?a@%@5P^E25oe+m(DXXWH*M}jE_c~l9S~4xd*T>+?3SQO( z>w$%sW4bh|wn=ynfFSm|mqvq2fxvxtJVe>Y_n0^%5rppoP1=I(RK0ouK)q`~r{mQ+ zT`w})T5d3G0@=^Kz_G3c*_7s+07#TKO;&{j#;LCF6bp#Y>DVXcJlFRZT1r(vv3bR> zrq#cT8gHD&sgi{FxlaO=(@N0|cLWS)1olj9F2FhP*}cTyeDM^YV%Feksy7orR)GgV zexDsjmKgyobjs_C%z4zW<-NvawFlv?)Kny|k z$jea@1G^rhEIvqz9r8AL(Ver$4bf9#>qBVy|Kuf%?*soOZ`|_TW5sn!-W6Up4sL9g z1TdG+{s@FRc<_VZ$C)dxtUUM0ZAEH$HyZdE$HDdy1Re8(eH#KRiRwNzaB2 zH6*hoWt73WrpM9pGURH&kO5D@v`YRMv>12-bC!?uyWjaPo;`ckgAE@n5W^TFE8Bqhq?3pK{i`e7oUd{J>I&Pqpdz4R%p#(un!$7vH#~p&65spy z6TE(PjoaIswVmO*!2D6k*!wA@^Jx%%SgG zGC(ZKI;BTgLiIEJ&-`164}R$G6uEo-j(ENJ+D~Hpqdx~fWvdZ&e@Sv^d$vz_shng7 z%!d3%OBkXzb;qoCBcJJbP6;lVNY zeAxP339Yc@1Qi*vo?%#Oyg2mVtpLmcBP3c(sbE5Y8`m5&u+JTPOgwx39N+owhq$@E zsmMW3#K!ZH7KPD(A8;sr(8_c3nFrR$Fu-lYdmp@yFMi>Na8XpfPPwX$but;C1|<93 zaeI5l%NNh_(MR9KtJkmV3kyMym-aN6lx3-ggp3YC?uIKq^Wf!|l5@!+El5bo>>Y3GgVWQgY9VhPloI!(KWFYvJJCbJko}0T8@Ubryqt8 zP{N%cf>JD0B&!Bz!+S-C21=kd=z=Cff*T~+z2-3Qg5?u+3ampeRkeqdnsvu?ZWZP@d;i~ zxoa665sgq3ZZ0*Mt>t1W{YcCo*6doe6*eH0274fe1k>;A&aUx!8%R8ObpKcOJilIN zF&`9Twofc#*(@YgZ=YjKZdWCui&ezPdU1#jJtB+*#De^D?d@BSuEUX0Hf$l zcmy)aQC<%#K{Iou9q7>YL!!vwq?B9uJV-(#1hQIM4dc|c4$d9dH`g`U%y{D@OKPyF|=TkCBd9I@K&DA{XFrlZ+{!lK6~B`c20W9>>x`W z-dpbQ;G~&eNd)tLOQu>XkI@fKpC!rq(Xaj(-hclKaNnu{pV1z)px`lL$Be-F_Kc?= ze~eE*d5ZIX#*BRd4s9+#Q!Dt^z{pY@1`Q!7@tWBab<;%N6N3kk24GFPKBRFR4^Y?1 
zWOYb%4da5Tc^~XXpJ6xgxgd*4>TzmnGP9jKHTYQaQy}3RIT99(v6OUC87;~~cV>Do zh?e`H^tV2PInjf($Q5~8a@`uDOonAhLad=q%VHVJ8?kB|Owo)N-Mm>FO}JlQl#0R{ z>IAUYpdgR&Y_5EDDMP%!ZKTqMsL~8n|G$? zQY3(Iz66sda>awYYmbK|w3h;F;=p&H@*=*EA7yJcD;U_$}{{U}2c?%b(OBft`9%0#4pR`yc7f{oh zog}qLaDXI6$11|netg<0xkaCQouP9N%$qY_zj}?A&z|A>)hq1uc4KnLkU`5tGFdD7 z`|M_npJ8M)^C+qCJ8h1)-+6*ZPaa}BZOdk;^n{!RF{f1*k$C;;H9q{_hj{k<8P4OIixtc}*tI5bnsMYNI|EQ=Rlsu#6UZlzMR z)kXC|nD=(v?WiE~HxgB}^a!aHGMD<0H_mm8wOi4w&au?SP2{8!?dQ)v2uU8jTZE_#~zzpWc4>jy|Nv zyxm4^-^|12cNIWJYeX;yyg)?WoAxJ4{Dmuw>-FEZb%`Bmp;=_jAzwE1B(up&H@`uajrvbTz_W;8ATzbPcVUudb)#ykipu5>!5WZ^STAlBuj;rUr)PIb* z^XxPqjCJNlX4B>?F3@_DTVgV9a_yjv=#;EFTSF3iS?~zzEQRD~+hm>jYyxtrHpL0j z6MSYi1lw(cZRG`j`1pN1dFySQE;m{Z7h|)yJawLo@E){hKQ0LxM#4u|5=F!3X`7@^ z8Vvi4j${RZF*sBQFcEn5>Ls3j_8DH?T;qImi`j(#6$_b4NTKw<*j~HDqdhE;Pv6a8 zV}r@2F*cr8I~N+pZRA+Bi2=9g9XID2T)((R&cw`}n{vgT=1NnLi{qeh$OBAUhvQ0_ zS_3G>wL3SwVhDiXZX@5$J;e`ic9`2z?-|dXXaKb~wc5bbwuDMo(1OIE#`*i;)wWW; z>UtG_%bHhZY~tHWaHu;4yiUpX#TCIye3`kH#oC^k@j8l?G**i*EesYL#H3I=X& z_?tDHR5cLgz5Kn~E$zmE+~G4q4Oa&^Y@)Jk#hP0W(<*2rr~s*d-7w&Z8y>@P65B!Q z3|~nhQodLeS1$QHsX8o>HcS2Cj`Scz96+9Hpl41ibmB}4t@{RKJsWZcHrsm8JN1mA z)m+Mf*~6E5@8_uD$l#J!PxkYS|Bt;l`?Yo3uDe?A|EQ4k?kmmU=k8U z1QHTLC=m%HF;NUsBISh#9+2Q6FNjwZ{tTr210W$DViZCogs3ba$cn?gzOHU@?m1_d ztC-*Dorm69AK$*oKXA`oQ?l%woU`{@bAEG-UbOaW*w2$oM%LjWz0!MflT9x=;f-9Rl56w%%VmkT#M}=6 zR|0ZHI{O)GGtcl42@?>Z>tznlkn<&uJz zUkwBd7E#+;A&??)GXf`jGefokw*iVhE3hJc_Vi=gYz$>J&XFs*AXjrb=n(|t%Ph%6 z$SnDwz7)fHYdideuYdXpp1pWp6G59RI+aaP6(_y&XMsTVLINjkPAK(S@kPqLuHsIS zq{=2vXYu90kACzS{_yvHpYK%x$J}K%sBKVi(io8)A7x24fmv~f4~38C;84ZaAU_h< zSC8R7a4}9bbjaBx=YWc@WHEKdXPwgD^n*8fy+>w?f}I`G#tJWk{#wJqxzOB`r7d2unW>}~ z76zRptHc^x10Ni`l#-dcS@|YK8duXV@22ALAO%RzJc;U7#_3&{CKIGOBXy$7R)-kV&_HA{>X!x92aj+Hbx!lD}IAMd) zU?(dYeYQ%XhHG0o6N1jz{=>hCn_vCY54Xp|>#O%N!~C_sj?1gMtB?gJbCS`Uj<3ep zSG=$MaCN3sOkG`!vNl9Zh(n4xI5rph(pQ|IB8Qvz66>!0VBoK10445WrEX9Rsd=9r zDe0{<@yDz<0W1W%j-YL9`Q*8*Kr+OrqP26%4{jYTXgbGm$TXFt+E%KOd08P}JiWm6 z)is<+72}}TqYl3c>n*z}#ta}B-O?q$x@4RbA8itaEY^f>-o4pyj0}|)^d)(dL z;`1N<2=Cs$E1Sy9h2}OY?fQK|+jF24ilgPGug+y%Ip5#6>^V`bjx#GPt<;u;Lt>LiLNqBP- z`tLXl3+m^KgKEQ>fiV#|da3q7gs3VK-RYBm25qyTt=i}x8ETZpbMj@^&0PEf5v&@z zEn9=O(z1If&fBE9pH-iBgeceH^n)mxd*(_{Fkj0~S*ML}aONRQ-GK8v)HbNaX$=d4 z$|y1@Y(WyeW&f;Yl>DQn*6MSMUMMoQob`KZnMjDp6El4Mbd9TN829xF?@BDfK#Tzq zgZr!*rS=L-Ob$2NT7u$GgcAgd4SpJ!X*j!KM*w`5OQBO|aI%N;2)<|NnA%`U&uQDR z4Tnz-gvLO#K=?LnV_;yk-TX`lYVv!gBvNX0-($jqsY^0#%W!&d(CY)BHH+PUD0+7P>>TcPH-{@jRcUu)eFbr3&$ z{uDp|$Nng;E+2CX_c~enzNgSsg0*Z!ObDlc*lr(~|E?-W^A&;dO^~VRjw_Y3Z zcQ-e9_x^^FmP*VUvBE(};PNTuhwnk@vlL96Dk1Qm)qUWKL7d<@H9 zya3VGkYx=a(@J^a797+nQhJ3Y8j5(`e&bw#LtEV)Z6LkRZp((7zsKu0Z}IN! 
zJM44v?-PKS^$VeYQY4s`8)SL(VE=IU19eYrW5d|4ae2DL#pwcW13r`}5}jnW&wI@C zJ?7kTKHua0?R(tZ-8b;D;dN>tcQo5*4J)MOEKv0zgt%n(VkRs`P&$F$D5#x#C8T6B z=?TZk?E+=1HnU7@T}x8sqcj-Hu6x=roMJ%C`k6GeVuC~8Ibta4?(swHtv+il7x?>h z`hy(M^iS-hF*r!+n-9?77q?`0_lo<-mr*N^ZBh8=nzy}!3fnNDs}jFsxP z)Z`U0ntNbuG#f`^Y#We{`<*e(xah?tSX1phBW4DsCB_&S1h0=#sTyM5l?rIVhPBRzexKYGz`thfA@R@Z6^qx@%)hw>Y zoD-ITH*enJi&w9(=U%?bnAM~z|5%7XUTAv&!ZDTN%=`ChWuMAj{sM zS65vHn&|5N6*7r$oH9(=BK)6LKx7SijN2!IJXl@LP3{60SPJF(!`0D$qz2e zjJBTqdjf&+U;LMN_pP6QxH%qPUul&@{MxVMv?Xkt*r;T{3>?zh+H$s)y3;<*Jf-vrCx=QY_SXm3;X~P*=P6iG-ofAnb%%cA;Gs8q=;9c9tayS2SDnaZNuf1NkRC{dHzrW4e3 zMp?sdt-aaaUdI@CLCCDj(`#++jIknK;>~7*BmKwi=MkeNyo;sImymbF_slY2%od7+!3L z5mk$WXO=PTCrs}X@yNF7%y52hvg~VfN&!;|LuXLcYK9XRW#6A?{9pMBw;3EhTU+x> z{8H*xJ1Xs33XGLPV}chSK%p`dJJTPHc5qE^YRd7zkN8O)Nc_wq@=)d)As{Ju@eo#w<3zW;r^d;b>09s4{pqNxrMhkCm# ziK(qY4JbuBZnhU#bQPwmnV!9Pj_b#lHEjj3ZOmAuhc2v|`AFb=-f?qtgUrM>wwk6B zVira~A|YAHIJJf~Wl@j?PlY9>1ajvZpL&@ME&3nJhr#7#)=V&D2R;sl@Dx0u#( zPYb=p7+5DuV@egVcnnE3LsHSy1*x*OSzH;}7us0%V1cVrsUPH{bVhnAZp+LWugdph zP|X4y1hOmpHuN3^RZ<5cepBx;=zq>u3OTnBb%yw|n(^nDlLOFS{uRWZ|BDZ|#>49? z|9bVu|9PDL^51}46`)czs?lr9C4J#akchTHQj5l+dBr7rgQ}5pv2_}tw_u6d<<>Bm zG%5`kdv&nVLb5IPj0%k+ED`2OV2RPI(_0Ow7wqx8M zvK9wYWl#|}cxM`io{yhC!ONE~adCN3gIT8%O^xYcc;Rf05`#nH*+UhHM3frhmCw$KiORx_Wg`K&p4lVym|W;_qS(?OJ`4=?2vbUv>J431)~eobZ0V&iv|Bk z!{yZlp1*jG>+1__<3tI&Y;v@dqPDcbhv@$99$&tGh5P%-hLo|4ii1sWi8@DC&21>) zv@`pjr{#fb1@onp)-tl{ zF(zVUcCEaC_}TSXac)-19yK)_V&zhL9aOBNYhBuUB02%D2dh-ZTGV+tr2^8TMan0B zMo(?vStL9HJD0e!5S;_q$v!CPBojF$BbB1X2UCk?o!PNtG6`?yjeH}|u^MoJjJ>RHb7n09BPjw8+|P+K)mpRw*iA=Ln&qgfgHvGH!Z}m zHyAgf0GTs@X@-FscwpP``|~~iZ`@-&ksqe})$o+Ou!FKe(Y~^kNg3oKTDI7m)9=s# zcEK@GYUGE3#82u#;_v*O|LXO{)#Z0`&UFYSXOZ2+{Fq;d)5Zv@95ns1b=i&MoLOoI zL08Ndn*oR6y?6v|m583M1uECL(lKnoG@}ZE(HkE|faU0Vq5pzIKDdb);gJ7?Fe3Dp zRmU^be469(>H_=S8P}KBxV$_;N;NkTY^xz&fN&OncF10wQ8Gv$2kD^HB|M}wx#q@P z=h_$p=D_*xj4!`@jr;St=Ibit`1fd2Ubo*Ym{3N?Ws62@l=f;1(a;$JoXj1+^h>{l zmoGoTb{cH1%kNg6$$F3Xy?muNH+T5m-~U~_d-IkXALSnx_%!7g%;oL#@P5Lpfz~8} zL$b*NEtoA1Nl_L!Ei!@_}<}=gTBbG4bxN;5V7Q-$qJ^EZY-;iQvoO-lL*yO#Zn{b zhcd61dchjRdWhV`f@pQIK+|+trulPl&^To$Vakm~9?pjB;({ViT6S~6lLRlym8l+w zjYtnFhIKTQA`LSGKBE1@ITQZ3|8v|ud;V~1JiLA?Ui+{8I>xAtnZC!%XRbyyq*-WY zlLo89N2|#MfHbqVy&1BZGsHC20C8rkt6Ci>&@kf@8;;9XFSsrhQ=+zp#ivmd>$)ev zc)FvtqPvbN$X%+8UTWZAN#Qn@BF@kv;=sFVynX;d%Vxo)>(AYWW$_A61>J00ld`da8e!iyjGhezC8;;xqH#fJqyT8YI-Zwu& z2~%~gj&2O4-V3Y2i&DPu!5Lro+i5tq?S#{Ify+miH82?iV;f~V6u)RjEt$>}!z=o4 zKkvA|y}^0EYlz{@#ibA+T|=sciAF2uLTVPiCuJc9A?B-@bsG&b6qmp`_VQe{myz-* zK)XPqvH_`f$;hP$jDsOa3PTh)(NYO1^;y;;$2*0q7k5-HWKyw7%Wf~)f=e(e{WjlP zHPLa}svZNiacBjTVr9Js_Wk$gvLQ3t&9iJkx{HrduOU@x$0r&?I&t7t5E!b3jU65u z)fl;~CaQ&N$&~%R9~X?FrGukkwl(C?;HbVtQwts}7LFaGUNaaWTnjhZtg1HAvXkF` z6Py$uEHIQR=o~zupA%VZX}PBDY{2n&Y`F4M55`W}_JjZcAOJ~3K~#dvGchcxj|c+$ z38){d)}K_1)c}>>Ybj05XuTQmv0>W89<#V7v(E0F($e7@B5SaaUI`aw2HOB1TQ4?) 
zX=G*VnhkkHPVDmxZ1tSjteX0qf>v@^HVb5F{2uwB1wIR`X@kjh`;Od8$@1;{xA<+$ zu9v0oX>}3%6iWd)c}5IhwjnCk0S)=;Gide9A%Gy1AI(!F9wI+~at9Is@c7Bo-%LS$ zOtC`BR`#As9;1fYB*cfNzq zzxW)v2ljn0rHd~MB<7^Nt>kAIJ2zo0LU$~Q7nT=s`A@xMhj47R7zgsB!J;5N2Njne z#^)|Yh|~|=?*r`2hZD%12og3B$S8b8AX&@ z9(G7`Nf4`1C`F0)X<)q_iVu`;l)JiDK@xy@RI_fPFf;NGQPM+2h6yqWZ5N%M%k?~Q*rjbGzq?XbInNxqi6*|;{YC8m z%Ab3IxSEm)$HkvcU=@z-QktPMKPvY8FM4cAa&GIc;nLBXW1Y-U1qHsazYna*_q zCaKN{Tjvs4pnV@Xn+VX;JjTGdIN|xT=Xn17IZoR}a~R6G5OqJ#{^`1?j<(FH2?`9o z%ZH^%;1KCYo{m$YB9QTn+H&5V@A2;48@ze_1@7+7ILG8bM|Veox$F0E4jY8FE+wPV zp#&nXTBx?|+v$X-kDuVt^>y9#Y^aj&Np?Dk*F8JWxWBu{>$h+4gD*bE{oOs}8L)%u z*4aq3=#ElrZIzOdk~Qq|UJNK!FvMKQ=rk7VVT~~{!#X7_XjQ1DTeaOW`r2BdDQ1GR zq0_RY)3n+&ES7*HW5?0o>8<=g5Bi%BVYV9FRAcQh&r%91@>bT*x$K$FQduzA^=ZoT z%cL#c-fU2t`xM6z4KH{i8_K?o6R?q8TPbOyo*%uhYoRkEV7`^KM-lUShw{jSi*`OE z83XE08*dGkg5QtG?B!>0u%zZWdBAjy#yU{Y%gmj_i9?E=>Prqhl8VfR_a+w9eAePI z&U;6~n?4U)@9PB;PjSLElY6F7nUGXHnD>yv>+;byZM0V zITdS3rdKB7JP~>3(C>_W&Wi6;#4Bj2hhc=3n%JWjrcwQgvq#Ap-Kz`^#lY`H25P_o zz+zJQl^t<9oUyC4!VNdP!ae@+`_J*ES2Vj)gPNHp7SKT})#S`X@s6AccKq(?^fEty zU71z~4skN2FY-4w=l}aq5&Q%XB%VCJ{@**7nNc)7X%?`F{xls3HlsG7YGPVK&K>)- z757LvRL3rc8ZA5I?pj+ILfU=T1iVh<;m#PU5|^a~Je>X_Z-7t6@|>1!VkiQe&`JV! zw52seG4|N-#F`E3GN>uDavKZ< zlhU+SQZ{QSBsWW08}ZM-_#EH)_IJx35f#_toNi`h4;A7d{j-t1;R;UFDTr#{T#CP8 z4$HucCr|O@@ioS%B4KcfJlsoh5qrI7qb9I#UcbS2zx#W*xqA=yiYTgq%TD^QFH1;f z6}9P<=uwh4WStqbE?RRYk{6fNgrzu%^tBEvRY=ZqeP$0R_#C_SCdO=3VS%!c3<=DX zC?^l0#4>L1u`=Q(R*Yt9fMtgSiddqd9yI8~^yutqhqdI)d&^LEm=awq=1R$mcS_cb z+16OHNJ2L2GaB_A$BNC8wMx%}dnp@&Kbw+hN zSu;GB;=otRPNJyhcW|hDb#Z~0FP`G+>KYhh4c4VbU}GXUb|P2w%GTU~JgL-LCydS#I1`oO#F5_b94>(MpsuhFH3bHGqbin=fT% z4KV7vvY_Ab!N6bjV0zHdL&U6CyGy%G4^N~s=skSsH++K~8pj`}g?0d%BxfZDflesApszI+UY|@jgu>Z z<-d1tWk7NwKg>UfC9Q{-pcOS)l@!Cl0S7W#(0gT<&KiQ^*|Qh;m4EJ+@X<#f@lXju zcVrW*(`VzLWqCw7=~EOV6%#4W1Ev(i^-G4S_(RqV%LPHI(ZrGg30lsX3P|y1M+Jz44;mfIRY!yzPioWi97%*p(e&B%-q&gM)o{#>^!<4{b23c35|G|FevOsObdqQlfLs1@sn^+R;DbZlL-b3v?r-_ zDlR><>#GbZ87NJcMP`^IW768PfJc#ux$k)Y_6D!Nc!gJQ-q3T69ym);;0>xqs}GQi z`DK_bFP92NR#`X4v&TmOeW=9+eLuqWt`LjUs_a1;xvKkHvA{xGM7}3LU*%39j&Y;zlNhnNx($d3eskJC$ zVGTakpurN@nf|bR@!+;uU`uw^%M;Ykt!yFqfUklc6aY(2()L4HG29ziXsqKj#8?_& z)dWDhCvI0l#*zY_y1sUvdLC-`xGml8d= zm_h%}>-(Bkc7#q<$cm7xuhd{o%_k^p&}w1WsM0AGgJ9$-M-ZFqW!Weu!)(>1V6Wa$ z62K!Qe&*35JjzlX+*x3Ez<1i=wn6^H4tObw?^AGDZ%AfE*p^!LyiZz`7bC0=xQUUo zEkdxsK1*fpgFVU^IE}5|yI7$0;iU}E8RW#&(r1s^DRAZlwm0Zr>R*RVrzP4jXJX?J zPk%-YaI4?g60o@=0K7jre*5j0_+d`uV2$s`m|A|P$eU9VVVbWV5Gzi$;gyF7c(bVI zt8KhX{l@9b|ME}?{DcoAzWFoX{GYLncS#v1D1mF=XL?JG2BWU1S|S|B`+%v5owW=S zoPryt{ww;Bk&Gzc=I}$zW30bZ!59e9xy8|4@k|RA9JECodM~ALZ~;`$E>j<-?0tr;&$|;xu&dp1TL>GaC>`)o7?v=3!KmA@}CXaM3O@y ztcX*r$-Nunp?pm~SCkuNmE5*KGF@&CreLcq1cMMEU6``MM5qDBx>fniDF-|WOKti( zoPR&#za(8&+#L^W%P1M2{Ge1FAgj4wuuSw84k?w2nt`#lMgR`%BF33o`%@$L`W<7v zmr63hAyqs8ghxwcHB^bY;{DR*Qb0)t#lc*~y6R02HYHL4)$ino?Pm=XW?7oJC6xN9 zL1n4;iGn=Fo*6aN$)Sy#^E>Zd8zncX;LsyfkbL|C_V54ohnwQz^;7xUudXougTGZg z6<=g>Z)xKCY9CUNu~POZx$oe5)_F!AhO=ubH99o;fA{H^dPK2^g(Dv9f zlJv3{y2EoWs2moEv1s+#O6)i}Bj=Rx*Slc^O0~usYkTtaS*Li5J`~$|pKn)}w zKYfJ9PoLtnUDEG8c!JK}@M%+vEZAca0w*DYTLZC?Wm_Gaq{q?2sh_VARfg6g0MarJYv<|^H-O=S|pt)hjVRLBE zQi2%{f5+uclhd z5)Cm_OmKTH`$lb0Y=QT+`J~-d(EDCA@%aLkmwl9wIWZ%9sr4L&7Z|waqQr)lR+Hz~ z;&lawD%)@}aDZ(rb+LJpGs8gw7jS!US;IPZxG_<0V$Y~Z%n(F)V>)6WeKHkkGA(Uj zYz`ZaJ)_j(044`}HH#~Y>t+SD4aT|dIvq2Sb0T99kj&*zW`o>wPMo3!8DApt-F%PN z0{sid6V@!YU}SLhG)I6cfyh$bDjA>^-&ztLmZm6`u^_0`kH!N(;wOC|@pu30zk7dq zy8MS3HI1ysB37D!1+%o*K3!5oHCf)o)MRs+t3jGU=9>jVPt1NlJhc7Q9=jv<9$gVHg313}Z<9xoy9y@F>9- zh8VQr40OCw`LGM>Wn=L3J_KlV%2Kur)3oh8EnC%g*nF(G-2;)F->1}eFN#T_ay%qY 
z+T}&H_OCjXKgMavk~!Ow#Pa6pm_T0?=K8FLR(mYI(a(*dXSFH|3*>+Hw=uu=^@p3{ z;q_DdvOoK;VjDr%Kt|bq=-ed1-YL&T zCo(#FlMv5$EfKg576m^UJ)u}WZMvO9N{2WGgc!4BQ#+GXN+^y|Hyj9xwnH4b)D@L5 zvHqlH-1r7J!}a9_o<4bu)5VD~g=*9WHXjE?9oD8la~02I<3M(Vw7wUU=R9^CQ9-2` ziIs{q&lCGh+}xe<#hWj2f8P5H&E9;EmCR&Rr4{OtDNyUPmTFWChGJ+8T#Z46Tdeb! zo9h{S`Rx}>!7Hr7El7x)WNX%rkNxgB&9 zyB(6xypcuVBC8&zL1h3`!IiQ`_F1UGYsMZn*;uWZ3$g}BD+Xv;TnX76T4cjgEk+Hf zI)c*HcL~9zm#=GBuuCdFjLQ^D|4@Y>(V!Lr6CIBmt|lLhDd+EClXZ_3Xx5gc#M}{9 zkkOg5<387@MNfY=qore0-TDh*S_-(YMO!*YciC*3)MLXWtX6C)I#{u~II1tA;jr3@ zYjb39c&92DCe^=MrTOvCF(m=6LErkh#kBE`5nT!~G=C*vjD^kqzFQ`qZx{IJ`f=GM zA`rs};4kIac3oAYqChAnRVQOB2*iE?_6$r8RZk}Y^SR840QMb-z=`kI z%n2W(EHPf{iy4VAcy2{to+n4cl~0KoO5$G=g%xVo8YkOdl)w5{p1}?S}o;-Pi zPe1+y+i9c4mRZc4?%RRFY%8U#OPwUiIN2!9eR9y-Kf%g-w>(6zmh3JPokM z@hg7;Qg$W56r~fyZ5lWA7a0|??p$ZA~Ulh-w8`EODk1&F1CTMlqoRfiW~jpwp(fr zZ)yJcfEloJG2}Q5p#(}lK*g;`AYy#fd*NJIIiERr!9|1hMA~S3PU~{F0DNMaREvx> zCulUHIA@`>hAfqA56T?9`CqigHcTyL;Xp@R$4+h*s(G%aQmP)pUys1)L%grE`y9F0 zFm6Gx^->?TI#ZcYj!Wk<2!=f80mAE4 zURNzzgCqgYrNCkAajQ^$@kwYUa+J!}6;a9wm!e$L#$s-8AtM#Z#dKVnVg{cmGHPtV*c4%#ZR6hDux+P$eiPVpZ-v#!1opYk$)OV+xIF>ip6~Fx zIAf=RdYafFXiKoG&ekJezK&LwI7Tg?^Pqy)c^0%HkrATA#l90Fe=BDE{zDb;6&Of- z^z!B3vkx}UioS@>gmqg30aXP_rbF8W!Wdgs|2_zq9NORuqv&zaUUooc7fqRg*w+Ol z;HDFRB7TE%u{jNBS=ef};==@l-lPP9nuBTwTQd=PKj|sMObmBiUS8t))2Dd;;#pmY zUjF<+6+&Sj>OHB3)^UF~@%oEb1&krkvk&Et;NkxcZ@3Q#d09nZ7DOi!H}7un>dRNy z8v4knDU0?knhM5PF-HyPw8o#;y!H?~V!bH{JbLyBf8^(X4%b(YN@iyDI~g}vW~0d8 z_qW*h!29>Nc>U&8QG9{*yF`IFC6f!r&UFN-txZsS$1K-nu1CKq)f^DSB0qcQF0TcU zcZlhM`OxVR8l*_-%L#hqVj@UBtE}-Rw6B4~+SJqH;~PD3Xeg9oJT*WIG`+E|oo*IZ z5$i0zLLS7NRXpfT?YL0N4#jlTGi6y)Ky7I> zd}HC^A4GmhvW=EVX3(Bt6>q!9%B#q=!*B;VNa&aM-TCveQDM?dk@soO`BQ%e@$B60SFSNJyQ3`Awuww<=r4QdAn0F(B>_dHPc3$s>$Zz_~9J)PId|e zFtQgKj>OEpSP^NhS_@g^l-#X4B;@<&z}!|8V`BEk4jPDXXwkCfW0c=972&Y#7|96p z9^~@pKN+^uz@x`caP{a3E>0KyzHIIo7#wOCDenalwt3h$w~`XQG=UgLeFn1>;$0~P ziSxcs0Q>ztZf|e#>dPFl0DJj#XJshaskI}LFGb8W*lsvwhLTd zRJ5sYgB#Zx{(yH)^7DwjSXN)WdWHAz-_(`X3XwLUcHIMVi8>~}f4=;naW)r_+S*kf+ke7y?*+Ccok*k~0GT78GMfqL0?dTDh?_KMjJYH)Fqld^Fz5?gK+ z0TpdfiKMt&-i=vuolL7lPsk!72p}6_D|>Iff3xmsZol=_m4x0Pb%^tex4`JAKV?Oc z^Es5lDK)u(RVtmDarB~}11ew2V^}MdK!rwRwwxa^Th+*!N>zST?SMR}B#f{YjM=OK zrMe6rBzaH*mrk2J6U-HfE{$mhRl#wHJ{F5?rTlrWWnpu`EPEkkSV~pPz=Z>+-0FO4 zxs~@rKdlnjs(u@7X^*IZ*^JrA9s7Pu^+UqPiN)mT+Inh;6^02~KmhEsmZk<^`ou)w z?tI3Sg4nYaQ*xs0Q9h(v=2UvmypLv1>`@7S^_|fob!!lEeeb$4DGwi_IdW-+w`1V9 zZr3 zn$7K4i4h&wSpU61!L?E6597=#c5aw#`3nsyw7lbdLcEQvLdVT;adpD0*RSw{AAAop ztTU300IA-aI~ezK01)LNaf4Bx5x%J7SdcEH;iDIy;E(;nAH((4C46{g)gO7*8Q3r| z`~)|{&Fu}|zI|7h>u`iNSTzQ&LzR*dWnk+F()Rxdkp!GXX1+tV$GAq92hJ&Il;; zK~J6c@Jq2s$GBPRhMi4CMAPz*&w3YdS%U`2Q_azQ6wCE2W-PUZ%W65CH2#tzce(~s zOggs79g`}b&_-}yACGvjn%%etB#at`SQ}K4gsq9VjA44?9EtFiPhZ*Jv-u+t3IA)q zjvU*=E%EUBsekQ%`p;qvp8dAs{{~f3spK0DDwJT<-gIr&Nk=!PM#MoiII{e^gYnTd z6m>Re1-mWTK2Jf~6nYEX>bEK^?Vyd%F;;}1>*uU{d^EtaC0xiE#S`dj!{J*k8lVE% z=Hgi#y#F#+I)|-oFLTH60$N@_y~g9mPoTDnK2`d)WhX2}>G8f_Qhg1{SEiNNRDyAL z{-`2IB?#OW0Q!7?hc|EE;_chFxZCd(mZ>=a03ZNKL_t*7J!XO@kG{K&e-_EXy#x^K z(`AeEYM{mXxwyQ*lgE#7d3B8o->8W5hVm&QcJ4c7OuT=8gU^2W89x8)bIiD}2xsm= z9D_q5T5EfIwdDJH2CO44*>w0oMzky+XLOV*+7g=$5l=NuI9U1!HT96Smv>jNQDP%0 zFePB{NHPn`W>a4IY#?_MmKYq^3l^Cvo*24;E0E&W5w}LJi?|vcmm5LYcQt@hmSGMP zla}95Vh!0$kq7P)e5F!#uWa312cyRuCQ0v9KQD$(+UgZfTR~J~%u;1waV~18MFj_b zOm>N21Z}oG8)gd#`eRF#X1GTEH#Jz~h8)(GP5U59cJuxL5X@jEu8{cJ#TA~=QWR8*WP-JRscVup?6gYIsNbb*t{dxqMOm*1eg`@Yd8_5BcI!B)xPt3>OzCJb(Edj~_ka1(f||b2hIvF%*cWHt&xg zU$0?2s=a#pOD$zG6f3)n`FFBKRLiW~$fi^TW8_dR^}>I=a4lE1nmjb273LCB&h z`gmusOVS5@cIe12Rjx_-!PBRYad~wK8;;Yql?sIZP^7Pf2usY^@!9u&h%dkV0;h{h 
z_;x}@T|&XwI7rnX3b-w3NMIGh2CLN9gMiT(@zzNnh57E7P3w5jwPA=wd*lGknu47j zwYiZqK@Q15-N8XRVp@Y7EsInfQS;j22XD1xroP}*;fr;V{5&NQ&pfp=Xsi7N=kSP`p$?u(IwzS#UsPsUR3IwBfGY;{k7Kbs5 zrt44*K!K<^rHG?txY2ak=wuX0vV&B3Cv`r$JVF;-l|aIQW*Ei>_^=sR|{k}gjz~uplq8( zHe7>a-XTVX>G^!d`FV+V zj~+k5<<%v|X{!OXGsZH+sH>$*MquA3-oAf_H}7B9dps8unuPAdwts;DliTI&VKiFO zZ&sF~X!8DH@3Ye7nXH7C)guo!U&d=^u1IsGTkN(h31C88a{;K(Ofs-c(U}z}OD2U< zVl)I<%9BXJv2Cer$r~{)8BF41S|rm@z}M)tSPlB+yGUSC1v&e{u7ySqKn~W+aQguI zt^t>Z4sM1VHL!||P74E+JW~Zi(i-C}rlLw$lLsnJh8>2Bwrnv-@g}rzlA?wDD_g@W zj75bpZ8BWn1DZax5IrbKrPMOk7mKN?q_$jaF8(1tw+$;jcS6A}PBqnH>9^m@{ZcD~3$cy1WN)j(u5r z2**#`aCdcq-@AE-@8!988a#vYNKmFdPGE^(|0Kn-^SfE|RjRPfu~KmOowE1iKHUCak%0sNjLmh)*+Rx0;tek%RFqm zu_mY*n;K(8*>3HUgmr8tMLdck^K1q~_6D}n4+JTy={qm--r#0$?AMFRj-CW>c=YHI zuCA|fy5Q8C{y1aAlEE=pC}A4ujp!-c|a@N*Eq+&n% zO|Pid#E`K?&$Z z&qZUlgSv*F^Y*X(>o|Yw=N@i|hu6akK#YO$m;V|zrQVQ;-dzct3DH6B&1xbtiZ`(_ zDIgH=fwo{S=$^#t;>wy5ljXTkj`AU4G+IHCNaW_-1XZ(QT?2RzQdX>DSwXWKTEX2S zN|Ax~u@j0+yP4%^JBPRGk~UHVIap#+)$(D$;luFg@ngJv@f?>|R~7lG2x{r@ycCp> zrG+qK*CBSyJJkEbhO6Q~=Qpn3c=U$2y z0+?MYjBXMM{kF2HxMwTs9zDLsC!c-|FJHdI#l;0SZfzB_of^LEOiGD~{r(mjcsI9GZ@M{vtRlszXm`v5;u z_Y|!*T7YSLeh8TAnoFOFrI;*26$U))gBVcXh-Z|_wm}Tpet8W|3KU9(r51LKu0KYN zQo7hl13mN-)KAuxsGrRRdskXje)!#(Sqdj{vIH^Bj*(r;ON*6aVQs(BP%mNOywjtV zIvKRlGUC3557kIFl-9U)SFxI<%+%tlT;i-llvqbqYj$)jXB(Ieku|r&!p#mzBIvmE z%wz0%I|J5x&Ys7^$(@*Z0^k!r;c71+rQuZRHhW;9cxw2$!7!Md=N#V5*b!&O)e?Am zvJ)8^lRTY-`9zXgcON^<6LSXU{X|3!ABHSA>gYP~z@gr%fo z%r6FTarp?}{N~s3==w3Xv9Xv4UN1f(BZiN`aFD;#lpAtTqlcs{fFu08Jr$%Jz1^4w-u)Smgb0L&Uws^*%i|zr?1N#l=E@J zfmQF7gj4@-4iv2=vEJZ1QwGc` z=iYqXjm4gr2VcKziP7gvY8fDrBqIbfvMqBB((6XcS|S~A%&fS&)X#HhM8g7}ju%g# z;^~v;xY)EU+a&?W#o-?QSF$;YVu*Yq}v$Q@EU|@GU1J*?8|~Vn?j3_hh#w4d z0`K0w!_E5}f*Ywo(QtdNB~MVb=SN_MK8sw@>H=??o+CdbK%s1df}`71idPLdq+GPD zrd7}QDx}anv`*D@KUe^mav%jlM{IJ?f3QyEk1dQM>+^_xrJMDTE)L*p7hsjt2e(!* ztfhg1EoCEG?61XNvZF7#YnhxXrU$K%Ye`V)7FBEPS$2wU5M?z;Yh9DnPi1*NETs9F z3}Zbq(6zM&Iw7kIGr6*+cJ_%gHdQWlQPmAI9A@ zA4iH;aDWmhYt#CkWUIf#xX?o9nGIY{K8wRUqIFq}*S8(c7}P0%$JPr|0dNh0InM}0 zDW!9%1#q6-s1G-6UXAz0!O;EOyU(a+nrd(de4G{s#FlC-HHdPrA%v@53(P%A{hE7y zu5euXh5>Ke{YVtEZU!(6m*WJVf!Gq?8$14qzrn8AQLbTGc3PT34zv)p=EhlDhYs64 z_94#yKhJ@g1BpQ{P$^}DN*;4O@FTuz1Bs_kp8g|)e-Au_9cxk>v!++0kIDraT+>{U zcGiYK^8CSu-7^}ZC_9AI7zu|>9&)IICUj7U_bd@TON0=8o;Cz2ZM0CEhr!@$wRIikRajGuq9EQL~+J4uiR(qLQn-vZZo;-bw zk3asDQJk~>&YUnWQnU@RE+!%ZZ{NMa4}SPP%&d*idG3gPM*uBaT)ExW+lP|mEx1}! 
ztZmYZ>88puO#Vv$LJ` zmuY%$ET*O)TN<8_z+BMN+6Htg$eQ7I(HC)eCJB;qaZNZh%RIz3WnKPtuhoS%3AeOi zEscfM0HUL>xn=8<1LqnTNEx6sMr*k=n{zh1NjbmwkVIjQ z{8f}xK!v~zqor_hgX$Ds8`qj|RN4|I;DTc6u8(Rg-KuE!ixVy`Pk8+78r!z@(7ZtR zBEWO-ozd}s8ioia9G!Y4N_-9L5mC}^)c@W64-AOW9HH~wx!}<^ceuYjmwF-(s?J>L zVn?vhVr8w=0JDR7P~LF@7_Tlb@$AJ5JbrwQZ5!~-yRp_&GUtvo;C{cy&CLy7zk7@K z?{9FvyXV5=K)t6YJk^X)1!_ugW3+pzU~SP8#qw}1q|n@m6*p;Y>POpX79X7rt5RTs zga1^j1JyEvcbYQk1xK8M!5-AIvOl6E{5>47INab&J+N91sWx=yoo_2$U|PFudg%c= zdEn8^i$Sqjas zN<|feOO|Pi1wn^L=c3bL`WeM_wl{d+l;4=!t6BwTOh6^e*?lUSe;}CZq88}lZln5@ZtSniC zj(hh9Rg(>fEmsf2L}F}ZDc^>m^#ho@~((C-?p9iYmEuG0EQ9KC)Ja$~{hBF-BowxWMJL8sr(CG6*@w3^m*9wBk zR%vjd8;IFifK(Obj>!N1;nx3(4J7{X5C0%foBt1@5l?}xhIRTv z6M;pybsV;!CP4r_rlcF&0V0?t_E0rR@}yDbRnMc)hTdQ76p4719Q%Xb4^dob9UCUz>J|ra~bQ_mt7Lbtf(F3 zY%_Eor_&`~Jb#AktE(D5P%0Y-s_pz(aAf0$g*LRhcv2yS_`&pS>c=wDoJ^H|7$cBTI1XRU#2fW1$)VL1u(N7 zg47)r8Ku~01)S_^WFSp&CF*@!6t9YR!3@qZjOX^jenEJj8*uW0ae0A9*N-ty8?Bcd zzz8zuf(sVp&=1bM?$Sb=bT)IjYN_&j1k49A2iZM|w0)(b#KimeZ}9fb8{FRAw8=81 zDAZyo_g*1cr==tTlLVEEL1fi*DdlRoe4;o;-R2-^TLbXHq$t7~9qg(%ZXt z`26$F@aEl{R@SB4VpSaAp<;;m@`WA-+Ei>Fls0D@v_9*ZYwK?#_`4E9C?K?> zeez*JzdTxz2@VZ9U2FkB2zy?(Nu|w11`r(Lh3?BiOLMlRDGxx#VDz>r8rGcZpj8w0 z@ZS|5s^@u>OrMrjsVG`e(U&NfT8xI*Voppf@wGe;k*I7mlc{N`p_q3lYS)Big{Fd) zwTidZ%X&~W32@ZY9sCSSrAce|2T5vmCZ*J&qsB`em1$)O5l6)dFr2;ItB1_V8kho< z^K1z+t4sqfW-_!L3VE#QY)l^4Fj8PX!n6q^8(ms05P$R6IIT%Ff*!tpF5Hah};8{marpN zZJ$BKOJJT^dkv{$c&-me=EO`YWVj$37zN0zEh92^+>VKFzyA_n&ROq$xWWRPOP7eY z>>w9&4N}7@A{9#&Bb#-flx9qDL=TwvB8>#se2ST#%a62@JvB(9vRwY$j)HGOeTJesa2Sx;WwG^JjSZ(F%;8`ztF3ekznM=P< zRaiE}x@Mb4d)TsU1QiZF6*z3kb$>bIxO?wSHkO2RMx~ayaRzvdWt#7K%>!{4m)uk;-8?=Pk z*0x>m#I|U4t+iB=DhzIqYw~WK@zM7f6+<~ zM{K2DY-J0)BG0pyEuk8ka8An}m673r#nK2Z2M6PSl@>;jIEM>Cu%{nli4~V_RpE+xw#SQR`EVVKM?%P3SQ_Ewc zy?W@gb;FFklsZ}v+Db4nlr34yuR#SI3bn<=7d2SgL%POSbs({g@%PLKYMhNZSW=lWDQ(B{ zPw@c`Hj*9P%)?dSks?pq+FQtTD*J|jKT>7*nwGkviBy3EIfl_ z932@u=*;2);0?``JWW@`qwB}`=;M!Yd2!v%b!WFrax?~uvr#`&N15{Ar$S*uj4eBi z2CyoZ6%F5Nj7f6%_kZvM{LXiO2T;TBv#cF#o^=RmbXawY;!GJkWdl=}B|VH|jEem+ zhmV18ed}BJYVIR2^`zqPKe#kdffFA^57u;zzG&3uX?(9mTGwa8W_5 z|BhRU0jXZH;jTapqJmZiU0eyrP?5s|%_KJtRi(?f-7(fK#)hxK+fl|AYz^!Xjpa)M zT&fyxnWqwRTJh288bv=Y|8lgSSpe8<*{Oz+wyZMrqB=c`i9WL^IEX2fjEh4LqI*!>Nmq#j8?iTI zN~+SvPxp%MR;SvaY5{zpGJ|#a;Ywzp7rh4Ujv(xMm}6`M7pD!+A3w+Cc2P~i%!X=O zE@&(e z=QWs}IqPT8V3{y!fyEmmDyn|aE{U;}XJg>$HhNsRNV{8m6(DV`J^I44G;Kbe&b@ z>SUc01>bZk3+7V6rj`~2f-{K3>e5l(C>@|r4-0?0+?b&m=w5+ABvbvP9b)-?{H%rFP~v81(`Aey81lvWomw&S-nZrY9FXOvh;jFqT3_Hsm;{uxi#+?<;f23HQuZyAy=ZnGfSYec62L zOVwOEEj&yXTa-E>#j)eF=FMzliVsy9+J^~v3jmiHH~~0K$416ou(l1W$lD3Il`+Kxi`b-agQI&cR15B-s-tr zDT`V zl`nLFvv4pTp>{kl?C|C5ZlbG zwAG$NsV_|S06?(|4e3%Ydo34MrZI-&>iP;VUVMa$i_@yg(y)2

    LehtMM8(`hAl% z0&2>2K}c;93FMBLWwi*&C0STlCgy&R?|<)mc=zsI6*kH3J?eLs9Cjfw>ARiW9&ARg z0;7@+p2UoRO=Ik5;_IJ&9T(dP<{K`rE^u{u3Ex<5jkRfutjNzVKK~Kkzj=!}ckKJ# z&1T0**(yTY8q&?S_xj$NvBAqe>adRuy~!UiN_^2ErPMdX7Aadq=p}9Cc7;B?+Uj^W z-0-Cikc>0KR?@~;X&G+5wkOTN2u70H0qmp|*@qC+r3zsezfnF&(3OiJ;%jvgk@PBN zPxCWp4RNAmZ~C%Yn^j|7bdXwhbdkJ#tt+KO!$EmDYxA&%Bbo`;_9j-WX)Z3hG!xzi z@q(O5H55`r_*es1O>vC?03ZNKL_t)M^j3Trhejo5F{xb5OmVei>GJ3q(I@afIshkEk z`IO9Oah1R`y)ZP47j&um9O1qo!eU3XWt0)%QJ^=n)(P9BG+XB@ITRV#G;Iyf_c5sx*kUSg*Uo7Q<{npBK!_ zjQiVLyt{di)9Hj|IYpD6`%LqpzH617#^_iJMn-`xdEh}Mf_U+;VlQj)TSXb$z7(8Q zOzUthF2ON!iTF`N#nP_s5cTc;S(06YN}MK6p-vsSwox11URFMr)dkL^fsfK&X-s+N|71#Cd+s zF+*uJ)T@RY(-FjCPj79apeYZKF}<(I^JD-}wKW#mPzxAu=g$)uwgQ~{4wDl6%NIIZ za6J|oO6GMmFnTPy%_1693B8Qr7n3U_)glVlhK)>Un+*V9i;0W`szz;z7}A9P69C*7 zivb`TD4mHK(NqwwBXjT_1ZcX6*Fpu^lFBl~dfjL-35#8$8rrCK1C*Sf$9=>1?{4tM z=6IH^(RR&xjkfdk-VonwrUQ8R>w!j;wE^bPX~hl$@RSkZ@TF={b!F`FwUgh4Q;{pQ_;M#TBk!Tou7r2;5Ox zH@q*f#Y(1^GFfGuC34Ch-~vLe`iNK3MsBhWBBHtR@@XgxMW@sZK(MVllLwa^lj_^lc~!u!A~ zL24C2l-^aUSNK;)@x#}ymBx|3Zp5Vx`$Sq33h1`-ZAJrQFgpq*KkfoZvcqjj6q6BmMIw3@X)q}ZQGN$5Luug|zn6`b^5c%` zz?mM%uTDMI5g<^WOpZ9J1zOorT0Xe65DvU;5{tcZKKc`TF}@ z{b&CabeoM)HfTjKy7sn;NF09uR5dA|e<@d;R4P*EVaCep=!2I-n5H-i_6P6LNH>I% z%t+zSBn%E0)K@lyF0~dx${G6YO^q3|Jfsz?hTJsq)3*$HKd%UZ%DYOhMSG};a&W3&OZ3ERyUXS*F#CxQ-910(Sm zP}$PMA%KS4yL;T--{SG{u`I7JVQhKqExm!v2d{LJxQ(pTaFTWoXz?3_QJTu8dqTl2 zIaEp;ilz!u3fwvlGdhrBGHgJ;nvA=8*qSgbzd4M^KnO|ONqv9Gk??fE!jU$bF<3Cl z%%#PBk(lT1HijuH^=34aGJ(U(ZmXRWW@D4BuvS(ObzMb^yD;irM^mP3q6BzWs!iHv z5zH9$@%&qaH;~yzWh&JOW;RsPnGnMRZ>lEMl?v;15*_RCl}$Gzy(8|imp#>i!N!uo zL`jV)d}OLgM#i_OrDYB5B|9h3UQ7>C6w9jOfW@hy8YvNQ$6kPoDTZLe_0F)cN6WK{ z3SsimWa!<}fCQCbFLVy%8Y7K69CMF9ztvz>I2_(P8U#z9Vaq&fOl^ZlqXJtH)@_73 zOc?zsXqd%fP^{UE3rkx!Dl>A(m1!7DkAYe@%(einV>!*RWkE|7FdHB{5qLA)ZUlet z^Z|F-P^EfUskD)7P}wS8HT2cR4NRdg$dRZB)~wBGv2!68uE`j7$JhIuuJMPxA+gMmsbx_kor(VA&m$o2fOG!{s9dj=!k-FpJ;v82OR{@h`hcrE$ zNUv-gR+NNqCET+3VU2NHBOsxgA0i4g#mFsDE*OsG)G;;1H@^N2Y`5Ey48cRPGs3B@ zsMAum5G6!s9VhDL*EPytGvE8-{2Z@ezeE$kJkNMMoG_necu$MQP%_L*$K8iJ93LMs z=>~U)Tg(rS1+GenRmM-%#0_*Y&@x~cK+D>2BzPj#El;>qvN2SL9S4*ZT1?O)mTBY{ zr?tX;*fx-P)QZ89P|7pARbDny*_$skW2;3BU&nkPKTp{7!z9K2+@RKZtlyakB$1&sR z0Y>ARX^bIkQ^R(eaCv@)tIKO_rcI>_i380g8l@I?P4h?^Fuv}XQ1TF&dn>#Sh3Rk4 z=8sV|THs6Wl1?c0Vjai(BOZ@OSWj6u8%{o0Ova;OdB{yi!b83nTiX%EvDq|i zHakokHlcGcV2)+!u-=2|#rJ%7f5f}DH(2_NDM5{DMbj!zza@{9ETPr@>moHiQ8Bcp zRc}I_$~akh+lj`YMRCcb%-n;mmv_Ak#TFk_GMHqgAVR4yP|?|*uZsyWPDQUPp@8Vo zL}{(86^>L9P}#>?Pqi`SV8nnk(1CSLUpWVLxHCeoF!Xp7R!e^xp@L0uBjs&z+31_oLio?O- zpi-S%9DyOq&s2!K!j_VZ>%OsD`3MENkXa)0CCFtq;wqicvImyfonB z%?`~K-2--Mh{ju5Wz%;g$*5^+O&p1E@Le z)Fx2f;0t&B=6r*PY_c+qshf}^LhCJS=Q?qvVlUHm%vaBKeA$gkcF9G~de~~JvBjaT z+p}Hc4}C*oyZ!G&oS5n)FW6#Fx<62M2s2;h7cpno$Ao2oc1HfJyTe(8NM+CR*Bgrv zjY)hYPNbS4x^Jn90K2xyvvAc_ibpJVy}-^=Ui8?sVkfdjZ((Itb8%+JecEpD;^ixR zcD9}VY}Jku|MK8 z&!m>G;bWYVw-C!WQl6txL11A*Lph_5NmN5dqY(Uym#<#o<4-<~D4}VBZh{=lQ3|`6 zTG5n;<2{bY1Mcsl8IkEK@QiX~QDX7kTY1D#M{_L&a!mSju(1HbGfM zXJ%_-4K@`vKFX58!dT2W#UI`^NlQ^l5Tz*BL!28^rkr?Og_^3d#{Dl25nahUGOjJ8=l5KDGHzE!53bmS09asWCd;`rVTExE^&T!fu@Y`&J?Q+;{CR;73N^ZZK>KK89>?bj){8=IxHJKhy$*y4o2Fv z{PX4xTRP_X5r@N(N`zi&J`^m_k*x=jn4D&fVKspsYK(G6NEH69DYk9G^@|tSoo&(D z2Aj5t{o3eFR{}9@=4%EXACFA^@bbHl!vzOZK1K{ea9t#E=-?zs)(2Ws@B(7hklV`= zi$3ZH)hr!l@S-f$ubf;X1`UyDmMT4Cl)8+ekYwVRK_f0|+#7senzXP+s8sq@q>x}0 z+e$03Ijr}uKo)a=#Cx9UYTYxbL&|55RH=+N#(xaE5`3x;Afw7rX&wgERvakvp-8o! 
za)Nk4n(7<}TziJth3eTy;AXHv9B#Ve4A4@I!2&Sp{BA-SEqrg@R)0q>)k zm?~PB7BoE05L2;HBElC{$0Grsjwb^wFV%Hgp@Fd?StLW4Gl?xNIAJRiQ=F;bg$mA) zLq%#N99WF3EGoWC5UK|?%rcYZG1_2GI(XJ<+Tz)=9_oZ@1g-9sW(He&adlLt_;(Mu z=3<70K>#qvVo_6XrVX&f8sDhw1-WODz!CR#b18<~)FyOyv?frUFv&#rdqmJSwsaq- zh94{s_yJCFzJg(*B`bmalH8TF;N>8gi8pC67G1+VsW4BDS%OZQ);bzPWCINEAo!cl zJN!SIYx;-v7rpu7&6m6V?r)*VpJ)qv-6BDMr9^d@*>-gm3uhG|Tafn@0!rAG|mD$xQ_Decy+SCa=N+4F8>7p3)jLPh>!CG@* z>0BV9vGITW@Bb&h{Nl@)gxjzzND!g`UxqYJ3@Q;p6jbM+WGl(&od^6V2onATeD9Bb z58wUncbLr3u%9-NsYO$wikv7mmSx6gpM8$^?{9E8+~eVK#?lx1MXfs1HL()M<=F+c z+ig&M0URG5aJai0?qn7c$|-up#>eDcbE2cPL*)0F77k%UDgkMdg;B`VboK8ff|Z;$ zy6lZC2q{>U6Vx2?OOEO(OExJhY$+&Y)zonB%*T%V&lsQPt7yE~n;Riqz5<$}m>Bu* z8GK{KZm@{X?T+2q9vA25Se6AJZg1)IkhN&1h|TU{uPISaCuSak{n-v@yM5Sm*s*^5 z=1onQl6UFrC{zTsxoIo8aPcS*vemS22_o)SXbnkEth=Cht33I*FEt7rTczcQ;p~_G zGVY%Dgy-w|`v1J_PyCZ;TStG0wPnGACKI30m|is*P)W;{HP^3HEV+>pmE(xy0A*xm zs^K!Jicn&qMI)_|+2}M~#d&7zFpKq$P$GzVab+M=SrXM-c3wG)w^-{$IZcyQ`f!5g z$SIOkY2@(4*r*XSjZ~xk`4*R#mpDH^i^FpcR_NJ|bPVH5kI$~j6oqv1C2Flq03g@D z0(vM~vH)^Ip2HdIfOX8vjJx{--oE(~4-bz~LzO*-dw*IyH8?#@q}V`G>0~1fU6~S= ziBOX^ymU@}ym^RmRSVKE#Z9`W}5dmIi&*y43?RuS3;CH6`n zl&}~UTdXY$zB+L8kX!)9N|snDDOul0C&S5UyTB@Wh4oFst%vh?Fuig(g>KN+Xk(2! zD&+OHI<&3^08I9ROEK6%wSd3~QzT3@n_v)_^*T^V!lK1s+9DxLJs^*3`vwoM#?L>L zA{Gr9Rk-zP#5}QolxjvsoMNex0Bc$nETdGnHAHjH1>O=A-$D&C-YA(L=|QGaqH1B# zwxN|Ml8ITs5|UT~=~I$^QUt1-*uITA#a12dstS%4H;s$be%brIJa| zxJPF((X;Va8)Wruw^Z|J)MGE8c)dGETN=b0q8AYNpU70vr+`AcRRpSa#k5gc+M=$V zv_+hz#WPXxa9F7_Hcj!kEXaV6Nw;V%B0L+DCzgbhGj&5KQ*9h*32j%s&ycpM2B)f0 zTu#{M3K(yTL0d%p_5_m#!M>Ovjj;ZQCiwo{4esFRx}urq6gyx+aj*1H=f+*LVuZP` z6yj`TGjSsk9O~=lkq+0O8&un`E|rh^eEp$SNB}^aw*O;v@LFXVr;jpI9gGTvln+95 z$m(koq#)0O5-Ztk=oT?fK00Hd08DPcBv z3TfrYi9|{HGA>S}Hb2E6{&w~9kS42POy)=}PEWOYsbDfb79PcL#AS{o*C&}=A_Ydv z?y_>@+HJQG5o|UaG)*}nnH!Yyg|Y`w0_mJGTad~P5j-9raC38m+xy#E4_sPho>${2 ztrf}T$YPA~yzmvlzlyMt)Z%O&_SGRl39ys&p%+{>U~C3pItRY^`9Jr(BRpTv*FV5@ zdi@dfulzM=i^ECF>9h?#jj_VW36y!pAjjU*;810ZB`5zv7;xDmGssY7G-;E8Lj`l*_mEg6f1!pPOow0r?=oy?xtYj_v&V3qgE5+34eX2%&ge^!On-K^ z$N9wtuCK1J+wYkIuzG*FCsdQ7t=LH6{YVZ~atBHsSzc+WsdH61sg$$@W`?B$-Df-= zkN9wZkJJ1}_@0mCg=}Ilw^~&rxgwJy5~9uhK9ra-h=}u+V55qUUcSQR)de=&4VpIE zFB+AVIS`1nt)gKx$)*8EEy>_4$pC)8Lfm= zLJ%OH3ejxJM}tHQl%BPLBGev8HB#0n5Z1z8K=X|0H4rpvC=ONFF9Tp*tSSKL%TP~{ zPf@m%Yyk4@q*5r9jH!&K9Yu}&SE?ALvAzp1Y90FtuU(&59IL3t^csoPJuFPeb1mFw z!CfKBx@^UgvysH0E-)Rj#coCw2bZ-5NF&F>*9r<*scOfWKxhxKj7(9(28IjW;z}CS z6&;E$jSWnuR;2yZIn-n9>}JFJq4T3`@m?AmR!&uQHQ5MlJ)9n~EC!K|Nn(iDmry1> zE+;IENo@`+YnyWoV;v`3Fq_ljO3*nJeW9An7i5Gg5ZnkMBSJM4(VDQ%iS`aQhz*Jx zas1Zf4GxkvI;*H#Xh7o$1gylGx-%&KC>SsqcWa0nZm|*WK=w&Y?x+j)M~aP^lxN&8KW{UTD5MRMFV=t7F z<>g|KWMxnaugN@U@eysdJ6v2|p!bg6J7({g=NZf6%y_K$mJSa`+}$5=I2`cq-5Z=9 zAJO|lNo{mS^Kd@$<+GR^(6sd$6$fUrN?J6UYBSC*zr|t~mWF6-=_>0OB4`%VmP`dV zY9*xs+Ed*}B0CA?B37R(5c0j$C_BH~^8Kb?l8qkYJ=ZdALLxCj(pM@ypiQy)j=%RRh*y~m=nXZ4hdKkBW`0_(&Jw45(I2raQQF& zYtOsE^YwiFF0Rcl{9;T3Yf25Ec`0H+>A9(4BKs#L*sb{Ox=QNF!;-=?5CDy=fSBB= z2s#GfS~ZdQWAY91W-1bndG)~+(mL2V{e+^%dIV*042F{wjIJZ0ZD|(Jk^DuOKrHXU zrAoU&6JS!set(9`vkP2ZzQks;3pirJ`f`?OQu}Gfi05u$nI>eJ^%d&U)u=W*>t$_# zb#kZ(7<56mj??3U`^N+B@9xoOTY>#L?T#A$kyM$PMZxZwo+4vfF3~88dT1yb_PZUf zuC8%*c7{!xLXD|q*bSQrC%`@PN?it)))I))|N^TS# zypgOWO;ZYwRa)DMXC#PewPQ8C*Hv0pem_#hbJ6|@&#u%j0?jkUBBI+d4!Gix;!uhU z#;9fr6~XROz~;K;qRwnmpiWSs5e~^X7&c@Ija8Fj4b;xTNm?gUK?pGpY{&)iR&nEX z&o?2f#hC^nE0&b5>O$7ajB1pexLMR0@iEIvT_IsvFJr)sI+S_Rff#8tO-lmoeD5n#m1*as#MACb9eme^UnTL z(vaBfxBsiC_ds1uB}qaS4hbNU)qO=7TT-#5N%bUh@hA@+<=1)BCpDWZSOt7Nf#K1> znhyAay3sa<0MdpEF)4(gT#q8a|}27L%NA9l!pYzmC82cm6J#Cd6tfhC%>2 
zhYCO&iUq(`i!oY80x=_kEPSNsF=;yA@9~{)e;1eM=a@E)3Jgb^HkjJBir1zJvyQ|4 zJwDvtL8&G|V;qkIQ^65e4VvcFoi9L5dN1=H>f_fkS6b8RAb?0f?Qzfg(fvbg^PH_It`LpzJD|4y%Ju{ z%q+Ja@=}rLf~SwVra-itBoIj@BGeuqPk8h0E#BXJpsfYfh{(dg*RoNxVo=29k#utz z1z^?=jObHANmt!N3_XZM?64>36+@~dOc2SogJc|>j!nCfXr9f!^nVKr0Yi^z&Y%C~ z=iT7>dcJ-a*W(}mS?KjS5B36UpdAWS^<^?7sump+OQB7KkiA^r^YE1Jd{vsu2mlrt-fQv%48!{#lzlb==y{W3D-C6& zia@6cn@z*+Y=`UXYwY*?nD7@wE#8CKGEzzC_Xn+TWfiDYknr`9tq5;9E!H}O$tEY+ z4I>WPF`pLPy}QND?M+z+lt~Mzkbtz9!lo}W(;I8DJ`UaPif*Vwm$+dwZE$sUjs5`j3ld}4j>Jz1`KRGG+ z-BplbC11Ha7Fu|^Y1KsDjdo5dVtH`Wk{2kgt5WrKZN|;{xrM9WOvvjp#)5mXoU9LZ zFb3+4YLitgSi)ad`!-gv?GR05G<>8fIMd);Gij|;_Gkd z!BlVy2&zcbjh*L-g1KkS>-FD6a@tNnE?Fd!oSe?a&KhuPOyp^8fH_;7d@fAzwfz+~ z7FLr^U}UlwEEC$;Y_V)6G4^+k{8cOxHVz=H5!M%|PH5UN=>!wSf?iNmZK1+g`hvc6 zs?r5AwNj#k$z@Sjkz3+gaNbEaXte%B3sdr=gt(&f`^ndB2TpKN4nS##7CI*|wC{$E;W z1j+OD9Lq?6001BWNkl!(sKz?m?Bq`+r>TtscvuMqRmWR(g80v~m#EyJ) zi`hl9)il;J(vyBw542|LFEqh5Inl0jD{T&If#zysThc2_2vW~P8H}i?GE&r3h|>8c zxVpZ^%NMU>qvk}#n1>FmWa}uVA>d1Ov8^D2L#_xes1&IqV^*^}m>y82&vfZCe)Bhe z1Ap)P-ya2lw>Z=_fm=iNNG5vd(4-2$s9UtA{?!Rr3Q<`wU0q$_+u!*n;YE%~C$wq8 zX1f_vEitqS@AM7{KBXhNxp`ji6TQz&pfKrWvC7Y5-1kz?M*4j{KV$Qf5y!^qRdkDFQBN790c~e^ z_j5n@yc;}U&)4tbI$d2u|H@wjWQjGjuX6HSU}}TLpAGjkwIi52UQ8GQDPhGl7;)A9 zB+W@^7qFDYC)iHM^x2HcJ0|ofA;_V?B6vD2h%ktCCPqjVxSo;4R=|5owKGN%Bgm_< zmf(AK1Uo_?m^NWeJ3qU?<>e(>n*zowTB?Jz7M6b>0d9jEG$+&qy}9(V#|Zbm5jo1H z2-6gTT@*Y4)lj<4{e-vg-{5$Ehy$22a+*zCF@$ejN77iZI;hgf{nsLWLzFF|l$@(# ze}0aSKYESx^E0&8Flpm7eHmnb0c_66vf<5_Z*cqJ4!tk1-lIFvk5`3<-*esMKu!X*?f2?KV}TI(_-A z)Wz^satlU7CY85dype36rNv=%9)zTmtUuE}5!|k{H4PgO0QBB#xa7{bEjVP#`kxV? z5(i`RmF-1oCoYO@{CD#fwH=e|;=tc%qxJ#G&8hmzv6Y;u8my#(!&U>qP8J1P*qPGC zTLpYtZw*%ve4=|?bG){2=-LRX3-Jz9`IONAY=Vjn^)H5UBGui?GRJqMK<_ge8dMrK zO^cVXEUc4Mh}8>~Qih5qZ(-VOAu6F*Vw9+{aja-WIz$^_=?fOOu}1O0-=I)3?v|d7 zv?abXja0dlDsCI_{fGBB$WYrDhn|X|I8o59%y{UU3RWXBn`e#6q_7UEBa z00jk9{)^{b{imoQ0RZaqpV}(NSB4iNZoW(skeIwibWNkZgI>C{UNC#OOAuu}7sQnA z=i$RM^dUi3y<|xh6{XJT9j)l50ea#C&u*(H%*Ie499ebT;W@D#wC&auvTr~)Y}4Umaa&%FH_ zqE{1)jW=)J;@$gqSeA}uUXXz7h$6PgiBHd{j`+2K&_$8cjRi`?sO4)L(MgVR@GkDD zODQA3W>k$q8aS8)kv0wI z)FSWI*uZR9Qc3Ja~+K5FYt7e1ErS8#`S~@3*u{q~+Y$Fyn zN&MATc)Iv__O%-uE%cJ+bLC%=N0w)lKe3UE5*had*`=h9wvCbZ;st}s-1zPt+rRp6 zJ?{k1*YovzxY|GS&q5}zbgW8hkT|G$tvgxlYsvk~znyY#4)VovQrE^{mFfvu`Pp0g zg^d$1<362hQ-b&y^%w;(|C>oQS7YMGshvoz@9TsmfT;y!QN`NC1FH>!eNqXXjcJ)S z*zLAB+ir1jagK}qMa4K%E2w!UJ?`6D}_=asA>NyWJL3 zo8q%?0&P#zWoXe8S;yz~gDIs7MFUdhC^I<6v8=8ms`D>=vmG8z$KFXcmbN zBJus{YN%BJX?CHkWd2OM;xt&&rdmo?tpjun)jFF+spj=+R0V)})N_fV>J|~%PK86# zF`Wam0HYSIgo;XCqn|=`nMni^Dio;lev)e9BM}BYl@a71L>Gst6`?{BDnQYk4K~LL z^HTAyQTH^07~4>4!pDRft619fkoI`*8Q+;I2=f7<)fJF76(P}+=>%S)#&@Cw+#-|) zA#xaYs{(UP608KOMk{&;fOV?miLU@mX zX4_~Di{keL=Pj)-PT0FRuy`+4N2UTHV|r6xB?_VXR>i3xp+(t;jioiIKhsn(SwPEY z0p5$?hsz_5Y&c2=;$`EH;QJged2v*%)W*VCVlLjHda~8Sl1o~WOS01#_u}8|^Zb83 z@96(%ujwE5UnmX#vl`sfD4F0r9`N%pHR8Z{Q`aWghQ=|p1+g%(@eoJ{eaafhyljdt z>8>OAGGRd6lnK6*7XPa)njNy@GZ-i9Qet{4H$D5;Sb5;$Ks&Vtn-`cE&M(gKt#5q? 
z`^{N0wt|AdGoNLLKc$}*2@0$@s85n=mW;oZ=@)Fi^aRp~a3M=@D6qKUbXxG?=6$dt zlwC#2s|Fwp6^SG^prG0LCHFQq!AvO0jgX7%x4-o*TwhElf|OG zoH~B|#ZPea;XO|C!rW!AZom3!BTCY4%Kxf6A5~nNHaOewv6&{Q1}tfwXWZW375r?o z+2i8ktePWt_jlNCwwR^~tu>e#dha+M4miy-GU6rUD4~W)8}?^s_4lNA(%sRQj=RHw ztPENy4AACJ5ao)H#9dxoqp6~`wqiGQ+}zzp5rJNlzG-UMpY5^TY#6Z#zQ+X*k4G#^ zFW+mjO3NH^lp!sCD{ z^eb0tRw~Fck-<7sAeijM$)rGWc6N@}uV3Nv`V5l>w9>qXjSiuJ@LscM7$j^0$cdz9 zYGX*M-JxxY11;@p;LssqfmMs^@RJ`8N1PrXa5x@uI3BRf3#IQGa`qJkz7th@C55A9(G-wdnxKZ4S}uyKN{yI1MD4}Odp457Z?wOxus{^W=+sJ z?RMk-W?C5@8O{|^mkI&70nn;FgI|?#mrTnrh4;>1w`k0Z71KgWMH=sCp?WoK#hO1~ z!;)j>4CxkhLgH0Nii$Djb)tPK(%KE(HO2cwEz1%$UZ(`dq|4}K=|}@}%RQND1XmcD z3MDYe?@d~z_~aL^Y~-ng?M4cMN&b`xA*|6j$*5XMn+v~gv(ABBbU6|etEmpk0u=E* zHdl1v;IW!QKyxh)h_Go9$$voW3tmqbxNaM4XP{4lPFsQYsFP;%klE8?wA-lB5v_}2pt71`Ufr9W;_>Bc~KPDE>P)YSU6M+4<|-5w^oCpOjE0<`AnEg zYN;E{C-UPMkvv5+)Vp9KVbx!3L1R7kxB-6BkNDh=Fe$2wXrOM;CPmv!XwqqO&i>mZBrUS)R zc8+cpp9&YWDR>EAy!i}&>u>*haRVHVWvNy5Nf1Lu86i8%81!97`s=(FJProXHWU8w zKlaCPd40{-5ZzSOpzH?JQ$zB#%(%V3#qHZ0JUpH-&vSj&D%tQrrBSSn6u$P30od<% zxVpH2(lckIh3|GdT%KRleQ-KG;N81-HFY~bI|l$fo=y=zW`=2+aCIGkE)WRR>6M*y zgBLGeV87d8W;Eve_wVuH{tojzlQ1q+XApQ=-+NA(fJrC3dhrrX75B$u{EQE`F#2&( zlF{e@Z+Ba~cyW#0c8lZjh@1EC@&5fgoK7cfcN@I8zQ%611KeWpNd0Zg*Sp_gS!R6r z5U=<4_7?LJuUFCF2Hf1-;QsD@^bS(CY4wm|ni^VbI2?}n@ZkgAy?c+x#}hW24X!V* zqlov60$W99tqv7Y*!76lT}h^)LWR^I=et_T_s~&ZU3RL1pbbt}d?^~Ya{?8-7Ojpu zt}pQJPydSui-dNcqU7S+o zVUinr5gYJgWvu|bQn*q&=k!+xP)~|hk8JL*4*8B&b%^kw(>!7ZAr3RGNu6l4#H(&e zeYa{g20#vl0oZPL*zYfJef0wSv&%@@n&R-0wo2pyhHtd^8mZpP!-yahj^=fkRQl&7 zt(JtrDGxKfV??Qn<9Iyc_U;bWtzxp%=8%VO=bm8|0hqpP{ykXnnIx@$QG1>318BL~ zZZPQvo3?>W0cdoK1CC+!0Xp#J-Iw^C@BbFwynl<<2;8eEvBGszIZ8X;O5vJy)5<6t z#vtb)fB-(v7|4X5)`y<=Z6rpeUnNwQqfC~xQb;s@j6lAuAH5T7oeDu9JntC=>$%^J zm|I4nyrNK>H_pgB5nz(j?I1wgPV4_#e3LDp*68=}dT^~M0eQy~Bw!1A7Ta5Pt> z_+dp&Dd>66)}0v-g+u8eD~qWVZah>>y4uG}0c@Sjr&LeaV5ji@*n}Sn(&-o`Go`vs zdBF7eJs^uJkF{ZQpgJ_w5uNXq4b6lirXAFcj2JS?pbF*K-p$Y^g2!bjqVnc&9ch7S z)dg)rmj--udyY$NFcE^|&Ct;?6Ka`FuPGH{Y|Ql(lk;;I9$Yk+xl9WZ0i=b>%7m=E zp<=e)u`mE2Y}D~un<_Ti&{SY#fw?mp*W#=-HAkDM6%ixglnt0B=oSN}iJz_0F4vfn z1c}e2OPmqh4W<)*=kWu+fMK4-`KmfPkqFsSiz+76lvBUq)*BYT_$S z<^cUnR+R7LRA80koQ?8UQn`waqAxwy;e>1n59!Ki#cV8OhgisC*iIYl_h;B|HrVcV z*q@){>cvZLyj~f&3Hgi%$k6F)m4tyY#!Rx!PydrhN8q4>fnHx-K}2vk9C3Gd zkNLD$q+^y=jYJ~IUEj{A0Dvu8S{L??d@&lO$(zoTN7Mf zUO+@}dv}ZD;}KX4-3*7rJ?`%g00bB3=g}2U+lc_zFZg`B zhN#&rS-noL!ZkM1e9jJsBOd1y=4FPt;r4JKU0bo)Y`y{mTfOZB$4LsM5{#OaAPXoq zK!%5y%8!hEBh;#6wH8P^Z6o(SWrS%vasvztcK_U(eU?<68dWFVolC34!Ke z*#!&hzKE?J;I!cPtkiu}Hd+CN2_Q_)4jTsb&urS4y*Ov;xqK)O2CH=|U3k8N3ua1$ z#KWi?aDO`D?U!%x=G{Bk(!=VKb&deQ*|OvyM+&^c>aF#m4!X6 zBdrzeF(Wjy=@txwROQlTDk$q{ut}t>H&)S`Z0seRxk@0d0oxz+F8se*4ce^aP9E2#%5Wb5u*8rgo@XU z%Y+FQva(!+jgDkiteZ^E6dkSw33(NNqJSwlrRH<8ih^uLWeTd9Cmdu*efNZocj7%6 zl`7VYiDd`^{76D$3I!H?B*53U=h$jP7sbpp6Hg9FBWsT5he#lc^)jI7@ug~Gt)vd! 
z1T#alp-mhrq%E38E#&m#q4=7n#%Gf1 zil!B7DrCY$jqF&u)p%JE%9G8sL2H7IHXIx9Tet6U4=2Oc&=LSx4d1l>kjv&4_jIy3 z5KWj+$&F@Uq{MLPq!QU!|HW_@5XY}rx7+8P{HL@b(WdEtP|^32phfO3k_ilm z`j7-p^p$h5lJo-Q&6PJXC1FY>MoE*h5uq7D2ygoge%V^XW7?-IfuLW1;cVc+Nax zurRePuC6X|d47S-G?DW_zE9TMIw0CLZxBWmO0W@RwO{5wmR7xt#?IucjB!qdNVgtL zrat3%IO6zlgfB+Q^z-33KG00u^}UftCqoEAn#jLNc!0 zO^b^p9#)K&b^{HjfJ?8sLA_N247BznSaBUg->H;wX{gGSvRchLZe@qfxOs7BoMkXz zRbx6`aqcc7x-eNRqMYNCqap?o&j4C`Q{bP6`hB7iI-Uby!T zQ=>gC&PielYb{bftcO}hV_li;6m}lgecc`1m+0|KTTE>Ok%`}xMOq)i(${wr-YheI zrysC1fvGY@h>Dk0qZett=)Ou5lB&}M#=#pGnU<<2TKWO5E2-8o4yonYuJO~{ka&1_ zSUT*#3T~nTHZkH5v<{>)VdRLbF#@t@*G#GtEk3+y8GV?Ofm#rXk2OG(XjpZ|huI_i zV-^o7rv*ip&L*`cXvk_2b_9C?DUlS#w4EqzACI!$8XtoYuR;ePjWP^QP&_r-^bAyNbW46a3-Z-tIe=a4ejRgYz zi#-jSGJr=3D9vPyZ1SAF{2o62_!B(NOLSgcq7kF3E+(^)eO~aB&wqk9UwjE)I=bb- zw2#y;T}fp%Hw1~;v$nd)QWlw`2mzM9jHZ|p#*~?{Pc=Lkz@_N&_ujLaRt4#HvyBO1 z@1x^SIj52|e|vw2c|JuEO;twP6!I%IX0Zdn#l;1i6XYkjz^J)(7W$iM6AORu(QocH zVoycTMbMY{d7I59y4T{^Zu9jnz>@xCl#kNk_;7!R)8ianeqS4>Lc*LI0M-O{euvUB zLXJSAClX}BQe`ctiO95+U4|lJ~#B~TgUPKfV!%{$-q_7xDXQh~6{0le`5>%^c$k`8wUllsqKe zy_i?R7Zz%h_}*F`;u$qK_0HM=cqR2l#eZRC(7qojf#+@cYm z=n=xmVImuQz}ixL-;(5JIkQFf2Kmkuj75Z4;oPRWua6rkso$ z*a*lW&WMole6Qvx_@*Ncj~dG3&bUll)lwO!NzW9Y=V+v4!wI}^Lb;&)d{ZGr%NEr{ zQq7*QqBtMUh5@tol!bJ!9#n8+y?Sac&PLuU$*w}{osAM@oGX8BF%-b2K^u${9my(! zHOqvFA8a~oMeq&T;A^IsBpLxTLfxaZ+H?-BTx2xaIN<9{d^3-l)7TJF6X+Bw*u0lg z>5=Pk!Z94c)BtUQ#X~XC3{3mWm`$>_EIzB&6iphW1Kq6FI$s>-3rr;LIva6BZL@_) zKr4|9u-@T1p}PhvMih^mhM)8!-r+O`pek5K7Dj!En(*FiBo=U#4LnoyWnE87*?EMv zzDV6eSwh6+H(c<)Ja6DX#SIBWcl19IiBt`3UUKX1zB)vEL1rxdo;nr?sWr`4xyFom ztedw);04>wMtZ_-qTwCK|K=V(byt{#WYJ72Up!+BD#MyQ0WcptWP(;Q&+TR#@;e?d zdtdPC^+(w5_oUF~x4?Ky$vkbZ!`H|MSdp7_csaW}!MC~S4vD|8L8AtCKNUJnxO+I@ zi_boTEqz2B@cZE$gU0WjuFn_}(@`oeA| zW6#VDkMoS5eD)b0=Q$A=q^4)7#286S(Ae0ZLoFds7zGKgDLB&VEFAvQ@V`oX(#a_6 zOlEhBuNo4qabvWiM%&Vhm5|@1V~K`M%EcrhP*U%09nE<-K2-9^i|ZG7`SK;s&MzQM zh+=K&JSsIs9QwmUr}Vjs#%RVkB|!DEq^w4^7;D=$a>_O1-cUud%2PYe!l{dh)D%Fb zhO3K9yn6i-uV1~!N3UMt)vMR_8Ud_+hGFg5FvdcOPHb{0^SJYK&l^)qVPS~mmh|67 z7b*&beD<>@Qu01rrchd8iF=okg445I1BdpZ)L+_x7*^y%a{20Vph+D)M3rn`X1&lE4rl^Qf$qyl|J&sDwV(4f1);ZkLv>ueqWStBX`)v9! zOnls{c*lbatSRBS$!zfz)6KAm zg#(HATv#)tno0tNYk(`~lR*i=SE7U1I^RMNkz(TRka#2Y1gof(JB=K zrvst`!X$!jPd=L1=tLq*9*)g+hgUCNVzb@g;o*q4H}CN7?OVKk`?eZ5NKliH7_jx? zV`)*!zM?iZ4rS1gnK>P>EzR`NYPtbdGSHOqsf_2bm=mB>8v>aO)1UeI=N;hrdcOXk zu3!2~z$c&5%gd=XCED44NZENtB&O;K95AH!-Z8xLy*Nu=IrCABl6=oU7fY{Vmb78` z>LBICgD6X$&`aLQvqA!)`%pk8Uy76KObKaRW1{w7gY|MtiG$Ck2`=_~oSp5k<Bkv(>#+M z73wjyR3<1OEgD2*3WVi!@Gi3O7b)Z@)Pb}81wQ)dBW$Kkc+EAS&MMhpM#Ux?hb4#O zJwCkufaBu>>xv-qtJ1UXglw-m?3U5++)TA3_%f88W0wrZ7ecXPD@6&I940yVSn;zG zSSqS)1PN^_GT`XhWY30MT8J}Ei{k`Qg}110n^9uU8qak^+!h=Wh{SA4EQ-lP=VtJ&Z)g4$@76p0dedWb}VI*pT;GKIen`uZTR7T#4LpmD@ zRhLH%r$~sU*E~rwJYIQtOwc=39gT|MUaI1ewNns^8m24jt9aI0@gVrUP{o-=m?SXW zJ*;LqplMoMoU}}3%B{Q9_{}*uYaGhe04VP(#<*XI;iK(2u3ezkh`dZFW{))>lQq(Y z#!tppDSUKzhvL8-wm9Zxj>MB?!MucOHrJ*R18iaE!QgM18@fAYU*O(DO-m&KnpT6a zsbJH#5Ga~90zl(ust<;WsD=uG_qb+bW?tfXZwOVZITkd$vl+j2{DAwUhmjeRP1e+| z$DT3(t7%1XIBdz@k`w?PIjlCGN2mg-hIc587E%ANpEvI3h6Dg$JGKAN+|fJxm(1R- z2?d61EH*q+iFHgkaute{3bq-%6AUCwb;yixKGbY)TORg{vTW78WpABcR^uUVL=Gm` zp%kk6H5)wAFw>maM$)`oAjQ5Q&qH%>p~R- zYxCYpRRl-V398dbBzZjH`0yC{;+#*dbt)@u=93@kk`hiCg$pNW&etLwk*Inc6xd|Y zHdGQx49^Hyx1fe=CGAKlG>pZteB(Q~``JJByaPO6&(|N&)nC1a|9k&F5Bqt*C08c? 
z1E=em(m=Uw0<9&y*hX-(@c^KvP;UjroZ47Z)Ip_Pud|6%#OT_~Znbu#>MrR$kAD;cRf~JyM34doR8PbBnR_)l#0=7P6#}AAYjr4fj6scA&|{S~ zS)N*hmFm^VN0Tww@dUV{!1BdNF@IVs+pu4Xq`E=MLaP;<8-p4(KHFHqG)r}q|133m z3^fSrR~xp9Y(({JB66URjhe7Ic@3GI0QOL@N)48hI}-7md_T$oh~Qoh5*}@Vi zYgJ3xs#)cd?^rOfi>2aVqLn?& zyga8gLF28r$uzPS$!BPpV&se!)+Aq`v*wHEF(O+bj=6W77S>-Hs_K?pE)k%2NRJ3l zV_t6Db_SJ3%Sb#sQ=4j<)f%5giSMFmLsP)og!eFfhC?`r30uv9taFf*zB%_yTM~el znPS}JXO&IwhT4a+8?MShak6-t{I`AXKYrf1pBobKpm+c88uXy)f6T>bggYX0nw|Z` zSTL=+u}>(v?50Nkt+LY@H?lIusmscPd%cVi=f z9Y~(>ObqjU!r}oRytur=)vMPbH>=1b8BtX|fs$jl3h(`@vk9#D3qXfaYN%9d^g4){ ztt5(+y0>)-rTaS(FwcC|C3nwLeY_j_3^o9jFnq;4qz_0y1c^o{^<7r z6ZGylo#s#g(31%ZKe6|Y**o68e}nh$-ozrR(di7YXXirCj%&q^jsgziBeT1Tr`-7Z zC^}cinp1jR&l%;x2854KR!IsJ_ZTd`5CIJ zk}+rWQ&!Jm57K}0-+5mBpRecZ4|-4k#lHajk?+OiFe4=$R5ox9YT>;URlwScDV!^) z*5NaK35Jf&rfyH~zHmUm6L?G_gEinHbwJ68$!r#6R3&sI{a`G~Tso9qciI<_oWMu| z$k}@UiP6|JVOmGM}v)4ZOcnYYc&lb&&tT}Wfi`A0tm41oVq$any-fFJZZe&&f``sRA zyYo;&P8;Mj$PklQvy_l8VRh|}#TFb750#YQ7Wg-V(3 ztN1_XFnrZ$Put((HAx-2l~gF@PdP( z6;$#R3W;6;6WhSVEhNLOoW)v}7w}-5D(8j)&NM`=6m^MLvPH$6vsMu{Z!+B^{*Fcq zO29~=f^LjCC1b;BqiHL(XxcVv&IUj*h0R7Z>Ky+HR9;41Sq&TTxfK~9C-j#uR1h>V ze5+0PMw_4j=6R+n!H1VT0iHeG23ie3TT(5ouns_5WDR-pO*mw0jl2!5W>dF-(9*4n z=8kT241!(I+9r}c)S)`D@j(TcQL$!@j!reJq0e4*x_L+U&WV+xwAM|TjDq<2wC{<~ zLb7RicbajB4rxjicw2?`dReQSiG$uac#~fC;Y{O*uZfUTTAhN45nP%A&l1Y6j%S$V zb39k8VZxMAMHWhs7uPTF;_@og6)cF-wGnYk zT)ZoKS4%+I_Cc-k$@!HI>%<&P}oy&8bRCDOC#fRG;Y+NwU;)m~CorPg6EZ=_K)dKW#{G=bcc6 z=qU850!niEO5UB4ZXvkNa>7WDaaw6$l*{sP0U;PyI_xj}rRUZE`Fg(ofG@kc2LA1T z7nyG0oDz!VAv&koF|pPJ5K?s}DHlt-V;>gH$;X-+rs_JmOJH%h zGAC^FN{TwYhH<^{KhTYR{`$K02I;&Dw3yFx2F zp)e{$s1&h`$!4?1sa7){)H@GL+uaVA*XNja6FFLpGSn$OJ1{eJ0CP9oA0qPe_UXDp`;GV;WTDmh7ti^Q9zQ_1Pi%+CSh4B9YAJ@ z(i%WEM_F9FAhk$^YeM7lxzkjs+F--c4i*RC!47FzZI!r+@ufDHAi843h_447Cn-O{mS7`M_zGEVO#xDaUW6vwz~P& zJiX{y(09lMu?WQJLG%#==7dwhedQ_4>HaE9xfyK>RtHlEusz#gzdwsff-0=fxW2r^ z<%`R}J|~WQ=7=l(+#RVOj)gA*8C#hC(dBPW6TtC@8rA?x5dn!Iz(ZhSOq_xk!pfz~u>;%wsdY*UHg>iP;Fef$xwF3;m0VocH08m8?Q zXbx?h+$rek7F>;uU_xfHKSZ9UAB;h zBzJ)!R#>Ngx}=zxQyeyyw!1B)6B6VACMmyPmRQyWvLd?V_aW&MbT8XcR+#2PdR)HG zMxUusl9>{|Dq8c~5qQ2I^oR%S@m#fk`sc7*zj$8#pRecZ|A(vp!k-8J_@9KEmh2d+ zgWyDGn4w40BJFIcTGS!QR_@t(5Y3_=Vi-bg+VisT^juS@l<+*G=I>>F(KKeJ8LLN@ zLrmmGdV)B~`?(({X_{6%N$V%q%Ou=yrVY-{&v3rm<7|J1X|t_nsWM3c2z7xc-M^YA zibkcUcud>HR?YN|f*C3Tf;nXI1I+C7Ei6mNgIR~RDH+RZtqno};5sD69ko2+>J2N~DM&StvvZf{=qqBZDB~k}J3f zcL_m=1Y(N>DHm`-Ku8gZNP<9)lDHg4go?`N)H(a?y;nQu|9@j}G2Su0f78Tu)eIFvQ4l?Yg=}FA&NY%Cvhj8e9!Dm^sf7!~3GZy)=xw*^!9)E@ zD{}FjqUqLTn~4`EA_pIiOoACoA4#R4%TU|8N3$6J5Hfa7z(zS04{5xR)pCxOo}LQ? 
z2}rgVBa6j=XT+k87*cX9-1rG#v}p{HqSmucu8e36d3+$t>V43Z&vg-V8BafXS#lGI z4;wMV*~A8SE9yta>5u`3il9yPf!I)0i?7l(GFhw^T^!fgSl}GR@!@oh>uH+}4caD_ zMpFkc8^k*%12$7Dg^EMay~kp3v5J&mde}Qa%aDNYH)|G)bjsdwG6Sru%^`iymxNsE|1GVA|La7{*I{s zFA!Z>5&WHXbb|V@0Bynskx4!nI=>_J5uWCW23{(D@LCAKx}!* zOT)Js6V^uKqSDep8dLoJiBXI_GJ>Bye}?^b&&e?gU7RqW?js%vS*+DYDgOPY>y(LB zK(8oXNES{&kT-%i39zvRZ{ObHlg~cm_Dbsa;}Y-mPs$)7Q?llL<$KJSB^&=AU5arq zZ6WG`GBc9(pubk*HHTy}57qKc< z)^kSTaLYX=@A;ujTTZ_2MzX!~`wE+iRK9c$&axTkHIzb8nT1qu`MtmYa`Imum&dQt z!;e?M@A>I*2j-<|s90pD;omZqDArcAVxnvnCB~&9t|Q@I>F7UR92oV zv9#1_ir$B3LMEj2-mzHxFVf|x_R28;2MH}%CZc!}!6)-0PRZ<({M@BUZQ>c7b_<8M ze15$E$>_sQT$UOp62*|r%M$G#iz$cwvCB>U`Fl*C3z?_m4+#8!mL_FU0KHU}6H0DN zVF@#Frcw;chC`uZ=o5W*8lE|Yq6<|6O^S6enrSaPLN?zC(P<(ukF*Nqo4XEgLXiR} zMK7rZTrWP*z%qBt%ZxT{00o{uzrkiR)uEvP0W_-^G=4orG@nv&NJ*29f7ck#X{zNl zbvB;vkz4{^M#c}Xx#Rx!9v^@7Q7{*1KSec;tvq+R)o;A$gqLkZX@)mUOP*%~Ze@!7 zagQ&5`74}o$L6U)&=*77#73h*(bb^Xpxc6nyZabcPzjLrtqXP6Wtxy zZ8kU_uFzPZ?DuhGUISK#4uyCDRn~c@!CI~PgLVX6TjJRF`v&cU!u@9!PDu4 z-FA!peg}~~Wsrv67d+fQMxk2#1jG615pU)h$LlNX_j??V$5K+v=QHjf9>OoW_SZGR z`P}jP^&1=x2W&ReyWg*M%=3(=$0uxdI~e1e;C# z+-8PlS@8Py7S~r-*lq*rHcb;AA0Kf%9!b~cYw-L<40`VFBeqszGYEIIiIHZolu7V2 z>lL3Wwb8TMe&ujdV`>?$ zo08xt=0oVjrq;O;WdZEmQb(+%mbyxj_m#s;p-OMHw-`P+-m@&T zl-0$pO+||rW)cBi9AF?7V7J@i#d|MtIPS6C?!r>2{IjwFyfbDnUb8lBFfTJc`shF0Ex%7!#-U2t9>pdAy&}$4h0^iR`kL7Q_T~F6EvTv3bzkvTlu?z&DAO3v zsSoSwJA8v0P|0A137|caM@D9au$<#ijOw5^DWkJF5x`iwC5Vwb5=xIq z$U##yiD>uU7qm7J)EWH=XaNeis$uC?z*9GaPl`qxF}h>!F|-WztzzjGeGV6*Y$32% zk2<(VG)#j~Z8ODMc(G7mOpP^G50)KI>qY>qE!8l=CIObPUO&3yb%YNW2PYNmF;wW4 z%#w@OuyFJJPmkP4aFbfBMz1KNSub?}^SS(JJ-C~*=)7z!;x^IqGc>jYB&_t;`;1kSKq}i{^Bph_trLJ>yy?3Pf`Am5V{I{=7mLyb;GLA zQ|0y|Bz}ke5zk+~MAHq|CnI1@f!o`AK<;?xlD4kE>2$)==`lq|cmMz(07*naQ~_!_ z^5dgi9FzAXu8NRy-SPJJZTy=iWp7Z7!c06j9rQU$ND1%x{hAmzUQ&QG2VmzjUY}2K z`Da0+87=4V79~vn+ygcnMcnP}ZKYS`eDO4&SsqeSYn>q2s2|Ox(!z6tH3nwXDYMYb zLSA5usf%q{e!f)#;wg<}XS5hxeDk-!u5T_U|K)Lc{A%9m`vdT={T|>i{S=ybECe!U zm}giQD-V4(|JI|)Y25tgw%RfwE7b(HTBvG!$D$ZAVpO7YDDfzX%$Ua-XJ&*ZZ?d%a zDaHV}Ai2h7jf2sQCuR{dZr%9x{eFktHU{HUYq5EBL7zY%8}BGetkc|(*q)5*6K+J+ zm+ibCE8?@FSu9zhL>)jZmDCqWt`FzIH(-ZnUQ8H6kd z=a4}qA?AdQCV*0dS<=B2n;ck}6`Zu0AsrDe3kM<%jVhf^2&AI3w(^pIST09j@UOB%hM+zH-vH@{| zrdoh;rn;Re$9DpHMy|Q6aN1@T8-!#(WVQz_6Wv#y!F;2ya=uIRM=zQ906#Sm1p1}H zyyM05m-z6@Kft~(lN#vbN?9EQ(xUm(2KYiy3lE=IgtXc=K|>naBvc&UNgGe{u}WQz zed$={Gk))i$!zAz=m zI8{n_GvNhP-B{(Hk4=_$EeRe(3ZN7hy3x<9v@-eFPQVKC+7iN&2`QCr!|{p?*`#zE z5o{b71l`(3Vnz|IhcFY2dmF_9l@G0r=vMx_h85J4(;7)1c^y|pctPq~lQ9x!f(+(M z7wEJl`@BQ`t$+J+>R%q0$M-`e@y(xv{^W0kNJGmBg5=25Aq!A|qHO5aw0UkZU9A#| zigb{Tv$XP63mYvk@uEEE61KJ6j;EA47T8`ux7=)2;CO-~R|5=Rq<9Kq<9n&J1U7`5 zX~Jf^#c{vK;dsDyx2>6OLEc0wHoGDt6DCR9nsKNu=2+-&jHWr&CRVV>R<%L+;+2g_ z_YPkc%%?}(KRyQNuLk$;n&_Oe{+wg1Yo4?~B6Slv9M`N{ChT?-c2wU5vx7qdF{Vx} z1W-mS;N8OmPLHQd%`BCw$l8vf;Ra>hpqdumxEBIuxfDSOQTeR z1<ex~gchGv_PQpEi$45<{CRPjlEKnM;noV!3c?EAN3EK0K&XVh#ox6w$M?h5=fDX|Hmr(gGwiF8u=5W=rWV9S0h=#q(7C%HHXvBxh!YgKRkt;ol{y*hJcuV zzK$<__+d!QN<9(N-!?VIsgCT@Pe1(x-~ISg#(kmko!_BUuVUJ?F3ox>C{r7G(3X-v zQcT@Pyll$+M&^2|bvTiwRV5)waVfXDfYZ)0_F+hD4NA9>g>^J_V%c*~PTpr(#8-2Tj|EvExpfgjgsGt*W9h>AFe`QS&1|4_}E6Y-t zYfKDTwDBw$8&HIdoig7{SP&!B(m2Gl+*Xf)EAr3gcT=!j)(Iv|-Xfu2v?3FuiDKal zhcv-%f57qj8b^YyQNheU3TEl!E++*a4s`Y40(s>p)-WSz^nDCaD3f2h*CsyIh*1U@ zX6v|nxWg~s-e6gl_}R>u0_9_~-<^${6!_Z})wrg`Fyq*fLjn0Hy>c7Ti&-?6Dj{khRo=7iWhG3gredYkl!!tU4o!^75mGRv z9CFD>Zxb15WZi3c5`e%=XYk@H7PvA8C{1b@5td>u^2{r$vDj%s#Uksyvh*fsmcyT@ zy9FU57*VB>{=$`3<(nt_P5C9f257M)u>lezyUqE!fp>Tgr*y>#NtJ^Z>^xW>p39jW 
z#nh9p@KTK{h<=ERpIwmo-N8Q%4i6j>QM%Bog6y(QfjkFL#iFI*me`j_-dwKLf(8t?hXToq^nG%A>}tts;4eOVR}>51Ce(=( z7A3Qi{*Bi0{;FM4o&?EW9me11rG-ouFml%USB1gI$Y7h5KxRVk8>zq{uELP^jA`LLw=gEI$|kql!oVi+A@E)0MTAOX6KjOuE1-gnnC-pGHOoR ztuzyvgL96ipvVC1wNeWjK*o0J3V7ESEcvA4I>nyH+=YK7F8rG~gU8I`8{Q3POMlLj zX9xB-lQ27IJesAG|Dov{9OM3Y(!Cb`(QZQHhO+qsyHZQDs3qp_ar=lOmA zg}wLfYj$>KmQd*+w#u;a@p}F36{3c#Yi^S!!sMLjCt}EEe4jc`P2*?p8~wyecTSG1 zsJBLg_gY%xF8rt=H|~?{k1wr%3uZ z7CHr)C(qppH-vyLyyY)Rb%@NgGy};)ugxXh<;L4jfen*MY{W+Q_%2cCPVHEj5z=JF zifhQfa>|aC(*fkGK4LEu1KrypNIyi6V=w zfYAO$f<8UCSQ2m$@TX^`2}`N7m~*IztcXldswWm10e>aOZ)WC6NylyH>Sw4oVwTwJ ziL~>Rb`(V-Hin(IPHzfmZ?VFY{g8m>v+VKU*CnFMRn2x)gII^&Kh7n+58>uz* zQ$IqcqRuj{A&~!}VKwJ!{$G9X@l33ehVnkCJo{GmKW``;7?^(d72-4uEu|6(A!Jdr zJxcT2H5=O!CRJ-as&g7LP_&0lm^LY*ITu?iKL}h4UY9;Poo^8OTL~a~b2&T^cBdbx zc~NfBri?7(Qc{XJhKq&T zS9%TJwy&;M+Cd$q>(BO@!Bf_!jL&IOh2JNUcA&j>GqgC*#vVam5ZwZ!*Zc-137K`H zajrj^1(6q|;DtAhjlU9g8QvG&2lCkM<7{8ykCghm^AY*hpVjiP241RyiXOXM{z*8? zuu_E;yKSQAWtb&xPOfVpao+1%E1$^kPI}7cb4DvVEQtF^31npA{zG|alh95Ug`lVO zR9bmgH2N*>;j+j*-C3OD^VGU%kIQB?p!m!&-(UmI44vIG@}a(x>NqsB6}^=3>@c9| zE==PV>o%+N;3?r_nk!=_A0Pjh1r!ou=+<|b`L8$a;}k2fUB=C`ZxpF~gL|L1=hzqt z`Wq(bwn1L?9W*2xmQ38AvnOxQr>)+Yu?ktxB(E7K<%Ex`X{|%{nJ+RRQ$ySK?yg=> zfD~l)CtF)=LG%%grb7@pvy+M#6O^6VNgTA zL=XiB7gyStbcgOoT&|2+e#{h#76_NJ7!l&2U^N;Tt? z@l5p?g(N&0szwgwCR}gnr?2b9ygx9XQqp9jl2L9=NPMiw=#5%eXW_n`S-ngB`(?d$ z7dRRoKHt$Mi*ymjrt*gSNZ9QYi!03(mLHFOSxG|=MY7EBN4(#0RsbjqZ|Trkvr?3Artyu z*1Q>{dzzE&7~8eq;Slq~VloEsfE=@`<#=*JVNHUm*b{?{ms&O-v<7I+#yi;>V`LL) z$Yuyl)We}%I0oapyDt9jPq3eO_uq7xs6#XuyIeXSC^2rojl}kSfW1fkvjz1V@;832 zFdVFsmy1BBcN&MMnfjPnGeU*TLGYsKiVWn@RBn8)vnay&+WRqDH{4r8$jr@sY0GK< z-uz^z*SB7!3wal~;U5{&-n4{^npO6PzqR@&JPRg$;J*5Dc91mnLOxrgoiH6zYaj*$ zu7N47jw8&6R`0|dfx1WQFN>>Zr&#*9Rq}9{kpCGe2JHrJ}^Jx)H%bgenjfsdVL?7HM!b+or{8AK2K# z`C3L@v!V`J;5GBfHn1HrUcD0Fcm66%qY2_#lxjF!WQdt1kfH3Q8K;8J0myu7@_W~! 
z*g(d@2B}@sy)1Jg)@=;z+dk}wwsmFLKL^N8lV~UtmU-~4$}&=7hm!IKGqZxFnmoT~hj+n#W~c^_Z?39S3~CtJHCgzdZ$G#>%0 zX&U0XDoK+vRasJiD}5{o^Z^-1ovBb_%i`js1yb5}`PFvL)NJ$*v$=nPVnh zezRyx@MHsy*X?FKiyK&78_g0;a(^O?B0hS#ru|)cuBwSfG|h=v8%mL7L5a=rcZzjA zX1x9TA=A*t`XthpYM$Mk@Y6lMM`8#F8J2c#+?(i@BGleD^kA|0S+`O^~|L zx8`#q;f;W?&{?(dNMJq^0`xWy1zG^~P279xk0V%ygakt4Y;sV$2K$|_=k#7Z^l~o* zAb8k_-sNRVE_;x&^_CJ}k|$KOk899NfImoUsDG3kauzByO56PlobCy>JQ4MG zx~mN7g_r5g93T9xRdlx@+ObA<+qb5SVVUbQe9-hmRH1xWcqzNlenZ!23kXH<&epVV zBc|Xo7_DfCE6_o)ws*~Ob){zwf-{(CW<4KA)|*`zXQflZ!|TRXJl_HvY*aKSlH?u~{w-E)1Dv+sAZ1@m>kfC72xi9VgtoAdL1)Z;;^8R=0D#6K ziJB022I`+CD9Vj(fyxP5I`)-)Iw+=DKLdt zat;Cm6El76*DNQv1AtmMM8<)C8sn;N0uN%44yQnwYEeI6Z`!pCW-uEWG{IMZ1K;jBK?z%w6#LIa*aR9_r2XVN)Fr=m zS+tXnu)FNeRBZ?CQ|tv7ggA;`SakD)iPyD`g&}L^-6QHPie!y-htE)=fPrh%R(@H| zPIr^pUjVv#Cl4*YN9P32u+2dZcns(<+R$S`J!M$4jXh1haw4seZ`YCS`w|GEjLoF{)$8)Svr-N)YrV z>ma!yzI{k>Ho^;|NDo8*xxGfrugs0jWMFM1{>x<4o=TeBOKzcS%0G$i87*Kn4b!4dzB1Z0K+JLh zed_Jk(5v67>PDL3a;`2{{ob3acb+uL4Kzzp6f&B2pBCwfKxAOrMUeNZd;e=a=F!KI z@iZ`?V0z9#|8KmivRfpe&#WrCmQ9&!D5TJ$s--+>NttF6qLv01#Gg(w;T_{3$bnMG zQdSqVsGoRYAgu<$H&$wbGl_418*53}0Q5`Tm18Td)v{sbi(*-K?%`TmhLIj=%0Mo< zPUy$aLa1EVt>(NJ!#jN`Hp0Z3b*Py`JtPa^G5N9h3=u}TV(8(?UUslGjui>)JX>-3W`a7=*D2M2B|+sL9p_^JLX|a=9omH%+2U z`RmD1StA7Y4>>wvhM^^iYpt9+V}}2bvb6nue7%zk*jo6P`{YLUu>GbHD&UT;#o&z* zrV3w{&n5&F6{iei(QSNPO#{uOK#!C0l|CO$+Ik%FXJ0qqivc8P zvNr;J7!c@d;PZM(b42Rf|L*61k3JuGa--wrc`P-)b8CO49^ zf43OHEMAE08+F*GTh{b1YSUq9bh>f#f?1X*|8d3OjFfuFvC-RHY&BcFV@-$XulLrz z4u-vS1QqvmyShMhYM_j_>J4Mv|Fi&>CsH9UQjL@3{0tTxx0#Xoo8x8#A+6ONPhV6c z#~Czwt^dKg&iam)4@wS%3t{@i`EzV?K~8b;{=u9~=OEUHMfjy^(!{^qk}-@q)rW&@ z&~Q~7eB)mrLY=Ohvh=EQJaPCT1VxTTo+_k{PoWDNb~9+0W8dLGxUNmgVSGu=;m}P_ z$bjfU4dd9(hRz(Ln7)S0e0)7_3zxbD8!B9y>$3-)B4z_hse4;Vluy z&aK&G-VIB&9kedn)t_w5B6ZnVtquPoi`Mva4r_MxvD2BZR-=HQ>xw_-EvM|`)wnXFZ?!hw!^xfe4p916^nMABozZoKJmYMuco~BG%v3rW{xzJ{ImHCn_Q@2iEd__v z$+jzNf!EgT5RYSN^N3&LrG-DjgZ49F@yn9GHl#}J|1=SVlw@Pa2$35nCrm+SXEs&= zILZ-LcK;v`w3f#MLgyzdbFs2+NErXnP#04cQjsa=#2E{K&TK*FJrmnPPB5Did$sO$ z{8#9SdPnP{`4*04W0NV;%HzNZgbGy>jCDUN&G5fn&4@?kh)d<4(PsCqCgs$+R>dP$ zw3?tu6OQfU6>b50STsr_WKdqcLdmlF6R$VfZcq0wkOjecQ>A=(`Kk*m$IL!M8NSRt zu~dT9IMm%55l-Dk40Jqf3{0}*__&$*b=Ys@3B*1N9K?cl|4v;U{pT|~vDzO|)qaVO9lfvFCoR=(J(-qLI<@QU%2DbMz))z6V2eQ})pQ&%k6YW5pk5@~MF7$z}{((tm42hFqx;y8EG z4>i9eK$yO*cu)N<|GH@JzvRN&Aqlmfyk?Fb$OgVPoJ+P8M7Cuzq(4^1XNP;GEy*|W z-d;SLZ7k+6OD(qj2f06W)X)F5>YR3}?RDclh8>6*5AwS>U+C7-aK>yLVo2s**eL3m zaEff+>(nIM71AGfyQ))-2-5a-<qL3}SShH_aZ1vh zaE$iFydz$owF;{gY$+$;i48XjEHAyzzQ%^pva3h%m*jbYiP%E*N)w7?%wDmvO>^-# zA+~fEW;-EbI0-tTi>&@Y8*U@wqaj$~ua7Tq2cr!j+#&myHCrcIs`q7ns3BJGl2dWs zl?kFg#F#NP?k|5Q$&!6T_La#&Xe<3*`RycLX$8S3%3`1(dIPk9;_MzLK_ikEfY!FX zzT({cbu>~TNxJ+>&*2~D2yrkz7*g9kKxWImlsFA_ZZXx*DAQi*CRc$=4Y^Ex|66Fm zxLj|2?y+pw=&?8S&T{#?E$N3T?Z-t5{OA41+DpQf(OG4je$l?xT5xsF0&7NrwyjCS?MMz;w{i-T+H4tZiJ z8~#|0tcqwX72#$hmts^6cMX_CNdTgDTIz$ao4ikGpdV9l##-1_K z&+x&A^&zXIb|s3VZ0MlzB3KJ(=WZ$<|5<|qC97g!UuKG2lF`uiV{Y_Bk?f?Uut z9YEMP0$#`^HRzKV401a-Z=$ch_VgNGO!);a^0jfFvYovKcF|RA&SShRRely9Nr|g( z4N^!j*wVhe+8Mo{^%VI10=YI=2U60t`6Ck1$(IQ!JvkfFxAJYlH!uZ3*o3uTVaH)@u?d-7f%{k$hSM!&KNgdIv^ELzh>uCb#PRjY z1Z1hEMb6bPdfaUVZ4u5z@pSlIErM5?!i;$hLvY}2sLV@6 zs$f)z-&cSv8r}C$Jf>@)6Bj|2iMS;D!nDrw1cwmmEWEcn+aFT{TqpXIbJPy^vh65L zHM7wd5v%3@qzIi~vb3x}hMnZKMqLX#bVkhD$%hG;KlVb-eJ6a~4Ob!o)3Px0F0Qxm zKIaGZYt??kAj^c~U1x(}`)SZBjb3c`pL=~UTb51gN(pvpv2ta8R=+ON?)3YZ!UUmJ z_@YXWUx0?{CzY5McnXzrYWUO12h|6@-9Ehc8Z`$er0T8}{m-b}f!jtB6(M{zR`gNk z5Q=bWUEv#fI+!m}RwWg&W~SLREcDAFsq8T#VU;&;v!uhkG$tkL-^mX&e}S_}9XYPo 
z>R?YqpPk{T+2Wh(B+;3>3RZk~Ky{5Fo!u(D2pP3^$Qszejkl{BIr{dO2P%aUboTWTsokU%F-}wk(uwZz*s!UMAW(WF?oj1qS zJsS)^EfUxyM(|LQ!N=|A)Cql0u^-c8sg<=&N{v8Qpo5B$JET0&g>2;&nim0g(A+}@ z2JODBmiJ-%70^~r>(DWyp1hjWDM)laUMb{a@|OuFpbf_cFXPB1JgmFD&8Wow?wfYA zYp@BPQix@er7-OxPYQ;vMTINKQVmy~Y}tOLwrQPJpM(c>45SN>Z!6>7;KF7f#p5Q0@ClEZl|oJaFSxxpbnDsCiW0B`&g6fI|H z?AVRf+VM*yhcOW8f?S^gt$qap_Wt+Z_%Vx(S7Wh{^HEcg?{6O~!~ewqU9wi&)IXoi zh^mjmSQ?+Ed$F2{hpLoPVBtwZ2BY9gJ#vnQV{GExjPD!uBo%#@wpxP3dC;_5ZyJB` z?MD3G)mA&6ofhH&gQeU;1#FQbr+!WDEeTGt{&ay|_!0r8(t_Hwmh)v}jvXg%Z@wxf z?Edszz|Ut?M1dD8^8TWO?Vu#TD-k@M7G?0)zvyOrcvQ+ic{kK=U#$AhA;YPGmf3bJ z^cgwhA2`8*q+>diN}bu2pd`F|E=d^X$v(>_w{X}JNEIeMyr<%7r*6A?2*%LcuZ>3} zuMC0SS?XI_%1m+3mK;E z&-Mu;Wo)?PkZH2mo1~v(Y@ap+Rhko+&C8j_Xd| zZAV$QpQf*)B9BH``L+-mhTt^Sw&BU@(oTeHMW92(3r?1$^i!y(r^yFdR0*My)Dzo zqWjBmvh7~JzNvp#lEz}oWoR%ndcdzV5)6>994f`0%2cnCS7t67!rb~{A zEKqK4qo+$oBSutpaDvPb8PaJv@PL8+WmO^`k5+}HR!6XJ8m%A!;e0-!rR%+lsP!?B z{DPbsr|P}YxK9oW5Lgb^PaU`VDNPBBPV%|xz6Gi14L7IvTTMocg4C6T$l+wz~BGlStKfDS8GiGqfb-)(mU`7|`ue6Jee2y+Prd#!ZD%Key=$-J`5 zemE&#Q;rwnt6|{(ob@L*JT;j$dSCjyK!K=G-p3>FX$dvGknUTP$O_XxLcz8BRQP%!z1drU6nKRiCS zBqhw35($|eXuc0~VX87` zvGcHC5lQDeER)No)L*)^@=-V{XQTj7ly{pGlh;HZqwoIraJnCwOf}_J$S9TNUZrtj zZ5f`mb?V~7N!6pI1j;cXSI>SLA18Vn0-Le12cW%&H?9_g&sQtIdqi)r$`CPK5eJ#i zj*|cylhefSR%teC-lRX(@k$Nh(C9cdzOM5=Zp&s?qRo(`r>2WDP)d2F+=SE}U#IbR z`Eflu{`QXLyE-Y)D!@Y&aWJisV7URbb+(P%r(=_6F2|aWJ5T?7E?o0{SPIN+C_opE z*HoaI+nx_(R-p{qU19{*!*V~xzuLOx*K^Tm1AXJsshNK}ziUoax0-IP7BHr|<^g^U z{;j?(EhgWNvfR|Ws8b@F{<@SnIU0&cWGy$^h&5CG-uPdEDM(f0_#{)Hoo=CrnIwh) z%LpoQ(6}~8Hb~5UAkIF>u|d3YY&gcI@mtqLC)Fw`9N6sqrtwGsg9oSM(FoI0d{D#z zS-+c5m%??=I0(gUW^j5Z*G%NW^8di$*Hnw?xo`Q9*jPLwmm_|Y&2*&gZp&=8@nYyf zq7iro$9gFOt(YEn3j+c^^zXRyL37qZEg^5`&$txNb-!o_N( z648@>`=TZd8?QTF?J+O0BeDphn|g)pHOL)~y7~FpjjfS!UM~xnx(r=Ag!>zwF3Qg- zgA+HzA%@c21u5>uYgs((YX6YZR!&vNqH&sYmVNqrj^(hZL}tKPW+)Mh4mKCiF1FPa z-k=gJw}eoe;L-xPZW!X{oW`<*xwoE|x(@=KSyz(Csq0I?k~Y1_MT_X=0g?tcd5f<$ zPmx$mlRf+i{2`L7FydLS$A~ zHP!of{-30j2jz9=FVrd>)BuZZYR_uCRa9vARHGT6wYA3AU?chh&MfDy;r$*Izdf4r z*M|qB)}jOUeSU^dI@wRP6X45X-u`bkj(U$priF55y9)@eJr%Vi^a@Q8H>9D80lC8GYY=z! 
z2p+bRRcfNRR9Hg;{65C^{LBRV7&9$#<tKTfD8k; z4F&~R<6eNISt|rq19=6?BghioI#9)V=0+Ghk;T%|2%CG_Q5*r9a^zGMkwXrKMq%;V za|CnLC5N%RbtX;l~}+q{_{4y-fS0FWw7@I@w} zz)~B(|K34=Ekxjhm0Z*!W84ccXHRc=Skco5e3+O27vtEEZO)YS)OsRTCB9GAbIJmb7$Yywl=hUb6KRq6hJ|T~MxKQFj;O1Y{KIlfWTb!Hpxh#Kf&Z z-c|gaX6gJE-zHZn=7n!83ee0d_K;xJybzLYdWUBL!Y@R>&XlRfOg6;3pEjEnb_iJL zwl2S_EX=_~=)?wvA?kFrv`8{GV=!kliH2@V#={p_bZ_P+1+#OA&$E%C;Y6i_p`r^I zTY8I6T9(R(eG^9cyLlJ)dVj@p;LHvsHk?2;-rDkO@)_-E0hH$IV76P#(Uq7oaE6BM zsAP`4xMW&YOrY9FXHxHP`qFlUT!}_Gyba}t()u>?xhb$38|(KC4)^T*bM`estt-Cf zu{UI(s(;y&^gSwIIX*rtuup;Rj~UXvX(PM=elzqkJ+z8&1L+uEhsmd5)(y`7Hh*-U1l{KCVc%JoJ9MW;FClp#gY<@NphmZawX2g zfhO!1BAgTl+R6|1`m#?sN*vMh;3A%?UN7ODipsx8u+~_i6Z%q6%rt2HfG)wp z>gw=hp$RqkMMs?pD#hEEA;Z%t@IVeRMVZ4XJ>tkpG4jNwQpz#?Nh~kw#(`-Awan40 z`>N7^tP!3j{~M)Li%OEX{hh?{l6yoBfw^@5^jtceg+Q8du!wtBd{m9dR8Cw1S3p3T zdh`@N-(B^A?|ZB63)?xLhPpsHHV@e-3R4z_LfWP1DM;K53N6Zz13qMz1(ndM(0MKu zfibpZaH%lYp)o9OTJICN*y!q@9~39;E$jJk!I%8Qa-UGy_o*Day?4Cq0xoP>RVf`r zOiMMXVa#Kz7x76W4m0N1o=hULG*B3>9{Nq!t6A8(tl`4wkwfxi>vsJ?SAUjPzCyZ$ z8t5Aseiph?$$IQ`stdsKLlJ6CXkW&;l|vB!UbP1{T{n>YR8(hd zYdha73?U1>=N*?SFn)4y!QyKq;kEgWvZP8?OUd@QgMncgFcpGLn8R@A)*`!P-abJ4 zcvn#QldbN?`Ym3RU7S_SBuct7OLHLSpkNXD(*PR6igzcolKY#H78_;w?T)qX>tOxA z*#zJ21t`GtCoT6TOKzCdlv-5}WySNUd5Ve-d-{s`8!BL;D)az||6 zTIYDbq*!PjRES4DJ4X2PDJjzd>+qaRMu%hMLqY?Z0b+u96nF6uk*VnfNGvAmSnBF% zExOm#h%+g32X=nY){$S^Uih?bWV6rzNa4$j~#i?~K!oB5a$ zaws!2xUj#yW>7NKbjVvblw22>451$OXHIWWf%OwRs`LylIs_XhdFrlUH|)J?uuC-$ zDqPFoVuqr>J=hdhAzH{QS?JATbx|mfA%(%z^i70K|CGqt(0WDx*ESGi@%UawpHIys ziAW7)&i$!Hl)8UJjaO2v`G|szum%h9@a+rn)_)$h8J3VC(oni) z`-8*CPX01@F|PQH^j_sziHYqe$LUR7hpsUC4Zx{$!MZF|6zSWRr@0QpgaF`x(;YYx z0?KdRp{keV;8kexI8anzD}U!G46tILJ8IDGp$8WW6w{e#p$GQipDQTre+ziaAyuF6 z{(`DAHd-rp$~Se23Ki+IxcH8eZHf8-;gm-!#Qp`+YqHJQmH7l1)*VAiQfYiv%nB_M zx6OF>SYV;%p<=KM;9za3_K|dIh_3a9zdJ43 zczarv$Vze}pBcACt3|ttTLrwzwDRcW?jPGOWY}u-Jlj{N&hJTj2(lKhJTLz3G_kvN zJ#{|2i2Z>Zk)U^thNDQkQg1#8E+5ZSSsz?ZjmpwxjAcsi?uOPlaBt_yqV@OJS$j5i_*rJ*F>4WZCci11vi(C8zM!tprd4*e6I1~ zedIu?#dLlB!L|N%oK5&qJg7+UT4Zxuo0^SCA!&|BL`gCnp+YW^#I)UN_~nAE6|UO^sInIcZ7WfA)7LyZYT4 z=^f3ZnT?74sAiSt;Y(%@(+E|iGR%FT;pop{A$$Q0dT+`sD1pFH&X?6 zzk!6d>xyp`ea7UTgwVexKDKk{rvdQ`t44U&Oe9o*ybVuxFNA7Kq+pz_Y;lu$8mxIZ z)!koGdZIw_sNQno%5-cXDtZ_9Sbd<3w zvu$&kKP_~U%Qp97{6Hf%_a&dYo(_+jlht<>gULSu(xl}jK1I?WEe~yv=kyr&#;#WD zY|a}SrA%|?%dz6%^y>IY7BSvXTwkU8_S1N?jnqDD$j!wh*|BOR>VLZunEmU<_(Pym zvxeGf=IE9LTKy+g3)U&cLg+W&U@hBkzyV^ZvM#nBn^n7edW!;!u{sgm9DW$jQLQW= z$H3L6O_^1^a-2UZ9)2E>a>n+_bxu+%7)R2FoW?WoYvx;KnWEa8sE^8Ea@ zjx{#SE30)?VJ5{UBNB};wXyEo64h=xpNwX+pI>#bJXMQ(N*iWII1cMoHh^U3mG{Xl?h^DZUk37c)%7C!9Gs>1K50yz z>}Ey2d>f*X*; zHHA*AzeIizOHwdO5wgH{YX#`}Tz7v}bC9lqLXRDOswbO*&en;8S>p;JDx_DH^Mj@(ygWdYj-#MhdaowRyZ$ zgkbj_rm|dljqHqOn2i`((tTC$@1;PUZ^Jo_{>R<(MAHcPZPXAdFiiTE5%V152wT7?8 zdJUl|!_LpYRSnZG#hkiCE)|8GnnXghM}Axltu(%?tIV7xV+`-6-Xwq$}by4QstqrVvT=wM#_dgm#@H^Y>j<; zq>(zWgD5a0b9O^fuUOMONl);YvLO7Q7GMS|LVp>)%NgO;))XDU#r~bYr+u<)?brM= z%0Rn~Ux-m~8kKI|Su!*aguNjmnYB_AhCBs28goT@qE^I;$f9rLI2s>OzmjE*7zk8sce`6O2!%+TRD}EmYfXkM(w^c zC=4C#)eS55lMA^NLH|zkea81?;P5*zVm=^74V`n>vG~#FJ@`m}&(IB8yvFwB#BjV3 zT?$Ks*s^I-jto>swWQ@rqH?hbIp`5Ij8@LOk1~g+iq9gyT>Q>&z!al%uaS-P_cYKF zu(K!Q`Aj#c27MDli2rhTQo+(#MxuamcE9FJQ(j*@~aC{2~)0h&nwko!bg_Wjq_{ zRpGd#7?USglBi9t3+VX8`m z|8lAJx0=+5ji8$NIWZnji{~+2-s>jg&N}pd-vEzk(`=v@;;F1B5V%Bb@Ysxoz6k3;G?WfGUR57%LZbu2p({;sKr2?@WTs>C@aEdRjm9=Y%KEcY8yBN8%mqqD{?crcZ0utvl$(BMk`h zeh!qkp%+?iQra#`5`+hHQ{m0y%8d?cQMPyqmh20wGdGKmAnYi~%ZR@M z`mYFTu;)bciP5krxdFM$z3<$rd2h;Ht!B=fqSkmy)&w$AbK-N5Zfv+(LN5BIYx*b| z!t}CBPcCKdpwrp@ET{gOLs6(EErRUUC>i8I=BU__EX-)gG|8y!<`r}7t=hqHb%e*z 
zp(Tle38S&{f;x=fhm}zyTKp%l0J7gmBSdjqBilq{D%3X{%@N78s+5PvWgqQAy|{Nj zNS*Q?|JPN-;_$d0m$i@#n_ck2SZReE8!}orxf9%GrBN{?TC5l*AM?c8x z$T=sRDEYx0)HpV8F+G*`&cJz9{bAs`$+I^-f4@Rx=f!e&!WZ&fYN+L(#))Bq3watH z4^@Ty+p-EpIRmG6ve9z^Php~^&-HJ{3*^D}-48q=l!TUw<94^P>x;z45~amCMZ^8* zh8rk5{p}4FZd)k&x7?^Ui=k`kjJsd4K3D#l$8_Na5{e`6GuN{U_5Su>S*FuOQnkxP zuvkVL4ak$~1Lp=36}0Cj^U|}J()SFl|9o4CIrC0bVJD@PFx5Y8aNrm!4n6EM?o3Vg z)|vmdpHy6)icRO>S9w%Nq{*orfHMdHGFFbVt`z_FI_>JI)uY1)*&?3AI2*kWH=CD? z{@7HdhpK?Zg~DrM2tzhWsJ^N%o5Ns-Cb?)D*Ew1tBo2QKFCkqJs?|0uH9*yhaD4eg zag0zhU)>y>ZWU^1vqJq3nBy@x_lWpBU-^p_J{7}+QT>`!n&(D#)CxytUXa0fjK=et zui8aHIwdq6=|)tf8DJw6)_a9LuJd3zpiHhwywK)ea*lMDwGgzTB8Je(B_qk}*ho;1j!jA){%!X*zO9^7S!2#waeo&}XnD~O}0YQ_6DOSz34$|~g$g!fY#IUR=|KQV2ejy-Gfea~CzP4()lK)d1P1HXtPyZ6`+5!X%INjp!tGqDf{ zVP>Ab%rV1p12{H^V1lgFV={7htm(pmHD-HX4P%4a=;QRzdpQao{Y|kzC0=nDE0-*c zr3bPaR9xXbZmkp1ji2LRR2AZe5tZ+@8$dS&Y-SrZ!6%}jC1QZ20SPs%nKtaXfAa?U ztTJ+lD$H+5MGF-o^$1Q5^!~3+e2U?5J70Iz+{^r;+PgB$RS{{>KRzt z^1Xj???QtR;92TZ36>Zk!8J zn|QjZnxjU`Aj&n0#0i4W3FjUfJjm@Ez4fo&3@#kh4~BpHZqe?kL17*aqkCE$iafSI zlU$z^M$$JonwAG}1CpDVWL9i3=ZB!d-fu?WD zde|U#cd|la5VZL$z97jaevYKODtm&N*=(l5*mm3YBaOqlAV>^oDj(^l5(KCMf1KXL zMNTYqh_?R&c%P)U9XG=u5UmKn2A*|B8~fQ?x3MI1utT9w#DXOLalAR zz~!&WiL?~i#IjPwa@cRgKkU={M);!6+#1^%NTUi*-__=~2eNpM_&srZ2H!l-$P_MN zg%7A+BTD#_6*dGt|$H@5!ej@W+;y?DwEwvuX;qO>`geSK{y%9$sDD2qpx zukp;49AF?1*jqjOZxvQdgO@Kb>1m#rQ2Q0evO`Bf zxJ(-%VV?OuL>CkzTf#5QWBeu%OkOfzgkX~YFINb%lB`=wH zOt5!LKCS!;K!#QT&wPw4?vOKVGdGqm3ua>qG_;D3F?6U)X~V22xSZwWO^yUAx$JObmD88t#}?z& z6lpOnOHxo@VVbNEPjU!vnfy_T-9xWA9gj0Mz+L}Mgcp3lI`@L7>*SYU8P#CWnU>7V z(9-6gdhrH_O?QyCJ^McOy&RU#o4^M53kqz#8|JqXwVx{ahw=S|LqfyQqu*l&rB zo3fki4nOiCWFjC14Q`wSfBs%1$GXrElr$!G<(63J-1hz3au+{FPmKi;!zV+{V}gAC zkyd|ut9Ib^c+b#$(AMBPyQYnBFO${fQ3m!;|HG-_PwUX@cgfC-W(fD;6T>y^Z8MEo+fD+MfACcz1&FHgo`Ob+VMo9B%wKdjVQqQ156(T zrie&IPz7Kvfo3UsNJ;kMzfR#W@NtL$4MvKNl2y+OXqPGQad@H@&j5ZppDdeo!AuKu z`uXEOs*BeoxAvyom=Qz~Zv9eGAO;HF!1qCRNjLA%LC*{+ND|1Cf0~^`0m>x+@y5Z6 z<~!HbsF-*+UyE&#_(*-O2@t*(BwlB4o1%M!s&%wl*x(Hh+r7K8=e{XZmiRT`kHkdz zT??fm;kInoiUdO6l_#cD4~Tmb55%&(t>UW~nA-QyJ2o&hw1!3*E+8nB7ujijbSXB0 z7vw`Uw_O2<&zWhiITfRk?6$_Iuw1@Km+HGow+Ndo1Qyt;U;dJ5%&l#rKmX|2E1eU3D;X(1r+M;KnQBdvi z-_6kbta_g?qIiwCfQ=@QSH@is+A3#ylaGG zVfZgnz&O@N5nLwEs&$!(CUX{3fRd;ZgU7M9aZuP&y`>a&-unB_Ix9{=LFMH&iAz(8 zZ;3PBBe2X5yc6v6ZY7saOY2~b&fKg~qiwG_P>p=5*hAH;MTPD#WnIX#(M%~*ZGlXP zraLiq?t&9HMdCD?i}##HZqXXJ;dP!w{ ziFkOvP-o!amvMsfaQzBMDM-<8)Klu`ivPHYut!lVNbp)TdgljLCzETFkS*6&@&#f+ zs6g6RX6#fNef}W#)V?AKeQ~NDRYQ)vg0?Vte?G!e=li5FH|HPHqS^Qn+;B0gd5KJ+uz7 zGwRY4bLoSc7BQ}5r@TgOm{1?Yxy|jy70%j!CxWgMJd;U?Gj#TzekUbWGzYSWfD>;B zW>pZNK8{I<^w_FN6gVXYqZZs0G(tBD@e1?atq{L_t9nQUm39-wIb@K&?{s{;;?BV- zF4B;n9N(b9T-1relwSs)IfH^Po89IA@$?Ohb#P6$O&i;`ZQE{a+fHL!CnvVmq_J%~ zjcqn=(pdMj&->l|5BBWAS~Ig}&=kQE&3ft%3y%{HcMR%|WG1Y(Kj#CLwOg-EniY0l zKx6swC^2S9$Zfv>K-1eng+Bg>dgtQq8}nAoL1><8iOo#yScU>@QZf0$aE580gMqe7 zOaJ-+Pl5;e1r)FKkADAiD4Ob4-Ua9(w8ag)!1P`tWwA1HZ7MDAT{6=WlXB^%Z+gY; zDi}X_cCO`b9cO+w#@guc{U|5{z&I>~t}SdVo(ge%yqCS#)1v?24tt*V*gwu)?pX$+ z1y`AycRMU7n<{S9E3Ez6PND|wI-px>|n=E1-LA^F97dhAnt~b^~$sjv&cq2(-PuWqL8#<FR<=XG8!^Q$H%;!-Is$o8;0I|1_qM_vhZ(pa42r>P!IoEc7Xz7Chx#zcnTl=a_Jh zHC#dmf^}9*fIO%qiS%$}zKu9jow^t%`o3A`*L39NA<=y+r1G&R;B+(g=5xW#Vm9db zfqQ#6!(1)f+{|vmtE!UuRv_LpaSyS*W)4d`O=HlScfOzO9IjDv;18OyQ7Cf1TSnc$ z9=OnHy0?tY<|-eiD0f)X>~KUG_pCSZia7y@z-Hw<3_o;AP~&Tf-eeAl9C@>0$WK~Y zp;OC0=+dgUVZXey+jJVKOUqh4dr6sdCjH5P*8zTpmvy{U`GQW9KBKHTd5i;t@?#~@ z9xtiJl~ODG6dmRl<$7BMn{l+=c|F^#%;SXVdta6Ji%4rk0O^+D;I1Zw>P>#ZcKAs! 
zcQ%zQ8sMMl&BZZzVnXqHd)e9;(2|Wy$5?eo8iD4G#}VTdS=40T2+8nZw_5vswfN@| zAxha8TW*lwaX=<3IWMmQIOqF~svSX4`w!}{S(~*;n@H#z?{8d<)EOzHvv0m@1r!^yQ%eQFLN%n0;a!o(v#J5#XdVr48}M1pufIarZQU${ zBIRYFtP@X~nbI;j%itq(F8k=)fVhg_C41=GoVIY$m<|wc))h^L)c;OK5LMi`%{L3|)oht!B_p40`5!krm&H&LbGolT$+#}5r|FL+=K zO5x;1XXDG6fimPz9?~yzNZ!m+FPd8*kBYzYy1-GF+j@{44Uh080jyzs56tqZr@Jyh zi{OEOo($_Dt(LclZ7R}bTL4@ha;8Fwt@?>`6*!vVV$M4 z|K8P=V*fs4YUrR`08-DN*}tl|TU%6PzMsxYC+Df${Egs8uqx?CANVpfcl9N8#`@3X zZ+qulyM->OrDLCt(y=L-lSJL%Fy`0~Zi(0J;7xk7SyIsG*bYtI!s1?Nvs8Lxzc(=} z*V4b*J=8gl^bHj=KU`2NGmJ0X>|Q2?m2a+ZU3x8k)aMpE1{L4 z_)(mQoERS%`I%5l4jp+t!$ESy;1r&>s>&UyEHD}hS3)vl3A4jlE!0{YF&9NSb6CS_ zNbHM1Ff^$PWqiC73ek~*eY^H5| zq?ns~w4AeELnfxcmBD;r{{1ADJd|6D;5PXtVH44W$Hh7+foK1>?gM8<2o_ihy(5*j z+pKr(o*jLC{Ri!c_r16}^K7sT>{D}(Z~>q7iE`K9Edofn&rNSopn-idg6}NjJ+H^t zJ~hFdiR78LJ0Af_@Ey!O?gqS5-?oVO<~D>vc?$aqcN(ulJOTD)y#fAWTx9Lj>CHgV z%}?u`%^vQ(p}6SH^E>4b3B#$-K-m!8>MW;}=~j)0x^8r4(+nv8qv@Rlan3qyo?+z} zwa}t6>a%Kk48dx+Ee^uNQ1s4F98ynqeb4%Zo2C;7)wNN!Ep(di@@(egBwSZdIupFA z9*uqu;C9(Me4U>)L=NxVlnv*v;;S8`zinDhc{Bsjt<`m8N+E5g{X~VS$EY=nL4VUE z$i5B|4i7)lGv;DQX{(1CNthU-j$lb24a&!QL(U~Xk1`E&^l36CYxgXH1eKR{y8ezz zJAcmW`DvgNywwh(+HWt-Ny>5Igm-x|QrAH;_de%QN)WvY+l9An*Yz3jz$d&mMb$54 z(YhFFL%A^LN~IFi<4yHGDs>09?k4$dszrb1Ubz?Yh>xrr0OdYa2M~s7vN-{lDK|2x zMaA46D&NpTbSj_|1FeFJ%%`q|>OAo!<9?N%aU&OFdO0`>I^} zMLfot#&KVvjY^?nc60BxU2#~V0WBjD#lEv0d>yl(v9V#uHF#Hg77cpl+(TNEi@aTs z%HG}%KT4+%npjtog958y#b*Ry5u?BfzsPRzod?Jgn4Fy}r{c(N+o;`|9gDUu@QsD~ z@ZE#_&*&n|1rD=O^O5iDtl|UQ*5aaD9!J2yRlK5&6(_7Vo7O}&og;qR-C@m@P+7I> z1mN-d>*#zBn&8v#&G1j#KI=U{+VYROKgT90_Hq~n;0pqJjzgTD4&VCSb5Qgjx-?y9 z2wd%yv9T2HTA`d3JC*R~+6LdW1$2%1FIDBWoIRzr_i>lBh!BQ;qau72DUPgzv@b$bccW`;ToXuJu+Pjb19RFUlj$D=ywAL~qEIlOH^(7>dqkF51Y z!SNrS)%E!DZDp~yPvvAL8)uf{VCxtlhWH_eDYn~eJofTNPQ|*r(==S9n^m8P-*9atQk>Lx zDd|`L$EhYk)CNr0zREKkvxt)tknkEgUtV2wu^)?AbMilZd4PJTx}ck5l4#niQL zrcgF;eJ%7>v)bGF;`ztNLLYh7bOV$A_FTb%U>B_Da~}uRL~8Qmnq%unPmNT7a$q1j zb&8ngo$V$_VYb~?ZQs$F?HpsOAhl6g5cMcGZolVtp7M@xdS3ZLyLVy4TrfM*Pf4Gp17a>N-NB;{*tH z)zai4wH@U)Im$5l)DnHTeJf988Au{YA&U(~7S_>tQ+-k7tCLNescSt0nMqR@zrWO% z{YlvF`9n}WfZR~D|4nILTE0TD%DYNm3N6w&l`Z#v{vwsDy7VxVF* zUj%%$;p$8uE*Y|%Tls>px@^#}%$W1>CzU}a8xF~cr*QxhlfsSjsxjm22pyyFn6k}S zjjSs&UN$Zhu5#tG!^Cj%pD%TvRx{1`fKA{K+AlwLv>n&hYFH-Uq|W9X&i*G2%N$U6jJ|q%G3rwL^jl7DAs-(Zxw86eRr$_FMl|q-Q(^na? zoR6khPR29Wz@Is7&hkA&T%De_&#ZZ0Z6iy6l!>CzR=ngq`LvI%x&bp#9N(E9J@`3r zKLN6-n2vuAvnv9@J}xv(wqF8T81s2`eW@OPoF&XMm5=L+%&$K0rNMGu7Qgh`gw zgfb69YdjO7aug;aSPS@1+cBFh{23*YpMmQ!Mg~QoWta&ym||Z>*6^FOYANk>9+V<%5Khrza+{o+0O1tmc+1jK z@Z~E7a?_&^mb8zxJG~Uwb#m1wSe+FWXM(@3b+-q7*BeggNz3cEGkR*WQ2zN0tN_TF zM@FlwRkX?HjYPN=+xObPs=y)ehp*c7maz{jGd|3T?DMM1j;({6tnUmWjz?V6n~~62~G%S7Cbyw0rbLfPCQuSr24_HiJ|^yg1JS6sOlLx zK3zVkQB~BGP%#<$+Hh5!qHr7Res7(I@D_CjOv4e6ifDxjp>fPQc5+q%lDfxX6MLtf zos709C@D>;OJA`#bq_bUO%I0m`wQloByE)Oa@PYE#l!Yk)3=CuHz0o>-}AH3V9(XB z)*-Ng1p^;Of4-03ZjWic);-nwmJ-W?`&r!1%ZWo4vCarc)t{q8Zf)kvTy2wFq2PXk zA1OHRvoh$>yo|ugeEz>)0BxeQNG-}pI0{1N_&@v4WrJpcjEk6l2?O6!b=>fU! 
z_=^7pla^*q&NzD8utW=y=>#3r7P>&Gsd)4uhScGc55#7088l3WI^iV#sHJHVo`&7j zQkDyGO4`^#i1n5afNy|ALFIyf0mw@TQZanYJCeup;|5i?7F=g&c8A94JC%JnwcmG7 z{uJ_MxT)zTs^O9RL41h+YEaz{`x3Ie8~2vzuir7f<DOK>9wjES zEE&?D&%`u{q+znx_=wRrwY$U-;xvo^%-#oV=ljaq*e65|?7Fhg80Jo<_&ciR2Y#T1 z-UY@$Oa4_kmo>?drDWr}7EG`P@re#gI%(|lGU6voAPH*Vl(ijMkTXj&7(~>O0Lq4E zOZkUN4_|vb$*fz?$-!_G*(Juq4qtkR+*i{=WiJfpp(&-YBLFI!v;5f-BXW}6FKC<4 zg9t5Q_Zu@w_Kgk5nZ~9jwp#?kZWR5q}pxBb%)h0gCzX?^H+*iVmZoO@Y@3;uXP3X+mvv z@K0aR|0t1&%dx|xIJ@q*r{1O2y4esPW0#z^&Gwf}W5BoO5ck%W^%b)JBW_=ge3!nh z^+>#}UJ?6-`0F%WeEF>~=OTx$=ri@v=H!<`v;=zUMgx~Tqm)Y!ehU#<1 z<(_pO+59v%L~l}*OE4kvd-WWo4)=?SyTU@F=clL7dW0tq&z}UDh=+$X)i0mt4X!|b za-(BE@_nhM4=xw$l`87QGdn)l)M0Qr;LCk|+CEa(e^P+keodnX&aCh@=TGDD_1*UA zvv5tW334a)Nr1b>EN8=ArogGS8Xta2>rm?4TPV9J9zKbHwg}!$;9o)!r*6aA&ZZ&K zwsWd*Y{;247cns*6GWl)dM#a@E)Z|mUgzvGIDTc%^IX{7vrY2XWwviOc6GT8HiWE1&H@VHTfrq-6r5`StgR#7C1w}OOL&Lp_=J9w zg^*Uk%ou6k;suHd5hY+DEE`!i;@FI(`Oi^&_tXglmqCDni0EWv)oVXb%x#Iu1=!W* zYSliS;N#II9bIyaMhw^haEHJ zIm)G|fD67rP-1K>L63B76AD8v*xc&Gai%xL`;wNADGupIT@5{~6)Ves9>?u~8xdq- zS8*e=y6ZRL!&44S3r>b~NeWf7=zts?4TqJ5X*Ca9ij=Bf*NKsQN#g!C+2#LC2bS-M zK*Zy4k>CH)lpL^r1*~rPQSbph9bj)~N8sF&Z?Bn%Y&Y*+0&XV-PTyd>g(>QajozZ= zsq0w(-4&kJ4UF|Y{@f$?kS>W< z!Y^Z{yq%i*?i}+CwH}ha2PSsc`T^2n>Gc`U&A>%7j{)C|)r6c*%pA9iQPwS~baBXK z_TSxe<2{+}+NXe#ecTqdii}RcA_|R>6P0OTK8#Be#vtZ&n2p-!0wsM@<>Yz30SwI{ zr%uVv$sm!iLXl?yvaE&}rrqGRk&Si`jLKs4IbJXOuJ* z8p;A-qROeHy1wV=F$6lsC)@@2 z1LNs0Flzd{ckyOcL?kqqKhlm$1*cw%>#}W^R&)ikG$;90x^hjCP#3d07r?+p92nistNjGz__Wlo4BldHZ_m^**zqjmODyB)yr zo}zey)pQ0n-!$`#*$nJ&C4oPRq^sf;YZ9grzL}d-gVyr7#&a?~N}(%u3RTw_UeVM9 zLmT)p|3$+tWq=+VIb5eN#|P@M_XgcVb|FLsp@DlcW>9R(&#SX~KzQ`6ZDcv#LFSb! zR(Hg%8JUd8v-ccDipA~^J8P)P7Ve|Gcy|Y|c@w1nf729KYa3g<^$l!DHVz!%1B+Br zixs9eJnkg&dUCds1L=$vVd^BYEXE1B#Al(&-@Y+;#|_mvv68yqml^CYE-p5;G@cS& z358Y*)TE$|>>3IY1>99oHuM}eP{aoOo@a6QQHww_?Rg-&W(v^jDbq(PJCfR-UXrog z2+yPeS40$pC}X(Gau4LK7DS-#;CTtEi15>sHx>{{I5Oh6A6HK%kw!^iXT&9zjgU|l z8g83YluLKt{i_kcU0(i;Q$Z0D~=m@*1M7 zE#YQrsCzf>VfzOHB-t4;rOMT1XFJz|TUDr6O^WNZY#3!%E>ph`FJwhiOh!lxvDeou zG_wcGIE3>*qGJ-BnbL<3B+$JKot&PovGM*H=3+T-cn>*Qo6jTw?OJPs72N#*}%+VWR4vVqU+qE>X z2^mgmOTULp`xa9f8Bp|QGb%G>cFXz-BHT-`k<`P2k2w8SjJ$sJxBTWNR(J-mw)8oZ z)~6pJuEn3Vddry`W-pZDF-B=*fp;BHb7I%#Ds}YlpLBvVQlL*1A<_8uFs;Ju{F-QR z{P%r58b1UA#w`tJM>yK=tM4~mv2ON-J_p=LDY=pET9V?aF}qHVdG`ynmvp>)pL_Td z1sq`A$E@82o`Qx>58X_!YiFa|O}J*^T8QJ1c%4VUq=p=?+*HqFdF{Q?;%jT~=c5mj zirCHO%+H%Jw9)TrRLXo|14Hit+ zBp5Zztk+bSB}E*ibMSQULPIFWk~iFTH7%*WU7q|!Iym#b-Q1`<<9b$ft?Gk*Yn`6c ziD7J6M0CK^&oaMbhnO{lCLlVOBICWLi+B_;a%kfk1C_Nh>-as5d$2(v%}269cY7@Q zE>wJZYhgUc!+7RN@Dy(wT78y;kasdlb=~(f3W{2<-d?On&9B+GVgCz^#{q}GrX6JX z5b(s4-M)^k$TJQw5ix419Ap1237q7N3FY;qM4C)r97n-x9bVSvD18nNi%sX@5Yl2D zM%Ri4Emi$tKS_Hay0rE{sLqe^2^1;IWEm$tGJ z0|)J4m2murujnu7vDAwh@+=0*!Iw(ABkRhiMFnE9GJvK2g0k+pIcI-BVP4?Jntq&M zRHEkj?(7ZY!tT{-d+);C9T4!}yGt2vDB5-0e`kEPG@QBF42V#Jh+$jyEFXpT=nja{ z)xe;2n^c|Y3U2b6udXKw{F1x;8Z_ykA$8|trh{okW6Pn#S| zS8ZhYbFkN9a z+Rj*=C2`fcnnW?NUIZRPv(0NY9Bgsw@Zt@!#%;jgM>!Q&x{!orNZ@|P8fJ%ugQcaf zA|(u&4R@U}>YP__$x_3lP#Id=xgzA}n$xRgCxU<>lpsB}hB>xSl1?YCMxf2Rz+YmW z_VepGv%LBmvMquMVVOM% zF=iIp5oi_vqW*)2UhwGLQ{IWcH+4|xP4aZ68(Qd_3@D2ENG=N=^R4h2Xiu-FYEq-> zrDj$}reb2kyUq;ohV<3%^z$4zl}u_?^yd>&7N{)%Y!oOY%OzpTij6n;u!-=w9)Eg9 z?}L5I*Ak_a%Ys(s_?`Z1K=zfj(++R08|rKNSbYeBGT+XR6Su+U+b?u0$|~=CJNjsDMPs zD^%&jp!qdu(ke-i*f9dXq2xp}dTP@{403Ux^MRm$oyz5U8pf1=i7rl7aw7h&LxBj6 zTLc4D!LgmjKqy>n=C|{O?j?vQ7_shhO-$!wg=(e!7gI)_i&?Unm~aWlg8Y)^%+JIR9NA zwRORHvCyneS$R1OUmq}*RpW0aLp zvhS~XgEbx9hCizXO5_3#saE(oRj?8uIk^VuB3TbESCl9B9`xZgr8>>vatffa#sGBV 
zytWf7NLcC%#*3ykokqp&V3Ktx^B4}AkMZ`uaZN)BFD)pi+l`92B`PY{ev*~oKTc$j5Z~ihoVd6 z9R1oU-^lU%Dh?|32D|k7f5q~Nqn;<_t+>-gP1>%FK6!D+p2rBgmB%q>-NDU)B07k! zW`={H8pKRd?nYm{-Oo|&t9xP5>?2y4?GYFMPL#)Qywe1-UPj#sXEjpqrKR_9YfZ=O zA3w8Jwve%Jn|1|Ra3mipA13$NX3a#zG0LQ>@Uei?i}mAuup!`;bK=y6>48?1>1$;+oHvp+G}|q?EZxsl-a+TtHE-HM$!d z>DXD2SCaQ)6COyBpq3aYMjo&bAesGlTMhv!`n2mgLo?rz_0ss(qk*skV$Y#jlDXJe zwB1;$U0jR}v0@A;0xf<0sQLK@`j4@^LIY3bzg8~0{Qjq5g`4uRzkd?VF&=V*RtGia zJ1{vh+)!}nIA~KoR8HM1@^U{P0o_%PV@@u1;wX4jm}JF{3PW%>TlXBDujK9?^?T8k zhO1-}fhr$Rw>XuAiqi#w88*^XwWO5H3ZR!oQ8QJ*`RL2}``}{%LM>NyN^lY6+`7vC z(sDzFw0(++L(&_BOJaMT zGH|wqmhvS^Q`Sn7)?Co(1D^^)LWM<|E=0eimJ_VyAJeB0xkAm3TFbvFGy+-+A1_9! zx!^FfLL@+jqFToeHeXc3m{=j~&nfCMImwXwoo#_$@K*j{*|vw`pQ1ObR}iqW*|e7k z($b0`abjQ>CC*>#BZe~LpqM1;kJNW?qvjk-K4P7UTydbN`LS|{!>vl8#~dm>L;O1Z zdp=tP++m(7eqfIW+GdZ+@-LcwP6&^e{O&d8c})Z9@cZ3wx9^MZE>-|doYy4Q_3Dgo zYLDOYwj}w+458d!u%(|6RWX!9$9-zG*Q_g#w`0ll0DP@kWhC6*V3`Ex1Q>ek2Gfxz zWzb?aocT_oiWVN74dD+8lm(9bBvQ2~@_O{cVqUIg2Ehy0BSJfeiAdFqdIkE@t@F`$R5lTl!!R3@A^RLnga#)&ChnS{cC{ zD%>ge@M;*3%o(QA5cZei5X)s3-)v#tsx~EWI{>VCrafGzcsfd=GmGMiLY5x`v_`KS zsM`APgvSNIzi4hl$xTJT=b^^v6bsM{jhbd6#qXz51i5|2B5IGX@t;e2%A^WFu(hq2 z^k@#jal@qId>zT1i8^yU$7-^jv3{xQnVFUmJC(h(@5-raYGBJg1V<;F2@t3dm@@Z{ZQf87rZp`t zY7PyxkX?$;Nrqs>R#FA{7D6NKrB3V!^O1$-;aQF0KhW z%OSq=ZhPRb3%9!~zYiqOhwE>hIJX0e2r5pJlhPIJ<&+T({b}L?UNx@|r~)`N4c$Ud zhlhtDW1jeVlr2S;4deVgEDzi+1oYqbuxi~FjFpj{H*5s|3|^{j+=Mdf(&NVJ3+yKT zHVQxqtI#?O1ap}6Ey@=O7~udFL*k%-q#G)V&dC&}D8;e^%JLCGeRj46A?AIKomO)G zVRKq(sS#m3Naj;^^3#B0>EQ*{Od3o3O4wPCz}FXlseeokOqF0HrhJhZU9EnE`*^mJ z6j#ph>mJjTu$6^4(1E*3hu%oQnDyYQLpe)iRzpjMQR4fM8aRN=G#AZ-$~3}`Y!v0Bw%go3R_ZtHqQpEsBJEwBgy zn&CkuLiviZk>Iy56L(6AMS8_zcAE8LAQXKu?yhgYVXtW_BKzwc?s~1;vWfJ7ZY}%R zBbz2b1@y}BAMr6-Sv+oVSUh@RCrq5ZE(vywcFy$!W%h;Sm?HG2!VQ9r|6Jl(U-d51 z2ig1hb|}lvPezdwq^!RB-{T>Jar+eM@)o_sk$tDc{6}?hM?`*80V8CD@6byc4uM6f{2!nT*ehjZClSCrz))&2+Rp9@-O&l*`50AOfj} z03nD{qY0zbuF7Kooor!MZD)op_KN?mWM4=P%gQ?)>yB6V4E~{V{xEFpMT!7$y zbv0CjMZoQHa^AahClDrzv2~gI_hbaJ&5|P-`evBS#J5Zx9+yRw+&>3YuKNf`mgeSR zLoNpkV+g4e6d`s>wo`+98H*U(5Aq7|UDF-5S2A0#aP*wR-12NlO7wM8DXhVGjN!@R zBkigUpi(nD)ERVIYLFatrB4Czf?2V8#9nlVk;ba>@pOi>s=%23o|f{$XF=O3D{cg2 zCaqfIwfyiCYe?|bN3iI^%y0>n<0=+&R?xI2LAv|xBjbAb!YhLCl)NhsvA=C4gif(B zC08pp^uC1B#t)I1+ANY-<=S>ek*XesM10L0<4v%XUxo2niuKhRj5Ljgx&vDN`531+zBA-l-)8fD>LtsL;a-!^vVo6W?T zV+JKCB&p;f#8_EKW=|6?u%#QV2bGOYo4qIr@GEdc-Now4-o#7AA?yP-ZG^xyC6h}p zK%XE%e)Dfh6iRQ=kDt>3qgOT-@#tufR(5QaohDI9HV+%MZ1_soF0o`hl6Ld(ZY?%Ioyh``W&qQ$gAP2m;`-0n|aw?3@y$Or!bnJnb-L*m;Yo zhMzPZ^yS6tJk1|R(HnGi-zVc@YF_988)GefQ^9R8SDUKzoYmuaF~t|dRr(yb#iobG z`(vT$;ok}E8#$B?pT<37Wys+-zMLq0-gaIn^mfHKo@3ot(Kh~yjuA_qgts^SsZ4Vt zrz+zbXMLA*-Gr~56UEWb4UtuepBTgDsXdjMa?V4TW5;d}b;mO$)bJTR1&e`O6vX7% z*<7C~Kl^8F|A`qk_tWgb`|CWKsK_5ZS5Gg{xqRVE7rW*nlPIvx&dK^sE!oxGEkx<_ zb<25DXK};Wg%s6@+N_s?U|GS9MAc2VE}qZ+PbN-3G(f9-;a_STB|D7f}dkF0k-P!)tmH}J;tQ@pq7qnvZNdh}~S78D=aaG9NrhArpdTX_GT9aXj-jqtG z0<3VGF>PDP(0oStYV5EM^|l|mdj`WlzACKPH-+$@B$dIJ==9UgMUn^o=HX!2`qV5g zmJ8dLO@7I6Ea}SWLkfVP|b?rf$!mavRrtVLfHK+Te zG(6JTWIITjD`(qJ%OGU^dR)bpQ;FeLB0Dsa;Qh<|&Ey}U5-4MT*+desz6ICE%Cc(cVC%xWGA^d?B79~mE0E#-E5 zW#hSbKEj6yxJP;bSq|0RY^gsM9~F8QRx|`#tjc$@&{im^ul#s+z2%Aw_>9A}P6#ia z&=vUDrU0JA0hofx^TaQ@6<=CbDD%wvsDo|PJPUyf@&X6b+UsT#&086KdlbWhpz;t* z%e*H$qbEml;{IINCcp)d;pxh}iG!RZk}EhplH{A zrO+1)iT8~&s~z@rb!@ldH+3cCO-QEFbr;~w?^Q{AAkP8jd2RR_w!5(0~4Aqrx^c&Oiz~P+mp>qjjqJ@uYQ3Bf+Xh&Z(q@ z5(z2)dz&d5J|TY}ay4Oq1?n#Zluk31whXk-O$3kD=M}Drys=UfppB0SX$ul>75|fu z;*T|M;QnVO+}FrTOtcu57|1yn4|>~B7jTE*ed1`(*1=d6d4I-<6>V`wbPp-C=XqN3 zui!lYOD}@L&dBg7a6REzD2-R+r|6KAqPzGvyO9@K7P)GUN!Wp?FS9Jf`H)xUF}AA+3;aU@0bt#&Pih$9^0#c$ 
zC+&6~^-qNb3Ne8xkK~*L^Sk9n46WHd7w+L{vF?4%jm&fewe4_`%lB>+J9GHzZ;#BA z20yPtt6kM|WWO2Up$h8U{qT@ZB-jnp!tY`jhSXptGA&8cw_(zntkZrsa8r`$Rb*q? z3#G!ejd|11WuDpzhI{^7SU5f9{b*_mdtDC**>1NEmOWdDFF2TR^-> zZD4}0V!ZtoX6H7<=junMyOsu)5d;>T&LQ9KjbjTCfjp$v4FZ$`oXhS|hXjPlP6- zk9E~H5Vfjkw1)QEG+Fy<4-TwcGc$6=xKzxvm0Z?plbfKMQUaU&+=b;jkMQ)TOzn8r zI9w+C>v70dS?d&_XB9JHjzC|@o&wQV--W2|ky4>Up285z^Hp)*i zvgvTMt338hnd@u4skb93PRW%R)IT$wsx$Wz` z1VewBPMPShG7xovM+kcewKK`oOu`8B+f!Ql>m#OwFBHCqXt}+MnuNR26E#L|=0p`| zwv!qbt+84h!?W0srsmDT&~^@vEI?C0$ho`KremhS?Mudk+(r{rli$_X^K5228gzPj z$a+dPU3&U;%R=Uc;Q@b&PsvC4qF#~JE(I8nl1|jK+GsUjJ&qs*E69j&xVX1k5jYG- zO=eSPyUS9|pwl6yV1Bbx5f}W%MShgFW1kntD`l<6w^k^FTk36@*Gdg&wtA{ta}4>5 zXD`&-*2_>;Z9v0Q)X;wfN9C`8zo4vh5a5!P_W4Md9DUsk7PrIv6)|5fG5`Y%4Yg|R zRxV=StFZlFAj)Y;eCUFDOCahMr*T+lhnu`*zpN4nHG{`jb}E2Q=cco*@O?`2!}<7= z)g5G8(}f7Hv8<7vg2M{FqY}=6ZfseHun_6Ie||psVP-R7 zgr=7X|#?VKtjhe<5BXvE~gNeepeFb?(M!$ch^ z%v+wAr(m6st(w8(o*px%*Eauzj{9uJj~d6F17+i5Zmk>&a8y}zlVF*22sfseuB^gJhe}<@|;wNnWN|cB32*IV4;O;Ap^N$yE?3yh3$Bg;e$(mbc10L8O zr5r`R^

    J@DWmj&qR=b5uwy4G7|x$+4jcikZwb%1cM?1`p8<1?1?Jq*fcaWkQpkg ztLn7qYiG3>r2r_ppa7LpHI!}oKxk@vrZ(ASD~PZe7e08MK2m?4)^3aR1C)#0D{XLY z*^ePnK|0r19P!GMuj$gzU_s!OCMRDCnwS8{nT2Wll4F!c7CGCDM2WP1P4o4vn{tMi z#g55^@@2`S7^{*t6p{7+n#&)_&W%2qh+qsnxyXy0AI)IxLR#QtdXY!rFe7=cdPxutoj4{D@ zphaz;88qRo3}tqXRrsHPe3LjHs|?{HOE9l-uflR?&*f2uU)w3XWPYA6Ys8NC#W1y{ zQ}ORscr6qW4%xYd8M}oQz*Ec41{tTuY!JESO&KjcA7H* zYA**R*!^YRUlB%QK9Hg-cVOa&{eq8&Kups-pRz$_uA6Dhpnvyfg7I*diyt7CravC^ z_M)9prKDkz%xbCuxq(20pN6rdp|KkOwfSh>&OUqZv5Ttp>G2S-&q|GfHuKB>Mz8xa z;2f*n*0JkynBqD3%A}iQ6#Ew#mYB|7Z$CM+i*J!mqruNd+VJ!`L_JzI#=CyraEVE* zDe`NTUEtmZyMq%Shl*%wc<513?y*@Iv;&5!1O?K`%^l-uI*bZ_1;sia{jk7VpF#6Y zD;&Dqr!Hb#Hmc}Cm}&Zf^E)bq9Ph9HMRzv^tJ{U6R%e?e9kk^$7F8<3@amW&7zI;N z>gw<~nI?LBsYJN%cq-(NXl;=;?gAZf8o@~CILa6}WzG3mVgCv-If`>xg3v;L;KK`y zigp*Q&O)re&CaLy+RG9tjETZ&ZLW<2;1KciCoL+cG_>@!E!{XxIpN_4Kr0imgGb0Q zBTq@~9PRgdO|o5ozkcy8mrP2z#j}DE9*ydZbfbsEslM--EKNs6X?+ch zUupbQ#KEb&1fuSL&#jotjKDRTP`HYV#U_cDHpsO{oJ%RQ;bih0I&8NCs%%c{sJ! zI#dTt`KeE}RKfrIUn9%n72E4HXfwCI1YiQ~muSe6(>bOnVyyR;jH%%V0^4^T=+odfY~iERf}=<&AV(}4z|_+n9YIo+YTLi$`>~J8 zAT~YS64CH7l5icd`Lo93pT=~D{LVEs%KYm58Fe2Xg%Np@6|xeDtrbi>aQ=mZ`=DRU zJSW7Okp7l~E!tUj0Py(JB}p|C%B}fD$6>0SC2TSOl0qs!i7w&zn^yC5hT{IuV536u z@7$-N&I9YlMz}GFXL0}`B=~{r;Seaq&cbboe}&B!Gk{xVGHK-U#wUqrkp}ls0q;5G zF==ZyCU*SeN|I~=*4}K|Dhp80) z>Kqou(~DwfV+PG4`y>6oTFCSpR3Xy(vS!5xS0V+>Txox^3q-6IzCbmga6mFY-m4YW%qT;HeV{LneK`JW|LS8k%6H z&8*DHMm$44*T+mgbIX6=wwg*C-wofB4`=r?sN?yC$>tykp==}ri8oEZiE^9@e7W11 zbYd)WDuF-80-G>PfTncvU~wqd-MC~v{e&qj!!n}w-5mgG?GxjZ_>9T4V`D^-pE(zg zOo5GpdHq$r&n6vT8y@$?>I+SR+PynAHAQzze_SrV+i9~u*0-FNtC}|{r}yz;-r=n> zO70Pshp6|Q2CO<{T9`_7=824!RY57}vUsDNO6+vzt~7t>-9dW1naAXkJjj;qeb!} zfziPyHngXxVy_u`@G*hE5Ea{CUef%p1s}M_65)P9xg$QgKQY+85Bu@t?WjOWo8!I z1}1iP$vWwE`eWc8M`MySjc-otdlDGP8 zySAy_)z&w$X+8m~718P=D2RvzWH1w0mnjxohnhASx^*~sCTQ|1`L9CdE88_~#bm@o zcjQ-V2!+LIlY9>DixP@q-TQA{tw)>>4cjYpv5Bghe`9-g zP{t%!R+aE6$6bn6`O`Ij9e!W^Tu&Eco~tVAxV0nBDkI64tb~%1_`8YwflmhOo|C6- zr!{XGdO}VddIWmWQBdTu<@rU8p(5ZLrmbUOVPO$t^0UVYED^yDTFbq^-f&&GKVEOL zYJnLYvH3+u^eEzKi1&MUw)WaigxnxHF5k{H%0NN2$b(p2Kag*GN)I z9g(sPLJAN1IlqBM`EUatOxN;f0(1q1v_eXZxBBSSO!MytRkdB@GS_$<-ub8mMe8}p^NK>x6JaY}eO0_I; zTHZj$XRT52>Pd{Nvt%^Yz|Wpj##ZaPZi8+02z80*K+b&#r$hlfxEWYY@yNU?XmsyA z>vA378&sUQgpg~|Eh`k=SiPgz)om4P<=A?AN5_@iEAxRDYpEF1L( zf;z+s)bSr<0j{GMb#|F8?6o0NWz1oPIK5wkc3Bisg7Vv-6~kes-irTWH(X<7)~~>< z66NmzOtB?fqc1 z^nIal3_Vae%r>5&4&>9%Z5y&~=ro#frfV7z4ym?~FN{u~XJxP(|I5-5g+_>4ki(2m zloI?bJ1Jl)Ho$lt9Sy?XJ?@fG=1w;_217so146^5qWU=fBq5GSH!w!4gT78ssYAZv zK^wHg6|Str6jyu()l^gRl`6+l7+?9?KWg@h`nH*{%z|MVF8Pl`w| zA?+CbdU?*g^#jQvqx78BFNxXdn#73<6{7QM6de!?>M1x$J3lyxPr)-{;$LZx6Rwysim@f@hhsKil2>S`PtdRb;!!;MoKePVaymunJr{DQ4A6j)wbFCiu z&1M#q<9*gza&}%f|1@z9l3;mq$Z_8%I`|QN%rzISE2WN5;-=_$c2Z@_@{?~>II^n^ zB{|;X^mubD-S_fy4%(vc#%r|S?WEe^kP#|o{7CqhDl+cxWBO^8%6aPOaZmC%7FyBX zRSqR^Ma+EI+A_Pfh;8<+W3QFk0L7?}$+mZ6{1Yj!q&Z)aos1Vm7o?&4kfva(k|^28 zjhG-odJNK`M!$H|EDUP;78nDuEz*!SYW-^8qrlGXQT;=YJExykLW;7OiiuikwK$_5 zTfFSjt#DVSg_ar~Rw8UD88tny>0-V>#gPQfRiAxRPViSo;!n*I1~5Gb_`D5v&s7{4 zJw+|P8On=N9Y3*S$T-r5qK|yh#)Cm`;FgF9iYVEgn!eh?OG*uROXG_Z zxPrC+Hi~i`*YQqrIbrk!6`Lvt0AqfZqWgWiqD!Gd%EJ&sUShnT~oF!*QTjuxZm zSj&Meb3(kgb@y8S5AxI@qAOb1$z{GA^rsxg>;$`ny(*C!6B$m;%)Gl`F)?>ZJdd#$ zqf4_pJzue^suB*EuRpLOLt%eJx7&S3TP{?d@zre_E^x}t9w~k3FwJGE;?SPnne!DI zRuEC%trCmpx%>U&FIV6sa(JaisL#DMCF%??p1raJ4J(C9Os&fD=@xciK@EPP#{-yH zS!PV#Voixo@wkn&5gJbZDhnnNb#Zx#^S{M^U9k^*zquP``HIC_+N2-{Ra3@q>xM*A zv>sbpoZ7a;Td0DvMPP4JF21LVq4Is7fTy?I*}-ForQ9f!rhT)&2{x678> z(x$F=exAG6$JpBj0Yh~7L2vW@HH778p%a7`ke%yAFlhhS0T4j$>FT}ulfmdTh$sB> 
zBvOtlWZ$_p#DNF0`7m0iDh=NzD=O%s+N+NW4H#jmrOsNBK0zFizK7LRtA8*nyL#4~4yeT1L|3KJmHqL$wLZVr&jf8DSh@VYThl?O`xGnqVRGX$4N7<61 zkRBo&9uT|a@gY-T_tjl$!aME`E*^9{5%_bQE$82)E*LvnwhNvjfOVB8`ulq&k7pld zxjy=jIcOE~CwbUO8hNRw>Lk&I$xoO~LsGe$Q;GUh<3Y>Q8UC{q(WLQE=R4hwO$T~i zcs8ZXN3^J*?hhVf1*UsjIC=4LBblDfdsD6VqJs1Yq9B9hkzUl_1jNIqx@rtX%N7Jka+g#GbEfXZ5B zIh{U~l3++E#<++|-ji*6_!mVj8{{wX`g~hV&D>(#ck?Rehv22Xs~n3 z=5mnvPh=_PcWl)3!-pJrk#6$E1WDOKNPYNTWnSl%(^aoyyRl{*8hTN&S7YMDc{+n( zN|Y}dbZBAm0kfNR}F6LHi#br0ijeIQdp z94gMvvN9{pvPQSQGcFppDI1bg$8wCag9ISmQt+}OAA3S4aa?rd-TU`*2nTg|+%dVw zJhk7yH)d;m|Ef^rWV5(kPc2*qQhQG-V6p`B%rotpNm^%;q0NdtqOy!Hnn&cc=zxP6 zO`4T+7=5Nf3ZE$(Tmp)#>0eDNu$3^y2GBZQS1y`g#lN4XyP*UFmA8M8VBcc6F6l&& z9pR~#AX*FF&R&k>$u{y5G!J`2D3@xG-u zTSiHE(Dp4GGn}g74V|I#S$-iB6Pdz9+@7!_Z>wc&vEX#m=TqJiF}gOKKZ*LG(~4Jz z%;Yey_ISM2F|sc>y%W*p^usBhb9zoCz`(I0p{ z?6?9QFW94c<6|gLgr8WI^gDW_bYmcsnnUJknZmYZs+;eaHlUX+)rfdV1~;y7N(WHZ!&??5{@FVeXpHck&Wn@s2DLHK|icP3(QKY+0bhrmV!Fqtvwj41@&FC9T+iCU)S@x3LilZAk{@ROV%B9y{a zpt`g+*iHYQ91~jMYm!SIAE+Q~NtIR8s^~&rncH6KRnIibA09LrW4p6U~%lDP}h_ptXu5d0o&<5#c!EFU=i4`D$T-Y~ z;u3tkK0>3rXs!l)yu9Z6EFzfhQP^MH1Fuud-Z+r;*~hgko_GieiLPSZUkFJar&BMd zYP_f{(#!yi0OvhDjz(JUd6YMlt?SMYZtlFap#S-GI&Lcl~efp2A9?f2lz|@AK+k9W7YX_h( z5ayTk7ChTrs@OG9i!``kqX4ac3Zs~cxzll?703+u=Sa(7;PUE5Mf)8GE84Jqj^$l-rw$?Uvj8td?htHbr~&x`3sgC4lY=^2B+AK|?d6K!vpr}=_;sfB z*mH7lyv(#Rh2iV(TW;_-v^+fY*Lp}>SN+h|R4*%bMelFVSEydUA}y9wF4En7yI-GF z>NvH_5_C`5U-P6no!45xT#A`fItm7N7Va`xpEpRTZ12v|!P2g|teK z1~FViGm?t=Px6;9S=b(BlOAl&*wg}*1C!5Woua!xtV#pQq+|9+RMaWw5XxR8)+Zmq z_Y2K?qnMETg!n8)qpJ=l)_?S2dz(sH0nR(D=o5vFWT$&O@xQnL4h%U2nbqvk8Pkoc zPlcbjipIIiin8yxIlw28Lc>Axfz6(48)=R)@}!0)Mdb8WLYu zyT9WLzsKh8V}X6}x<9&N;jYE)7Jd%|^A6+qCh!*M{fso_=zL!$d^f2hcq1KJI{5lE zC6X*IrP(y242d9Av0YGNSt#ZjmSqO7Io+WZS6DCZfy%&{$(bM+*0_1TR3w}&3keQP z*w$iI$}wAlsvChL1}KBDf~OLdnUb^zR^3wIpN-i7`gkS4Tc9jQS{()8QfLwnlc@mN zBB3Z9O6EhP9Maq;Y;fWN1|_?hJD!fSD7re7r4S;U{=3PdNLce_FP`Fc|SAwjGYuoa-yCRSo5Kj zlWRomVf%VQVg%^6KJ;O{l;pnRX@U|On%75a-k*NA{BpmQdDU4o@-^!uGy~v&6F4Urxr!3zeL$N!_8@uF7X#FcMevXNtTEQ6vA0jz^lkZve(#e?S}nOEh_il z@u}2-jo)gr9+nj#OqhhJVNoZUHxqMLoR}o2lChN;N{j1skR}m%l3{z)nd0QO8Z-w% zCY?`2U`9!gi7&xuOqW;YiMm+3u?83uNmxx{6i zaF@nyTNAa~3(fiKi_+5qRw6EI~ z%)bfliMYR?DS<%3TNvPy2vHXro10w7tlh9KjfvO!39)(I34x_$Um%smaBsUvj*x`N zNj-HYA$s5mFDx7TWA+l7=m^)9_f4%FF$s(3X8V1EVHL0YF(0$huaTDW#M`T&?ro4C z%;4)!@XvZybAJe}8PJ4M7*Gg*J)%~v-{lrx7aM1c$R`Cv2iIGxZ1wAU(CK0F`1Yr~ zB-FIPx0wHVqQsi`W}{nFj1sjw!5c(Ru(T;7T9_rOI}bo;EBuyT>y78HJNsC?Pp{`W zuRB6wNW?;NqSg7&4e4eb3F~0%U8sOjz%hT|%IXdPHf_4yQD!5H)9a(7$#-;Ly*T7u z>kiN`Wd*5W-iNtAIuWenIi&v*JjX<3%B)LaL8uU+DA z^16J4@K!1lIiCm59}1PW6NPp$T0s*lXhgU!*g`kyrVU)1C${#^Ph-}KxqPEU?yCb; z`xBRa{Rm(-!+E-)gtB@y4fbqdXJ#szR9Mipo6YqDF}&av3cfF`uq^PY0jkR@@!)8Oo6Q zGiLT>SIZ8L`7nj5P82{!YB7rHT1U4)p}5H6O^lXf%w|VSyS$Jxd`MZ4QYFoeo`!u! 
zcc{3Obno#x)cZTSZc_fPoYvr|-%O2<)ZaJJS$V1{vIo^*SQP)1(Or9*K583uL`v*x zo+1oA9ywWwf;IVJ^m|)Gk_X$uW^x5=ziGpqE%856XfTPX+I%;N;zjQr|EX6vEzgPW zgS!}4+hOKVAtlVJV@%;|hJFAlOl%&l`>OBfnupT3&Bw{#QA@0(A9V?j5Lp;P`vcSQ zgxuYfvbXP6?Du<4-5s9huWgyu2fbPg0R)nnFw$B6`H%E86_-_gTk^rm%s(A%3^<7K zoqXh*B8B13Hr^JGpJS#RXTX0bUgxH3Eir0UrvHdKbiB8mj6_Yw=5h~(O}PWb^)7Z; z+#OTSU>3|V+xez{n4`R$ERHQbR5JRrj#z#?buL4lfD&al@XVnalL*In&qA#~!L$QNLf8u93mu9z%TJJA^8W_%Mq4GyK%0)w6wm*$+>WtHn zPEJl7z;1Z<3*=_#@Lz+o9nL=AOdfrA+FCmxj9z1DpjFX+$+rMR}g-3EGaqN@gDk2dG0K=*kM15gRoZ z3hHRgOl0{`Q{^lU{dhR6s**sd1B(^~_c`Mjz3mza@wWhNy=Z078>3-U)fGSeCS@|r zN<-{_{w<#@%_k=d{kg|`HBXlj`^>+8K%QzAyB~|~80T-HUjRC!pqOWPn6Q`Hcv-7l zb3o*=oOL^E)^FZMC6}S+6y`JUDzqWy|HRZYZ`*x&BB7=xIh7Q$f73Vyf1S?G2<|Q7 zNgzZmMRQnAI<1_#I}H-K*9&e82b4Eg$0)41MOzmD)Dt-(60r=opDCSL#()Wo=rijm z+mjOO4@|Fn2QAjPzOM_~-xiu7O(4GJQz}5AqmEE1*@z1v97T#kOZ40ww59cgjUo?z zeTg*Mhak~O36%3;e&iCZLp}fSDe@6cLX~;x9fM39w~9R;dxmCrXTa==9H0)P zB>DNoJ0X+Re(8&cW1%f@WV&*pM(khYooQ};%;}7J&F&1?7jN40qB+YHYj57%!)JH2 z_k2^Nn-Z`f4*SlY%SNzgYj^Ijaz~|~Jl9gJ+ntDW`i7YU7MV}TBd#zlW+-XY~!7IndoQP$;bb7 z!p%jW!sx0~s;WeX-U^gc0}J3@BCAgpiU|3`1Qt&mCESMs=PE?XyK973Hma@Eky>I@ z9fmuqR$oIvgSv!?wv`b2jO=Cty-P~-r;+5;Y7U?2XGoblJ%s#ZFl9BNqR0>M;TnI0PM0gbc)n`5y~X$fKhA)Ktd^EL*Pg>O!NG^9UoV=PFyM0~0wNY-mqLa`L4 zcbSa=G6>oB)V5B0OohmyqEbt4ENk!WPvsS1q^f#46Ci0Rnw?hf^Jm-hGl#nqyr0eP zWl^ZDnpCn2kT zqK@aa58A`~5{#2~d77*!PD;DtR$hrGrdFgi=AFijU-jZ_ATBxj^cp^Mo4Y;dxGc!;aFejuu~ES%XR{b%iy=M z@_I$-e{F7oegb?Bw~liXr-v9Lf^j@rR(}aFm2&om7!nx<5((W~RWFKzN+kB)HD9mk zginWbQ}2hf%Gi|3!Fc}aQZqgInTqPj3FuIh30rREkuFni(9N0kUT+q;h<3Pa3nfzq z*EB0SNz{AfdDVc)F!?Ioaz*41t3}3!d1WF0FWakC`z@z;D-R(`FFU@8E~}VHgCu6J zIGv0Lf)x7od)AwSRda#rJDM#>`x5oWmL21-Ah!FuZMXmxE_JXb1ai?Q zG+&)>7}|6>;y>7smKn?4YEwQ36rs~eqY7N1QB^jO)VVZ+;RZ&1%OpdQc_LG_wK}Q7XQQg9ADW2=F{{yydyhy_%*0ot=S0{r+XQk22-&^XE z)z&ov8>mN5dIic6S_V|q;%AaVtg>^pz|v<#JEiff2V=Y0qBGvz=(+{^q-F4$y3@2< z{1*i1`sOO;e!6$IhcJd6>N@D7}U%tAoWgi{shsq&#!u9Lono zHDFe_(-po(;c0CO#)@{Q*a)-v=K75%jZ6kFH&ZGD@pGczuMJ8fCE-=Pj_2POxi1kz zx*t}nCj#F6Ds1}Mx_Wooaum)#S6k(vCy-YJ0E4T88G{Do4EdqEBUHBdIl&?b z`QIBgJ{>OB8!`^Z`xr^Lu~0^YEY3hjEZudNt9WlD$wPHh!6HG?3Y5~<5G{bpjUN6p zUze#6iQHY8jib7pt?CHCk3zM$i=^2r@cy>ekQ1O2l98jS5RqNDKS!q8Mdd^9rlcG65 z$_h<4dvtPY02k&@Ri{<34VY9n zn+Y3c4N1Ibk<=?JWyClPGoN%;^Yj0_zAiVd708ShZX4DCA55q0lVVGJpJ3Q3T{8 z@*#Ovl`)5TwD~%9hb{PFQepbPT;ds0nVlS5vg=}_6{d_sO-t#EFaf<;CPHT1jEhU_ z0897Os(x~0KF?A|XMLVgi-uYyh^fAFn;G*|`MU*NTxqoc^JHs!7Kd)%BjG!OB9Qgq zNto}4XE~$9N2WRuqo^QSI)1l zx-B*#au1AInefrn>UZnfP())96)VZ5WK>0lvd z=tCeEjiRiF{(nLCE9u#T(tFOU@FQO>z2|o{*8Y_|qWp=59TN%^o;qY8I$GU)Ss=DV zC)K4FnIa`D3hw>Qm>-D~niUA=Jp z^ZrDR#r=)7Ygc za}3$HncgwIdvlSR0%*!gR0a>3Y^a9AagDhW##QD@6DQ?i3LJ*#1(X*E>7!-Xe>K~3 zlsI}*C(je@58Uw4(x6il)}a3cAzHX@qi3B$;2*rJp-~=O%)!W&8dhZ25v<6u82O z{657JCnK|~I6FW=e1~K+ZkI`y-a>AO%0syIWlI`9IU)J>@>q`;pVE2}C?KegQ719( zhR;s(vAd}(k-W2?a~Q5MdDXJYa&Kxg&v&G<5q7zf^a^A9E!1}o>?~wcmjnizt+Ayg zEEL?Ib@TCh1>YyWkf=9wM0zC#02bqhB(YO(a{?6!x@=wJaxswjY_Udd&h()gmW@~oH6#wVN5rYj zhTF?cf+-bLP52rsCc{pNyH+Zp>}HnkVQXq7>2@0f%78p+TQpRKkoaVhvpm39SMgXP zEL7+Z43#aJmeNL7z7vL6P|cgcE8x&$x%N3xc;aZ%hiq0SG)D@Qkp`C6$sZoS_T`4V zwL=hh4`3At00yhZMlWi22VFR$ERzm3V;*9UcV{Y!)fQ5tVq%Sxu6RsvHla_4+FkBa z_!X(7N)#`9&v|qd3TBd*Cc*T*HdHJv{#VxX`xYSBvPTX^=@v*u^ACi|su$BCeBkmS z`r0?@2_7jLvnHy4Zm}@(ZoBGs7*})|dMZ%h0UP60^j|i&ikkJ*p4-(F#wJ9Q7Y1dcLpiknw`=mkq?S!lvJe~0ptS6QN z5|uT9+29Z?Xff$j$mluTn%!G&I{6wSZ%kumsI7gLFgH%zdi$glU5 zAjz>5Rgn1|#0BFNfvY@HinrcQi^rl|7}pO#_Mkld3w4ghO=s~-{AYQ9+8z&Aoyug^0pV5Jt zCd9ozYwa8;$!n@KS?(DAiv=5>iVhYNIxE_Y{Ghd|8ro26;?I(W?c;fai-YVF{4@L! 
zTWh~#S$5DsvBweV!Mffj+)9X15BR1Z@e^)x`ycaQ7mmO6aO8L^P%(Wwr zkwb~+n2=P<4-iv_7K^Pltf{RtJs#PbO4tPsbT=LAx49&@A1}1{+QorD6a$W9`6mEB z^+wD_T1=BVF3?T7+Son^u>Se%lA+Cu+pkc;Ot;p?YpXzn0w~Kg0$J#40GFu~ux)gG z`&xYH;gvJ~{^I$;vALQ>l7S!6sUrEIyz{FYMX-y=$g1cZtC=mUdywS4NO|Agu&>hf*lL5`v>sgtECuhHlw@Vv%J#gtH6|8Z*( z8P!QZd?ak9SEjh8-ZoQ+*kKCpS7WKXbTx4HTUqvGT;69#ypI6^Z{wbm!NSKW{J2aIX!u{*JbmAm)c`lsKAqS zC+a`+2MM|xi@?AULr3ssb|*HBT^yX1I+#Dz8;E*1-L`$D_be#pI#r1}!He%&nWj;R zU!X2op>Mc0ynbsV;CM*>{2WHef}%rGgSTJ?ihdgec)LrmX1X_85E-V5EqB;T>AIt5~0MG-JCF4zzrPnqT5=-$>+&jmgM{ zo#uZ`P~to2qqVdN4ZE2r3MT4h{N}gQspcHNl^S1u(n8l(pv3`pO&1pdPg|pozK3L^ zfhS8}X2=w#9>SG8$_lP@`VYIIuj^kGiI8+?&ys=43y_*%g>W!@+0usvX2IR_CQr)q z?K*dIVgfrKQa7KdcNqU9gBzVEWt2;5)C`<_gg}@OZ8R5Nk}sIDNvU7^o2vk5m5Cw& zpFfCOr(ks{yBETO+F4F`l>R!F--Q4UN$^~Eqn)@u8;Trs4xTwPO%L_;cl(?>W0A@- z>}`xb(*~f|c0>wpX=_6EuODB-CEkJ*o~K}&Vj^@;lw>|@2`tjADx*hI*tatJ2BONd zg>l|}^fl;vzi1)0`L8#0;DN4f%QBFmAx$2c47<#JUG`zzv?d6+%rqMqjhaiXkfLFg zk5+Dx^GQ5$*lBy03ExOA-)wo^+vtCer38O%vrt>}EIa>xXJl5485dECQKhIbb#JcA znZrp(wxwn%9T*tzsxk756;Uol!clO$XE&d>T}G)PIBB2nE_$==mD`GLzN{&pM>JsreuHoEE9HtdEeu? z%`5@Y4GPx$4vV_~d@Ly8qb5NxX+8>&HvIyBB!cg6-Wy6KgL7YPkyeo(Gbo+v0FbA> zq-@;0DedXfRnFkbC(GRWz?p-G(aS-pxtb@vT=z*U`^ec~+u9}o!|wcK@iyQJCgu2c z&Ug2}yn>Rf+>8#YhJg_aQBAehKVwG@CwlP#9M?+idIIt#$Jy&Xhr_E4wU~(q-E<+w zqcgg5MF+C)h*E$_`2L=k;xvQKSl?Hz`jFU^3PN=f`4mV2=?1pEpxM$EaNkY zS{PFuOTXdtmpfNlwp>q3Fbll#Vu2J@ek$dfXy(6~RwmCm^?SAMEw6J`8~%=-i>HNA z8H*+-X2K^vu2;KqNEY&#gg>wRq~TcJ*Qb31Xa8bjv|#@uwgi*qY4uv)Ls{F9WmB|( zVtXA)m6XB+B4!Iy;k6;cINKJDsc>2k13D8r8XX&9^w-J_Q;0h4sV?Br8GTZ^!G)IC zj+m)&no@6{{JWrfk9oG0G>3k0!j1%*MC9+etH;sXqtyRfP00tO-iZpl7K@w(K*L6Y2+>eZu1vPS_XV_Y z`sRD`tHMoP(e_aVF)$6uB0+_Ea}IdA_~#SLh8L2M=NN!m9cN%$IBC%#8h?NOt*#ez zeG4-P^Im9l`>!1L313{-jn1g&v!6*;mkj@$F+4cGeoY70Z-(~0`?6_^3J#_rY1X1z zycpeHGZXd;38&$W1}8Ls;;(-QwgM2Ot!!-Ecl&4RdEtjwS_|3k`8ksC{7jfPWsWlw3En0iZ*>77Rb~sq6O-4EtoKTNZM`Aq>MyocTm&<*pL`?Osnj`PR_~^56ZI2&_ZJR{(Qr?nP%mq81sDu)&omkusNyh zsGr>3fO05m*mui%hBT)RF>;RbxiOxi+pTY}Nqsf=w2H4eN3egc!rNZrmF52bQZ9J1 zI&ZI8)p}mAGxS5*tW6?g!nInC%$q(XO;55N59zYkiTk6blf;|O!>%}q=~I9u!ij>q z9KKX;zqPIZNOojOLVvrS z17c|+B;6aVntDHfjH`DaKSu3pPU=)KXpSGQuGfm)5(65y^g!q zkQOBzc5Rw7w$p7GLz3B+M18qA^rdhWu;BH$5N^1qouv?wpQ?{-w^(KuM|2k}qV&|H zm=qbE&B`m{G7_gw2bB_7tq<;*K*kT*!Fh=mk0=F{wo?tqAjW9~wm8@$;pfO2h4@Hv zUQg-1MI&4ahEo7(AuUk93cDj`BX#PAZr!F<>;L@wTIJ~8#NRd$_9{z`|B>NBlF^6O z)l7v&0UqugE;s|BalH#EEtOOH%l(UZ{-Dt+3wTXat#EH!kbFBzc|DpXmi^aiyoBJU zIL6xj+?jp(`wg2MY6iU`QcAGtV8-SV15I6QV6E-qE0%NbJeO(0fYp|E4a4XhTUZ#3 z!$SqQ@3XZ$H+o{w-qZcYRosgMJ39rxF}9>3@NnHW44li zgABiL`sqlM7XECjt2Js&jQn##s57{N+4{*kFMnxz>LYue>%Oe67(7h}jY7Y8LO`A@ z{OIgy^XdT8{?{eF8m*dkm))zTfA$K!27(jDa!8kJ;uKt&DTV()Gw;NBY_SKz58jm> z*Y=7c?g?0aj>MM)y8Dlr325|5c|Y*;685;TyN(*I+JOwERHrcmrfiVLp0xVyKFO!q zM0-UJPqTDCvGoY!Y-&f=*ER0$YRqW!bB?PeUrlZsZN)nhRf`2+aZE~cEIaXogAsBD zUb$t?4m{{D0)@SDPYBv z?p<}01bP`HK*tgCt!RqlX``ADSwMpxQPZl3Fl9%|hVX(kTnU)f<5?Xqpk>(Svz9gD zPN%~-p{I+2&guUZj$c0j8ceFvUrG5i(AX+NhZSW*5tabg3zW{}j~OUc^!)`Dtp@r0%3!e$gDQW2|QwZ+&0W*ffXzZ1=ivKmz?%uSfP&X8j#wKavJE zZv-Z$YcihH8&lX=I+J)z5kcRq{kjyk7=m#AyR)$8nAyvu&0fRzkIVv_jQ5zDdE}+o zz4VvpTohkVXAIaTJ<@ym2XH(lDJPJjG( zo(302ThJ6XriK}3Juo_^QX#O>A$KG>|9p-7T*@Co>{s6?dc6>y_YvO4KGgcjTih%nESo+yEY-48eJ(Tt$Q>a4$aA5 zg%e;YgixrN9Iyjw4F3>42^B%3HsmE&)!2Y3uV$Af||HkyoaX^;cpulkgfG_q0lc4&B?Re>STl zqwqa|(e|B79SkTm7yMDwxzZIy5d+s5KwHF$2$>(E%Kl92Ot8vW8I89rf2*+REISHX z+&QqCHH8gb?g}UCcL@5{2_A$uojdG~U=g>pgc!YZrARUS9j0j8WJOGW&8r&t1D9Yj z!(pC&zl{13N3N(Mss{oawv9%qR^z6izynST$YeH7bBb2Ry`lLS#3-ruIy*K10PZWFcY;o8SwbW1<6;f!)^GT_y|W98Wg!XXB2 zheQ#QkZ@=g%qxqg`9JSHJD1@hR;kk$!8Q&2;*GVL6^qwKLDsIGXB>QPd1$F^c25op 
z64L}_+yaxsXq5bXN=5aN#=TUsdpK7Dkkx~a*uUA6NBXVt+<}c5YT~ZnaIf{o#^U>8 zIW735k)>H?tW~U+9Tx__VG66mmagZ`u zG-JdNZOnBBKb6~rk%a6^LYqVC;s&JWj%R)%UUC%B940!M9 zKPz^0^jm(11@YQHFP$II>wnP%mIm6*ALQ5r38W9F?lWoqn$~2~bYq83(kbPM; zje<|fKuirJ{vQ+-a>wpH@`=iiy=_lNB8DGG29&%tp7ny}z@E!F1>BILF+nwv&a~5S(|nwx_%W@I>e>f1m+{~}O8dYC zW5#x%+F<2T@GTp0B#D%^2R*?kk@8krB}5!M#sgt|wycZ-k}3@evf(kmdVVnXy87K| zPO}Kdq{g8f7%k2_z++5?ZeRt-%IdWMGF}5|n{QKnVRUKIutmufi#eQX+vX z7k~3+9SU7^!z>o@a!~qTxY*~%yR_d3^_kkAnT07gakScU&7rr8v3etjjeBm{7?A3? zDazvWMgYSY#;`e%?_h-9C>Gk>-_XHhk}zV=;M*%*`qx0xvQ|;uk%@x9nKGebmv&F{ zJr`r7D`4>QC24vdIRUyz7u`}4+NAu6JyJnoBzcG7xZPKqaA%Em)_6;14k0UqvhDyPG=lWoiBK5)t)JNp1+<&S@|We?77C3hmdH*U?>^?bvczpzR#ww$hAGc|%$W4oJ%f>OV9bGVXM~*6fY-*h+wo^`B4#|ZVH)P9 zE?e;6ji-DH_cm_8&~>C?F|HLKe{LV?Pi(Pqt7FQ z;Y7)|r;^KQ6hAufKz+_>cqGoT*=h9Ebzk8&B*=MxEbVQ~WX?OQ%cYbu6>v2^!nRbcu!j?HXz_rrG;H5@D z&?I9p*GY5Iz>*dJnDu!!Y6pYv{{nfP;m(FSy+2?5n(XnrsS*)@&0`!23ehe_94Znm ztPpQII*G zUygG_RiE1DF7+gbA>VG_@Aa1aZEv~(pN>U6#=Us-93;G>VSYL9;S#C1o06aXUxql% zGlez}cWo>PrhRJc(8z@iolMX|RBGi?!7Pd3<59h7O!8WN?!Vy2?X>0C2Ee7L3e9K~ zl9x)p3Ea?b+o2*EIDd|w+*A{~om=%B%(i9vsbf3&7@q1&xuUnywwdG2TGJBpWba)! zeyC=-w7zDNmYg=70n!Dd2%yxyIwrw=FZQE*+Mr~}{&c;qQZfx658e5ItZJ+P&{rmyD*LCB}+~>@k`QLf` zbhG*Ri_1^oz1bggPQ(5r05{+LcuxbNrS`rx#HV+z(64YfZ0gBSFi`Q&zT%;m$3{=V zc>2^4!ohk`@AAruv2F9f^$g4DeMVjN@@h^)gLi~6r&3I6lvk|_{~QBb5K{#tMd#_E zh_j`dF#Y?MRnzKcQb<(r4g4;M*odj5R!QPqCB&j-HEU|>hasWvEjWKNucQ;lc5iUgTXMC?_yT+sX73mB?>(L*|2hD2^FSC+_UT0f(e0ha? z>4AUmbs+|ipnzelN{C$Dx?B;iDPk+3XZ>nQy*B!I@6r*-oGp|RStfjxZOVx?TzpUr zH8xmWjVjmpZ6){OUuEUqN|ery;k*Bpb~o}fmyk62t~AHGq#i^=#tDB9e`y|r?Qf`2 zGJ~5PrXByc@u_`|8ZSgpFv-at2q+k>9Dm1?9Uc=p!JPX;t)pdk4k`z+J~StcfC+Qr zpV}TB&%WYMTbOR4s{6#bNQ^bROO1W&=x)6!e|@Q${coiK&YzDVWF#y0*y$T2Pkl_ZrS}D(jD=l7eLXAKLF8FFr%_ zyPK1rMu*A9P~H!L{AYw)(#(&gCz{f^Rrlp@vWY!Pqn3KxsaqOvq*RBbJt z?6o*3e7kF9Yq>)i`^SH-BiPw9TzDYU=*^gA>N1qd*yZTeS6fHai$#?9_SKh-G!O}% z5!6W)doX+FnWm-_%q1;bk>Jr7{;N}Mfa|O2LjNe3674$Wi@Qfju#JVSfX}ZVYx>1s zQq0*;j$T)5`%CTUQMbv?=T5RaL9_G56nJ!TOW*wE383}%i+qTA9C$j}ZgNo3D+diS zm|qcW8e_(*7bi12IuL`BK>5ScX}?v^9&G&0*4u(HUe$!B?!`t=&{H7;-s;cB+TD|@ zAhQ2K#UJ@aXVI$yrjAhAOO}*WNL`!F!W9@*fj(jaj^cg4S}SXsRFb&+VNT`Utp$dW zFuR^O0Bmr0!a53S<2R%Ue`w=U!aCHL!VK>@yHaJ+nqnD*d9geTU|W`F*EnYoG&(A_ zNNoCV-zcvGZ4Pprx&nFTYd8d&$QZstF&Tnz6?vIETlvRqgXkJDi*EXh+P7al9ODSB~2_5u&(D? zI1=qOFJ>x7&h@Po8Sd)52AX(?mt&mm~?jG2>pdvv4Q6^sbl(smU`B_TkZ8XJ{*{$DzUz4CQ_ zbbrV{X{W*PN7wbzkdqlISW}h}r@GiWl^Xp8=>pF>MZWw=HpeUxoI~pYRyFWX1UZ?f zR_x<^sUV`#s95?q{9H6-CLX*+1CEMJvy8OMV_V)C9$C9ofL_4ORKy9v#WUSu%qn(8qzRJv0xqyf;}2Y_~K-( z#yhD7t0F*wxehxNGs7@iOht=>17psmlZPQFdi||_V(X1J9lF#S=S&}lVDQQuo>WX# z5)sn@n&Ubr9t$UOCcWmcxQgsL7kn>M4-3)Ma4}u{%#H^(3+i+SeM3;A;E#JB`*=9- zJxh6*<#g(MIXj;nW=#@U#ttcN5o)5&homemeTfMUfilXA9NIQxP{|G zy9apu&bUFwR0BSPCZ>VUZoMX5Mx40=Ntmn%tEg4S-c)VH>%R&n(Vgu8Wwb#BxHiII z@zmIuGZK44#Ce0qmFm>tTig zxVrU^d5m?E+q}bj`r=D?W9UackGzV6!aysj8PMsLCsF6 z80S8*)z>d+_%XBY)Q8CbW-1gjSIXoswnay}elwp5syI`l48y|u>qr7>K)Hfj$m-eM zrdZJ}_j**5o*nSLTh|mp-QmdO7_3&+uGn7aT_klja`hPXc*O%L1YOI#g6tjF8r9D& zxsw~+M`s&M!!m5ZdK%+7jm%bL&jA}WFw+O=0eP3V!8Q$f{a!192x94p5crl7ITvkv_qUl05aLCew3+7+&2bqJmiVC!;~ik~H? 
zh4=P2X~k#IhqW+CT4rHyTBBDWm*!)q3xM;RicyK34sH$pOY%zC{%H}wyO{v)grY*= zHt&92(cKMLyPqZ&{YZBe?{iyXLd-6o22MMAA>y#0e#c_h!tN#mT2XzW9h5Tu- zIF^0&OoC`>+J%`@>a33@ESdgO@?3%kw|MX-o=cV>SH z$bFmX$Ttm}#vbO_^+uEBom$%lSdM6tu?%yU zT_v1n)0DHR+{+*rmG2F>H{Q}^rr_1^vyo6OmrRK;6DVC!gIal^iV2pdQEx=ULWn)d z0J@R0jx;6#gDL~C4<`=ywdB!6Al${phKMIvX|~!LypM~!-%_|da-Ae6?MYPruNL6n zk}`1DH~9{^3A*wxJI7yb!CEiG4JsT3QGMu>bEWKRuR%FvAm#?71#vwnm9~>x@60{` za{M*Snob=5Jwqk2OS+#WUYXv#o&SUzm3WUaRY%xf z!hAWnM3n$$tA20Q&o+lo+HtSWMs%j&%~F|2L$)Tn#>Xgq#p*#CwI#d=wzAKynmY~J zhHeBzXw~nt6|Ij1lfjQX_ryAeVwjln@X2Jsm9uiJN~63D`N85b`Hk^(Gx$0asqx3A zrj*)o@foqX5%A8_y{r8TpN$2ONd;Y^Cr%dZh+~3vM83)(Opz9n7D+FP%!w;?q*yv!6+sIRxg91zD} z&E&<6rhuxRaIHHXa!jC;*BTW1nlaPQjZ{&zqFPJf?kTw{$s=6Yl zW8=0!$R{6>p;n8nVQDe@B-&iDgBW9LzwyV}h}N)?pZm$f7xll8q#OY%V&_AdO+`Jc z-(%?Ko{QM)9)iUXn~t+&&}PNKBFN)`y18F^X%pIHqlb+~)J8RGJ#!TMOOZ%^qC6rJ zGq^FY?0U;3k>&@EaqqZkijn=|?!9npv;+(p2WgO<+Xm#zi$_|vCxST$89JQ?o+}cOSCpS{q$|Mt+&Roolw()g`?#?zCtFlUqZep{`s z{fd`f5DGXAjO7gYUIs0I6U&OaqrJ4Kv4j(>p3)%frXw5MekPWN|I#NDk{EfIY-^?C zwK3}8#bnEI`h zdInIasS!gDMJE)mK!O&~{oAgvHxo19_(AzVGu3NwEz5GBRC(<>#86TgQ2e>Tp3G*i zJuqyM98d1ncC|=7;p&^r15%2#F>j_)$sA0tsXT8#>>%n4_gV`Sl}W~_MI};7mXLq^ z%JY)zM<5*%5)!bseQ0x&5M+~@>Gg)^caEyfopU#%C{l0TCyuQlI*V+4Fzq~gM#B*M zerho_om4OB4O4izaos2_gXFWvkv@+9Ql`1z4=mgZ>|5z}qd@M0KZ)6H+7^s{z#`LP zsvEblqJ@@})L@X39Q%YRyB5#h`;~u}S`~Aj7q@PgciIzSO19ZT9PqTG5w$p;A2pH7 zKs_gNPP{UmUu7fMJt(c7#GVYTb;ftevgg%j^JM4?>E`CMZ!&8bFa)iT)Akxe8jM+C zl!pn(Vb#K+5^UWQgn zZU`{yw+*x9KI1pHG=KCj8GCzY4!8|^)u3K+DbG`&ef;d{%`s_$CC|<{_E;|I zc77R`nd7X(5|3bhLlzn@uBz(##3yK#^&Y8tv+bRKL8}tTiU2RxyzrDkZ?$C2Zqo05 znex(0PFd7cJK(XG0|%GP0_@O_{Jau~cS59I3N*rjAe&6%AW7(#Y){D?Agd65o4&0k z#9=jdjIfo^`5|j2zkYaSZBgNI2RgRb@zEe+C2PALedsbxk&31de7shUD&|FgUC*W# zg{3td6@y(|oRX?t{J9hdh_l+!=oM(4pT;;6b|f(4;qij^4`i*@5zY)m4DWdKUYi|% zE?|UM$i}&+kqHY%;#~wM^dzv%&hrDs8dT!@G`hIwr3lpuKC_(^%PLQ13G`j>QKd~C z(jU4)#NsLDL4uhwxwFuhYBcs{0i2>JbEvSq&jJ$F7M>jQrk%kpJ; zU-3J7?%{b}Zg}+c0nmIWT?ON%(_Q^rVd)K^73oHek-cr9uKJ=$HZ!33UnF)>4$`|> zLY`^td}mUfj?r`Gz8DQbN3-R6V&Gv-wbzkbip3 zCOIOer`r8<&DHL=nV95|DVRRVHFwWq`~)B6E$nv^!TE}+210U2`X+VHW?#b>Z)NgR z{%nA@4JT4^NzpJ0+bU(kjJYo6Jp7s7|3J48;o#8^`X%ejb_H5Cmdy-QJP))mEwxF6 z`IKv;K~a;U3>s8GYe7@-1AInNK%%j@c@hdS(ohV+(aC8}FZgA9_OsoniECgWZ|ypf zi13fS6C6onK&aXpidubZUzQH^w{IRh!ufI~zwfwjR|w5@wAB07xwWdem$!~yTV3`1 z-=gJ8=a8Q=^xPNJ$KX?5&kP2I&wlFvGf zA=mEd!a*`|uBkh4T2mOb;^6;QJidy|W1PxWELo@ULY4fr;;`N>Wvoe2hJ;#o&zTkmu%Y&e(#c ze!6>ZRlcamN%XEbTLX106OX)mR$KX*{^}lRdHL^twYN4lj7r?4?>J=|ssgki^jpAgeZg*J@zh!3tzpeKgBs z(DrYC?+2@^q@Exm6G44;hRfAHEl)%xAc!RaDOh_DkpsIiBlIQ0#YKxGcV1z|I4LRb z=P(r7!7yUwtutzfUIv@m1)MTV)?O!Aan7@908M{(E#Ung4*rGWr1R{XzE1 z@XtWyZ@U5;;ZfeXWtZ|xo?^Q>Kd!u>Y!Ns6iAYg7aSHl?0*aCx=d?UI%5e+TavTf6 z5JLwBPOnARoB?=)t1{4v-fXsSj@=lgLsQwdfan$Ngl%m++@BsXGUooUM}$5w@vw6% z7`G8t_?5(Gq0M^EwK}W_bk!ZZv^h^fr_(|K*q(6A(VK2cKe&n*|P*=;)7hG73OHZc8LA1@!9+QlPHOa^V%DmZL5-MmX*;g!{mLoNph#*U%cNXMtkS-v{0M zIPv2~B>&@Ho<#oUc8FCDBtk#@p(ucyRanKwbF6T6PAR zrnt7QsprtA$3?yD5TEBef-ATCAv@7yl;u_bY4u)8=|=CV|2g9NW7iA7Aq;aq3Kv0I z&B|tUY+*>JN5io54)bU#w@^-GC0{(g0Ck*;>md~yv~6sl=2n~SSJ5NjaR^mI>+^e0 zLa9QI38y|Y!$gTy=rGDa89QS50!ZB_BYKum2tPWuK0{%yvh$n0U4Dkoo%>=k1Rbnu z_@KT1w#Zet{&ROO9)oRxxR(}#y$bVMVDb-dKC4c+*dh%w$dtai#UuRkqAVO^OPCIPwedqX)Y6<-)tw>7BpN#>mk>7NiQ>H0}R< zxjee?-W=1v^ku4fTUc^AYQOaSSds~1dbc$U7-g?d%%C6($CdU9v2G{|${_QMV3pb1 zV8G>KKE$hi3H)ec>W`7^G8S64yshaDy9F77R76IsvL_2ka{}{IPSmsw$%K(gbTDOQ zLmmL_oop(a^{613oyiLjixE;swq z)%@S$nVw$1xZch=Snx6-^hFE|22@Qj97f{2yu2uhMsBaHnvwFH%d3YlYdC%{dGU6C zcz9sFs;_Tq?E$&rvTWrg1t>s0AR{fWZJ0ay=MLBf%JQ{$996eJ1zGy^VtaaxO|%$s 
zO`Z6``%U-zRwj8`Kt$4^=89WmfOx>q&w=%rcTU96T}4o28f|BxreUiC$a`(zPbRU&` zE9us7T!~tCuwDbqGBXcNmSi$-pfN9RQg&syH@-qe{+M%)dbJ0v;pSABbZmaM&&Sp+ zyi1p><`l`%N>qNO@yMK^n*pz7oZ5PvxmeGGL&GkK4IbMbuZ-9Cs*Mk5wO2yVm7|hA zmj{FhtVNn2_2eSmmQ{(1#R(&#J-#7ozfNv)b!iCxUi(0t6JLsKr}n&9g`q#XX%5pfhKFI^-#SJIhHM<8Wklly^*!X+9KHWOjMc^r`j+Z$~Y(^ams`$S&H~kRI$rP-YII zeT1=cZuUtzaJo<0i7JhQ*WT|m`fmDcZ7xOl@8*0)Jm}<`w?ivW@P8k^Q77^1knc+C zkn@$#d8GO_S~UNQdOimC%Uo4nUJK(Ry>-r&TygVwm8$*>=F^XY{{7F?XmdH7DX2hA z@eV26lj+8DH0;&|xc#bTNG?$|ko>^;OcC-16sM}uo`Y5=+Xg*fozt}2z1#A2N1iN0 zSHS;$1TcK1xNI#aAviM^B72av^4TCAeRT4vES(62*i$h9YBfqPyCg4X5TvqlrxI1m z^5F24Lnp?H)Z)i1TFV!hb-r5IB)Cob{fqo!D?p}v`ep7yDoU+YwN?Bl^IO;z;bc2cJLfFohF!JhBC&68D8a-Xs$lEvq)Ed@u~aaFD# z;BDAwLBkjdS;M{Az>N`SJ{Fo!giwyUYHv`Uu%kp=C45dOTnc51h8W)_?sp#>BMU?~Gj8D*k@kgKdZMceiO5CBHx_6{-wi3;jIR z;r@lWA%TXE0g}O&v)~qP(i`72k^k$?`9ekwfQTpI-cFu8Bej5X&YUnkTSylMdm{W# zF%IG5mgJgtUUKtJ<~b6UI~MjmM19d{6FUo#6j5Hw-o88WSCU6)58^ zdOxRMjLTUilVQ?sJj774SG?VI0Pq(zsXF>pe^xPNT9~|NVH%S)i_Xv<@l`|%8t@!5 zN>w!3dOOiK1tF_G=IxpC>VYXlzegyIy$evTdm_lC^Nl8vI-mXZp3t9t#zBTBZ=5qh z{r2f&RsF8hWmt;mIgPp|KBJ~;eqDWt&(#OEzn4gJp$3K*jm$%v(#-^7pb9CZw6jkr z@*VY=EzaLM5|Qa*WNRN+(?wp47ce{{36VY?(WF*K>Q1^V)^r1BQ1XJ?V5W=|@AvP7 zc9Qkf!}77}I2<7v3N+;6Gd^|*o!93Vc@itRdbr!GL!8SVsxAXpVd8=Xo@-`x;FJ?% z=GDfIW_`MziccX)AC)R@I%Au5=Pk%;{n^oM6N!bEQwPf)u*?ZVbQ~zC=psVFio#(^ zn#&83&7~`M-ogcOhmNOSxLGW8)LYn;u!DyE5-%xXXVjL@(X{ysTvZr5BK~YJ zqhe&@kOi@id#z(})9JVCD83}t)y&lmLHKTLm3%gaq~b;Bhd}TJF5_0W0x!L zeR6ln*jD7b0g{!#V;g%zvs_7KwLTtynDP z=WLC+5f6GjB2xoC1V@I|LiTDUr^N9I~gK{o;CbSvdQlE$Z@9d^{>S$*+w9u-p zxTXqaF^iB_uZ{Z6^M-b~g@)2q)O^mz{QIPt`SkL6v7KP7dd0NO4{AK`QnU2!LWm3% zS}hh2^=Wwi0HHqPw(b=XE`foRV4=$S%0N+^ zR08Hv6Z(XL5rR5u82jZr?OiyM%t^Z_$nYqb2dRTs<-Z(K^M8nNgo@f2+cQ!0=0*iRX1u@RU?i?DJxPV13ol zK$F$(^Rf2eZR>ngtS@ zg;9l-WN5m>0bJS}(qm;8sQt`4Pm;VR86homeXniWVOzuzCZTFbmuK#_k6+7yi-2m+ z8=0^*lQ`fm$pRb1SMPdwc~#+-hd?@w9w+J$;(?q=duu)#ylYh1GwL}jO80&reya;2 zm!+k%XPIx>LZ9_1^>g#G?{Z!fLH+{cg@GRR1DBF9yRe*GsZ7#movDb<#ZBa|pgZTp zS*duIH{8feWY?@eB@}j!Mj1yGDA&_(B|yfos`;O~!xvO9oCL#F>fz;cn4hv*YWqWe zsUyS&6zB!``MQa4?;HfQTbN2n1a%n*VBjQp;9@Gju1n>1Od16f+2`5-8EJM-&R1s4 z-KcLX2z$Dj;{*M?G9|83`hr8AUlnT2Q=7QO$?e4Zkb*G(Q6~sxiS?nkM_>lC9iq}SrB}#2*cMla)hd{{k z-LE37o?~v+5d`kW@VGIg;Lrr zOWP-9ME}qvuu7gR7&5)mZ9J?<@D@+&Ht^kfHVyJON-z@w-xAeb{VKplGB$cwURgNYk;l2E z0elEHCBSp6(eO171Pgb7LwTA={wxYBZaU{0(;dm8@tje0O;^fN=SnDg3CX{V5{r}*Rm%N%|E$^T{eGCWvh=NwRZOcTZptt z^_aLmML4j*ISeD7qKE^#;tiRA4$ovEpwr&)*-nq$Z<4_ly_heC^=FVbr~h$E1_%wF zJOs!A8r%(?0Qw+N`+T(qv|;e-*x*lhZ3>D~@w06qZ$*cGG@Zmxc?gsqPoJ_{f8Pc~ mCdG)`!S;vn|MVK--{TKj!raj^(?(QCfJ0yVw3l2dN+$9k5lKaSWpY^?e zp0&RB-$&EDNrS65wKRlE0|-m$96GHA#|$N&HUO-@!)9RPr({PRVGe{HGA{DcGm zP<`;%)b&(1^8vZKyI9*fT7f+MT&+M>zIN6CfbVKWojp-&tHzup72w%uuQ%j;=d2>k0x&l3EG}mW_@gY-VqN9`DOm=IX-nj zdM>0#^*K-!rQ+dH>Y;b=S#f50evQ1U`Qzd7@}bDg;BV#MdoS0`Ax}Lm z{@UFtahK&Og1;+oxI)W+mno#1z`pFmZ4#&lZ}Ge+*##OUz&*Bk{d^gDT3=MXF6#eL zRUH_D|MKj0@6hi4?62_CD)sn&StMv9>S~7N^L@}R`FZ}+vd`t=@|Rc>il0`ux4+*n zo#%11W%sl$+&ZX>R9^Le3yL@RzO=#Cenk8mpLu?BYiLVZ+eLlo`7VAyAOVQ9jK-&% zy%T-if7VQBo-{Ew&ck+=y%{gm!Mm$m^l-_uzVcZ%lfPx1<%DnSyd~0SZKo^1=Xm`q zv`b=GJly`6$gks<9|&h!bdlRn_xfy&1RX1=5~*JX`VwOzpE|1dnJ$tC242(*u`wC- zt=>+Wnk^q<)kTJY#r(sR`&u4cE^P0-pfY0&@Zlw__Cy!N=}X|Z%pJkaDFpzfXflnY zc*IEcQntx#joQ&lnT_1!_KQs-g*~f@<50p>+8TAU0z{?ow;tdS{jm%J zvL41UH1x$8cy%l;>w2UM3!JweJHI<`sghEF57zCl-iZEyjb7tC<4QE8gm|m(h@sw3d5cq-n6*8d*Zh zN{frHvsJ>lG~d;sKN&IAscAjq#$Lwjt;chyudrQJkR;;J#X5&#c)rx!kRM2=k5yhk ztnwjM>%wE5Uz}ZJEvEOnVg6%#vjdpUpMBgN<=ET#lNo!zmbLdgU6-s#$LkcbjU;v2 z#FWF_2toV!m5D$GJ52fHYwuw}9o@+C$XQViq`l29)FY9HI6=g`hcH9QL_yFnSJQiI 
zkp!9qY3)s<{Cy&oc&eKoJS5)X)nBjx`-I@OH>+)n{lpcHjSGg3P5Le>Hr6334GwOX zc~7;`vX>}FDlL^728t9*aNCdC;~xTd{nu(9zD>7sFZFh~#&TQtGV0MivPLW1sRo?3 z+L!Y_+JZzbTjgRtlZ;Meac4;tFPT)nbF64%_dUR#g_ z-}=d~!mV7q4GBmZRj$kU#WyuM6G&q+?HNDgxs86dg=R&BuV1QUIg(#>ZxCHU_(5M7 zpuUmg9v;dAdAL*FAB?8l^d3}6q#uBJKqaW5d#h~~^f(!`7ZlTO{f?eX{-k?C%CAD{ zCGj)S_x%m9#*H@`M+Y2G!@8tFRbef=$emJc1L334+rNsVQ9|4)sDHutW zDF(tQaZwo&Tvp4t4*J+>UNk{yqC~33rd7(!!x5(6{k`N{&r>>^8?>k)Xi;yko$}y{ zGo|GiBb^+e29b*q9QKpmX8UJze;((CRcAbHbc@tHwC*q{NA$wZkOUrSU<6J z3~O!Bk-3pwAj3;L*U!((C4Fp!xk+b?NbAILoNZNVO*Xt*Kw{Z*N~PN68gAK0CP%E1 z4=?(8xTV_pnRa+Eii@Tgj-X;a0ggpW&RtCs)_sGomqNi?H}1!WZ_-+fb5&LYKW*x< zV@3-+wcqU`~V3i_Rx4H95OU!+Q2 z0+QMvkzN9flW?@nNKHd?-tM}3RC_jLip-*JQZafe`;B!sFxO=ESR^` zzep#lDeCo1XHikO>g1c`O;sY#pu*ODq)UoCM(4aU$%Dq`oKm+l^ylAHUh@ob+j(Yt zjvt^!N{SOG%193?83HX)EiHjJm~z;qXC=gEIQ9&MW3KR`cZ$w1t~v{!jFMHqe5RA! zErx|u#Aw>iLbXHha(h(Hwe9!z6jU9Nv|ag>DoN;^ zRwBF9T-nqVP_CRpMRJ6`m}pJ9;A-fY%`(bID-vG-P^%ClfXO?wS8-xJYrX20n$Ag# zakAouEpKL!Sje#h!UzbXMzoq`-#V(7z^GBu;g!Bs;u9YZjLYD5b`_E;yGYGulbC(a zKwUx=Ug)xk|I^$x#c%fuu4@AN#gVDVV?1c(d)SPUGxLVEdgg9`nN=-RUxI?a(|)j= zvF0EVO}SXypl2g#=)s~6CT`KU) zD-GW1tAc5WUkPKHONoQ{o9VeuMI64BXJ{1p^v@9tt+vb(nPpl%b{U&&qll2FGxUueo`_O;st}m;xJIC!f4@xsxSMlW!r3bofJ|$lRGfM45iq9oUaFvY7zV~2`BygA49+JA^hC4Nx zE`U~Gud5oM?hfWwqnRQArYiDjZ>{dIF=N&vCJ915uMmQa#kk&Gy3igMmiNi|vIgm# z%Qm9{vw|^&N|KkRSjW-9&|UQfr+&BIG? zxfDZdyH2>Ops2SDbK^2Z3oKtD&GiIE41-IN*}5*QLR|_CyV;}dvyFich9Z~ks+hKw zOu`8WMeOQHhDqN;t%yTS{E{OaO<*ycO-o0u(na=qL+ljU_(D0S!i;m0;1Z0ciiY>X z#uC*_ANajpmb}=z(J!PHL`woGWt!M1(=hnOH^I5=Y72WM(9dQGkAfYPj#G>l&L{v! zOgCB31rvtW6N4jY?8v=gr$ulyd9i(m{4JwABEgvD4VRwtcnmH!OG(W)mxiGzAnqr)1Y}^h)9W?KQ&P%I*ENjL1fpya?y^q!;amr#5c54>+C&4p$2V#1 zC2bYXlg>B4Db-EyYZ08-T-1{s3CNxcCcJb9%T}$p=zmPp+v*~JmZ(n38c;_VRFgsW zOF;VpI~d-dvburGiO}||7BQIh`!G0QCg?*d+pT}>J+KjL@PgqKDjUph<_7PJ+@Ehk zY3E-~9!D%V7M(oymJVv+B;!^{QfDixSY*FAotwOlY0~z4ZM4sFp})&EK7D*yBi~jc+yJI` zs>j_qY-LU@9{j3S3LwtWQu`_qiTH&99j*=28Vc5glFM}N@*Rvv-wuIQik3-R!dHG* zFn^y&RIDx?zF$nxHGm9SYnnWx0-#ro7A$MX8Hor5&LHeSyI*k{918aKHG9GaCPx+q zDji$pv%kYeG%aF>n!H(RRzQm}TPmD}v4;WEB+gPq0HqL|qQw+ff^GLfspv;A{Q4ku z*wl6fjlvL8axrjVpX`))i{Ovtot5`g->zt5Y;-BaxtXGI4P{J8zd}$Od1Ptj*Gi-z zv6!AAn0zxH0{Da3k=vNWIBX~@Vl{2pb#WgHZQ(AORxFV(w8U1DT=xYMYt5P4N@tr5 zSCxjt*{wP>MM}9*=ZJ7)!w99e)OHIWz6sw2M&sIx2Wt4& zcO;MC$8m(I%%q0Jh-!|E`GgXurJmEqOh(M|Q9 z4H9MdCenFMV780$3IBj72zoi$X9dm|5)p*uFoKHIxYUK$V0;s5`6cfMn{hdzUJ|b~ zen|D81oJ{Q6tF%#%Z{a2`@Y08HR(PRTXxM!=1uv&p@+3jv9!1(R{drx(l~jpW&(_D z^nhb_iXhJ(LUzS<%2QQ@tA;~!{aLNQZoFE`kAqVKKxSUR6k%8s!i@yM;OUy!Vyj2h z#dikqzeUo_%o<%x&B_3`CutG52RMYJ6{%SrVaoBv>IuBx78ZT>vv?fQw!&+_PP|4q zr*Z^;ccq?fa1yJ8p&Krb8Tip-IFvDz#fxpMpy^i}qvr=8lnU+f(5#RN^Rcy&$=RGw zns#=mLhG!+0$+E*rHp63$*|qHa#+Xj8llwPI;CZmY`BY6O=1`^EF=8MLjiqTXju(4 ziDf&c%VD%leeq>5rc_13G`xJsYkOKFdT(MfnL>1cRoB^4RYY|^dJ5?c7PQohCu+Hh z*)sFY)*#{Zx*50HO`gCLoF$a0Mu)Vg?E20QNo25I*39r7Z%whB`vgB>}%Fy@5o^AcSo920yWd`)uu`IhQ&(Yc)8Py zyBAtsXHRvLbDAaC{WpB*rX7}<-1)X0z}yv&7e%z{s8fE#MBDyk?~EJzCiujvfcn0 zpu@(4;^#9?hUg85M~Nf!;32AQG@M$!>`*jwXb?V5II3ug;w7c^=PEok8BOY=Ov9a# zwX4CfUMS|Jz$^kJSr-+Et3y#5t)zA|lD=%WD2HC;@Cu-$?w6aB(?-^45($l+A}NeZ zo_YaH7;ec+iyWp{8(TumP%izx1hRBoM>G1`MI_3phCMk}*`AmU+67|*&-{swbUhoQ*9^x-;zzxZ?5lUaxsy*C zoCvPmbEis%1xK^aY=jcVrC2sj@2w7z2LhUh*H=X+_3TrKjJJ2C_zi=Tzr+6u^VJ(Z z#Ap~sM{Tx^*a32qez!YTQ}v}R<1|;aGuK)d_#ozXS@D`MWSGQHakvE) zNn?O5qjzNLRKPP1+sEqng&p6Lo+2Gnrubvu)#|kmr_!9Sc2b1yJ+H~>_IZ43-TB58 zfkN=Dgid}h7JaVSVL^$j8I}ygR*CaxrVG@TCf%*{F`cz+rjGjqKnvH4=+mKB+;i0p2`ZDmI$Y1&K*>XTaa<%W#ws8YYF6|zVM-`WnrULL zcMWY!TLmg0w|)Nz|EJ+o(#pcSa>RQ)y^?JMv#`Y{(Sk|4+gdC!6bA#bJn$%VUWh3E zis0m&gd8n3pQiZ;Rfd!+`MNfIv3~WF8 
zDg{hmOrYwoo}Q`%QHA=b&Vzy_RT`)&7(`iUwLox5bvEPsYg?2@4EXz-7Q$*K970ji zVJh=h~NcN$joRnBYzaS#=^RUw3R&lgx{Kj z!HwLM!4lW8KN3A?6`lka@Q8K+U<9MdE>hS4$%dn*W6`z?3(EGkx(Q)~*~S&Wx!pf_ zphV6cetW$@mR#B=5FQ#Yrm(flK7SsN_f)95y?E@7&1t2j9O}c~@l`oQCzmxa4&FAY zf0@xa_$DiUm0wrI7_|%^C>x`?obdY;4wL%XEGlma{-vDk!&< zAC|}t@I=!`*cxqSH0p}FN`bCHEJrJJq{Syf+Xo~0wrb@m7d?C!Lo(kYqEa*{MtuT> z|K})WSwS~jkJ&W^I}#)*QslRKHpOBv=}|zn7xLKf+TbAFw`tojeAq7TJWS=8i7uz6 zQ@ztL+u!&gvA3F_c>kb2__&g>VyZzR7sn=Tv##*5uEq9W1N$lrUUWayy!4NqmsR-ay1939~p|rB!+XJk?|}a5=x&5osYR4Kfcd zI1|sw%$$06jI3=9J;AF+J78u*Vaka}T40C^AZKd%Eon=`V-B;hk!1+4ie+_3c%b9+V$5?8z^Das8;I(=slj#Pp+f9y2VoxLRVW zw2l4-zhzuUZRbaVcqPVb-!HQNCue!@17!zt{vhB9vmF$(t&Sorm)j&NUW!X|3D=Pfh zbQC0lxU)G!+Snc2A+uTS&Cpw|iU_~iD4o(@p8^OAiAkfRt3qjnLYy-Qs@oCSe$;Ei zV4{qjb&W@+p-nbXmf^Sx76-V=M&0o_8er9m8DXe(<`aZP>lvT~NtiLBTj4HrdIw~Z zh&~U)ebzdP5l<^fdZcp39g5rBSuE9n7Nq-#YaNg$!VKOfWNQX;f~e?8 znWSsX_jmmSil&iG8D*L6bh6eL>~@B^i3cEgZ@AyBqmAQ>RzeczBK>OHa-;G`J?}30 zpnl1JDth-TE}PDRtK|Ec=h=+7V@J8I&h?^@Zn5(u(_NxfUm}umax_RHuXFhh#8Ulb zFwu48L(Xj@E;w!VT2qwrhxvE%SRSuzzRf~9;cW4+u5LdKmt~tbPo@)clPnoxZ^FF? zI{a&UB=NX|@6p-VP4C0YYA5O7 z7fMnQ@NXG|TLt>5t!0tNFIMpDeTN4fOvu5AOKQ^f~FpHmcDBthMQ|oQU<~fpow!Xv4k!ISpH(pDTo~wyb9Fi_J5}ojZc`LqGaL#Lt zSz5Vj;LeqkruakCy)7<#S)A&qyAdy6aw(QQGYqZ>9q`$NB>>~4ncdz25wnL=%Q8bh z=)t5y8H=`Mg8c51y4k-RZoSGPs(6@@?En#1U5C17dKjKOnIC;^jY(ID7yITJ0Cd-M zMID&&R9dFcqTliC0kJ0NN&k2&Bb$y~a)OO#7?>IV)6;W$F{^w`ZDuZu*EY#r)aQd* zM@3{(n?MpjAMm><#=53d7sJWK)zKA&&Y&pWsmO`e&#fnWj@T*nU9R@yR0mVnZKJub z9YqWv!d-hZ5ol-oh#-XIeNTT>@do#7w`vP_>9^dRHztbk zlNd*ZCAJ^MY@OPFs45VzINcesKGZz9=$E^4erG#TWx-a7)%Upifqj!6Dj{PVL3t9{ zD#;$9P7zbfT5%a)HT4eyKy<(0HT%_M>MC zx7-k;w*-A@4EB$*Ox>KA-i{o?j?2D1sLu#d5XOL&aajFj?39(%?^r9p&}@yC$}xIKYh6FZ#F zrVwIxa|KcGeU>rm#C|8Xt*Gle?>L+K`kznQ$NoC8*g(FA)##gQ^UZo-osXv&5eMHM zY-`d`ZlB1A){t*Du3W@KqK=JfIZk;fqhh5|0|i}W14F3gNFW07F3^n_GB|mx*pH5< z$TMxIB@md1bH}i#2jHE$zWo{2l+;8RNB?&1$;fbCZ3ob)y!8Ng%Ii*Z`+2q!Ev|R} z;SCNI|6#|KrW22I!%qd71F9is;=1DN0iwOHn;cky6LD;~9gd~p2*OK8YhPr2vj>qw zTkId~?1R4;Z?1`oy8GYYHP(>dE-B>*o%(`!Gc)KrqVPIZFR3k3-={~ayrjbK@YJpg z+zPi4J(O`ocREa&%bZuUQYlom<6Wu~vRc z+GeLjISi<6I~K&|F-Vlu@J;PrHLnUS@IM&bi?FT>J(A(?U{>4YxY!J{+(OleX-Y0A z5n7O>mgpBizl9wX9n;Hkts&2r8j-oHNzh7OP!ovbeu6W)5MPGjqDGLmZc2YwXh)$k zH%wY_o>qNp4nHbAlBe&mmIhX1=G%qSB`5g- zGL}l-3fnH@J-;tivp>Ocb=zv4nPCvzcLJeMDaK+bOZaog z`i$(f{N@`Xgt+>%Ks=-dV$|S1wWm_GjcqQw!EKk#weZzk*k2x5k26rQPa=Y5k>7si zzpn|~x_`ItA{(Krh$dNLA7FxUa24;_xkb5&dc+Sja=5OclqhU{8CQIltL^^WJv1u* zh=A^|EN{Kc#c7e%BETn`Wsf2M*bT+i8gAvl-zl(jyHFBxFZx@%Ha8CXWe2Kb*)OZ) zm)w39_ALQ0V%|-V$V`%D4Hq*JmI|Cl=N*#Vhh|IOLn3v4s~Tng!*fWi0LNE?W>lPD zxMxmRi3!tkt*YbLBM_704v_k_Kw7Z}{%?l%}cgBmp5639@u;Xqw`%xN`c zYroPkm`piS^rux;NhPzi6}%Epq9d$H)P+t?Aa-b)*EgWE zkCu=j8~wN#(NhvDZLL0Dt%I6jKD~-}k<#lefJq>Wyf{Bz3h>IziO=~cIM_Y(bH^~B zU;4A&!v`PuIb@`S&G~DK6$d`+QIuT8#7mnSJDB$>+|^Vz4&UCT47+7(utbw(fQ^2w zCExoJJ2y>;&n1RU{utGL7b4w~ny7g0RCFvGdmrk2q2;;*T#}756jB+}7Y>+(RAh-k z)p*`S!hA%+&FM1j6z9rQ+f~m~L`@;N_;n<6Wb5NP9>T4TJo8nmWN%npyHl@~tcvKU zJiQpo%_|>Tm`$A(+3Ltx?$Q`1n#XVydj2Aee1=-+?qf)?Xa_K)_)P_cJ|YR^%^Rui zqY{nx0)5eDq{^l2p~$yd4Pxv%OGL=%_M#5<+YeZAG1Hm4D&>@on0{|AiDf=1x7Uv^ zgCSGuPUJ$q6$ta7Y)VRom1L&!BwcCK!!ROrljMva*)OoAo4>V>el_5DHZf%47+32! 
zRg_J$#+oFHPfp2rN#6Sa5^=dmY5bQ~HzWMdPt-9i$(CQUBkaCG53VKoY?!fB z_=WuVq6?MBIJkPCGJNtIDQ}JAu)_zp@6roM&T8BNI2e_tt5%+7+%d+|8|?m=>8TwO z6Re%w`zV#%3#G`?(WE*HAW`QsVx{Dc5Aaw!doAhBhQZL)8vMG<)`&LW-9(rHIgqfr z!WPw{RrjOhjGH=Rm6zHzF@q0jZqVDc+!o)@W?;@`{0#XLG^;J!lX^9>9lY&(mHZWT zF{Z@r_G;|zvEf3}^uMR^OWfvHcn5Zk8kBL~25uMYPIgXjthv*jVW|>oU{%83?t@58 z$mv~cv_zVpl{Wn?i!IZ;L&eGDt2ehec98JqgCoeWTT2-o%(TA%_X6^z7o+duLi{4{d|tPA^oQYQ@Td%CSkCF&!>C1odzz70 zWAvj&SMOaUlTpIyS-M``PVLvkBR{Tx}0H7l5BqUVjBqaW| zvFmj^SAI~cknD&k$(V@-S1B$6M+~oJwKATh6iElNR5j2(N}F_6W}sM3B5ikx=n;tQo9F(N29LKD_`#ViMo&Y21>GEvz+ZpNL;!LfxPXbKuj&6;l;jE>O z)C=x=;2a!TU;XMqJ|z_vkvJ6X?DY*(g|L3n_T*>|=@aY(-EAQsNNWo}%axK2uerVx^3-z)ZLIBt`8iD&WN zP%H1=k|kj;;jLBW7ZPdcc-?SVOypYH5>x~>eKd8)a$6CyZSt#UtR5Wqx5a4|P`KYy zghW=zpU;*)swh9mrSi@_?)H*X00vMv?HiwhPI?ouLC>g$giIk7=(B-o#twG#6vZLL z-QCW1(`2qaFsQ;gtyBT(+00v#*ou>pnaHKv2Zj z)y%@d$`fR6Wn*l5Crnt5BisVPOeHy|Au$=_$Ldm ze6aYMxw5b^v$8ljvHV@b!&A!p737}={a-aaG+(zXvZz~mxOll+SV?(XIeSw49m3M$ z-}EC{SWDX`Tm#itCW%wzod(W*B|rbB!wV<#^<+mv9Pn`|Lc&Khlh=a zmD7rem6ex|iPM6coynYwi<^npoQ;c(!a`MB7a_*gkCn9QwtcwhB-tavPW{sy7qZuc6MW{!Wi>JOCVD-{jMhTwFZdTz^4XTJTG|xI3A>PN$udnT-{T ztFz5t9e)Vt7gLoJg0M5Q{!fdlqnW4mtAY?j(azb+_kTJx?VPMMJk9>F$;Qpi%g4jV z&CSZi%g)C451aqO)3$Q=c#XtAm~5=f9RI-mGcEkD&b$(9_9sqX0sex0^@U%;-O9|< z#a+|I#Zd_I2Lkj*^RMs*3I1bJWbHg&CH($G{6Aw}!^-U+Xa7h6N4vkeK%l?Emfy_c zAA@+9d0ScjHPEZxKe{Yz&75tlUi15(0rjtTyZ_5%@o?~%vvaXoGx1uRaWHXOvU9$2 zZOO)DX2E8`WnpG*&d$m4pXeSg)}B6Q?p9(pub#eo^BO>Zc>|*Ti%Poxl=iW;`ok0J zYi_f#F|o61vT^dW^YC;2nL~b7RtU>K9cKBH)&KHXkmdiwiQr!Xe=7s8djDv9EibRt zisj$s>Ytqbq4EFW=bv-&|8Rs?=>HD#zvB15bp4mE{}luOE8+h}*MI5yUor5%68>*= z{r^T6^8eiMSUJD0f_z?YXSh~;|Gbn0Z>}IC3BUsA0#NYCzLLJSAi2uwc>n+?*nhrI zfZTk7*G2?SIVCBC0~8!wVw%>rz61aO1dx*y)AU_Ed2dkfWaqtgOZ+X*!Q^8Fv(bi8 zQ0x3zZd( zgO;YoR0q(-ipG|8%B`Q8e(u*?Ypm_hP|?!4<@5TezeOOJz}DY0jTjR+bar-`wd&P( z-#g27Xz20j^7jkAEhkZ;blLoWfBr?m1L>O^=6})goU~yWM`E}qHDrR7l`QZg_yF@} z8P<5h??Wm}*r!c{SL!Z5BsTbn{gc6O)6k5k;M4Up~P%GZ1>3}on~=8 zVA@7>#O`m1knIbAFWQaQ#WGF^>HIQ@u}T zg>OEk-G1xUwSunCTo=n5`&`Y-Zb4|zUTVhgcn$krP7s7IqIir$-p9uOnSb%pe-d!{ zba$X~lQgIRUjzt|OU(++A`xbuREQQ~fhHJY??%u8LDRyMBAes8O*XvfmzTu0vyLD*I6a-6|6Uw53fZMuvTN>mKC3 zzsB%Qo}caqehIwnsyvUX_>$lLrzP)#JgVHIuAzq$33!q=dA%th_dq|fSz$8}*!-ed zGrO-S5U>uXQUX)x2%TAqATu#oxrhRV{qZdtA|%|RTR;M5f)%b3B$2AA*^OY+EDMjC z1>%;BSzb`pPD#c$A2Ed8AVk0Tbu0PYB=r0FI^^hfdCT|Rr-c8sVvT~N5O=Sz=Y8n+ z#9bci_0V-ta2e#2KiU}p)(+U_ZwnkHRh^}YGq>we#W;jjfM9;o9z@l=uBxyG4t-F) z1kzK2LAx=?;Alh$q?IB(bk5&p9;k}5Y}&t7kPA7Ilje0smM~v45|`U~^iB10U?^9T zbhlUxhPrt=$RC)qQTY>G;D5(wNcd43G*8k7b{78nt@Nl1$$;2%dbS|xbc889?9c{~ zR1ST#`HYUJTBfG*vnVAlZ0hMYHC<@TVJt%_9;_%EP6e;)5M|*TDmzT!gvol!Hpu79 zoiz0XO)XIF8ka@hsN3r^G|gz(M_pKjVKGK4;>e$3Mwj4fJ>kkt4lnF9K`AHUE8^Wd znh(ERZjhExpn@t!0|Y~6+ADdKE>u_Sc+iRKdbj5x`<4!_-PK+8(mN+s?I&XF-|A;mg3hc7M3y?AzM z#uKNut3(?cvf_WCaK8H%gvb3_>ael?S?;!65oaRTV_dP7aQ!1(KSkj$Jkl6Gd)J_s z*TbK&4jN&0X(ig#*aGn-QLp8dWwXR(@`t(zG|0`;=j9rrVBSmlD*C&Nj>=pWVc2h`RsT#SpXib=IS$r4){nJ&6Njx~qAouCh$pgT_b=a??(2 zNS?Ab4)A~0JHN&SHRV4qFu|WnBv{s;rSN-bE}$Vi-CLm>-N%XDA*iWL5}2a69h5{6 zUGOPd4RFxhwX^qldRm_H1peJKU;-jjl8=ktU6OBrQL1KMi45MPVMIDh#Rj8mh_?A- z-}S{(Ni`GwmgwI`7Pgo~n9!ss0wYD13e=8q5CSD-==n*QI;80F=x4$7{ASu^GM?d; zaNh*OA;Md%Ba&HKWv$?kQ^-Eyp($atHq_D7P}qcoX+!y)8F)5b1CZ(m2j^!bk0*hk z1D?N!&p;gkKl6|<|Avd?4cR2h*|oH!v*gYAhA}j}mS4^%52&2-6>tVrnmm-sZFrhl zD!Z#PV?$WPTQBgNecYGqa#32UQm3^sylJQ8&VvfRi`vK`~WF z65*gri0<^27$&_q_FBipDep?*vedir(pP?Et zrOvOq1c#D0mV*chVXj_sE&2@iafCoGqKnO9aZ}KNd!`a32GX^Z1*LW58FTnMyZ3ur z%S(dFSpp`U!zL3KRB0l)BHM^Q_=~J967%gAX?ZhtNMb_MYa!^zA?jI**9b|L$x%GS zBX`Zt##;L1H`jqGeqU(1WE_d713s@LKlUE^+4dbNjWm{)7LU*VGP=U1v!CG5#D3Ah 
zf2sfAcjBV<_v(2WGP2|9X7$Cybw^qd5y7p8?KU8L0C0umhAn_1w{AhsKsyg4EYWmT zs1UcqxC{I0(XBLY+SKAHVK_qF^68GPsV;hY&|-050ilCR8ub${npzLgQbGFlWn1|0 zC}waj2PY168XCb@el;TKPgDAIRAQJ+m;RPURp2Qm+ZD1jT};7Ri49Xdp<3;B5`y{f zVSb%Xqrr1mF)_Q144ATkU_Z7cgI$&nWI%v-@^upTbk7f9c+G zo8Uor^2^WRHQL1% zX*dUVG(bE;HLr~Df;NoQAl1FF{4r8S+^|D$JLiCnaldHs)!vRZEoi$pbIo~^lfY>! zLc(qpLoYQ9ieuiaxDxol#VyIX*^W;m!zBtsfB>bM*Rp7Q6JFfEQc4*~`8B*bJY}&g zw`eQ99Vnt zNGH?n*|%^|VxHN0oo3CHShcN;B$;0LX=d>)x#w~QmdoRD6#bY>~y?it7`|Z*L&#l<*HZMPdy0rAGo>^wL~G1kUfQEipzo%Z&}t?o;oO zhH|PgRuUONrW}m8b&VWaq;ZxoGABOL+;O}Nq@_fn+L_XrgcZ4y@^c*_8gOMf%Pqph z;js-yN5FE~D3j7|f<>h1IiIn|FV@#hcRVex8JnB(k0t7*Qs}8`ckEE zQ|>UpL_F<389r9}<4-QDe(?V=1tH6)RPt_;OW1YJ=Fe_Cl2`k*lys$Ui>G#?l}oi; zoib~wJ}D<8i+lI!{A(xp>kmC-Br$O$^S2dH-PUI#IlGq0S+clN)|xT}|CXl5yrYgNSsKz~ds zyUoH$eZAPKL~q!~*?GZlnwGGHSg=z4g_LsXd^o$U(T`HO=D+`)jdmk*mYUw@+n0y&Q-;orpvQe$D$AZ_}`Jt_iZZZv2S*m_vfL!V`$IRA0 zd1kaslCg|ULdexCAn=)}^3iG1=ux|0ywl8Kzn0{%?AmRab_8B5i@s#61-=_-__J(@ zl>Af7h!UGT9M`s$GvnW|5T|uSF_|mWLJ2{atyN_%pw>gf)e* z=#vK4w!pk8I{%V}_giswWcq+@N}J3`@W)ww*bqcK#q=j!$Mvj>5L{kcUZXW*ib1g?;WgVwd3ELVUZE9djYrD|KR9LJ2!QxZ=m39JiLF*J25ZRwC5xtK?v zb4NlBFbJX>2PIm`(i|gW;L%cjpW6}p>vx38NbcVHG^x%8qU9-Y?G*N4YbikMtsPdGSc|oUOf@WgQmTj9e1Zl9oX> z`YPes<7MTT=sI3$@9D%U>sK?HjE3Q1{%q|{nM(>RARv!@eJ zYa4%C!aM}Yig&NgP$GfZh7L){N>ZI`KB)8zu&@t*62~I71aU79GUw2^2_KJV3x(SP z$7a3ZqNF!yh`iBE@FKCu${Du`v~qL8Vt3z3nS&JLC~4QCnhp|Gweqo1u)A8|FSTNO z3Tfga+`2ZLn}f+L6BbzHKc*_i=W%eha>=9t9|{YAh6%ew(@FLq%nwW*5TJKdT!moC zF5@~=A^8vF;^}-dj|)oj$z`8J1Nnxc<&BDy(XmE-M5=HZB6TtKx2jDOIqxlU0JEWCs1r=5R74R@mL`RC z0`%G%k=NoW?6r(_Fqtk@J&Bk}gfG!68LOO4rqU}@$dfHXIanW@<_lae0u3bvA8HX3 zOV->GvwoE+2>H;sXuDd~uxpdN*=Ap6s-O2X)Gj%8_mDQqzw^2E>MDWPf2hsYyBKymAK%y zpA(#~WK<>|olT(r+=TqAc>ESI|K6SaG(+1&5KOMasZ;%a72~5jH03o}xK=X>Wt835 z%1~3um*LvJStvGlin||%?9Hw0$fdy?`*(zjwu_CTDMYDGs>aBAcyLpWaBiOfv(xyp z;v;pr>aFcNC-36`u3H=o+6~q9gEH<9wjA?5{W_*1$C;8tPP!mI=D@IFJO`h%*5=@7&A4#mJZ0-sP*E)_8=S zOc6)F%h4spZ~(rm;iG>0)bd7E-Gw5+xAFVcE9CdBnmew@Te z_NI;Uvy>?2!^WLa5IAoX7zfsmYC)ewWnDr9yK1RQ@7%P7%GPR|Zb&|-*19lSNp`4d zs+O>hawLO~`Gc5m$DZsV@dSje;Y<=tI}BZ*O5>SA<*&pnl{Sb}PZk>Q)u7UC_3^T8 z;?U%_E<}?*1kjD(wvZ;tCpWzY* z<R~)7**8`q()>h zEy(%Er;j;f`46oJmN>BbmQt%Gclt|hc@_%H<=#;U;0g|5n;sk20ih3Tz5lW7C>ZB; zymzvuj+}X2OEVLT{ygSlLhYT|Ty}~k5Vk-zrYIi^{eDz^>K!@#R+r(FZa#VJ;^wCY zIQfb7oj5C(_F&d8@zR>tH#Z^utweFEs*J~M_o*8mpS`^nxW0f}hr}#RE|=d6x4UDo z%2@5;*~C=Am0(6j*`%1Gf5)>_b_MZ?+Qb~P$H%}TiT-k=ajmr|XQXRogq(|z>4Wnc zWziQ?D^hY$7rRsUK6=_*PnIGcOXv%@^C+azP~#C2%PCg-Kd)I~(7v605uIywKwO#a zLyy11`Q$2}Ts}SRpxcAPOnA04A8pRA1LtUEq^PnRiP(U%)dmWrMB=R5^n_4g7-C5Z z%;VgVEJJ*1`IlOu#;@N$>NtJ;-P(ln_a(|!Y+i)ZyHD*|QqQ=O=kMNB1#2W~xwI0o z>-4=D^o;laNQnSxXGMgreH?TmFK!c~klrMi)j=Q4$Fz;5(C34G+cSvVMd^eI!TX7% zAxVwmjA|^et0ws6_iKz`Ea)|EU9)f!6aIU$^&mHxM3FFWHBd~wBpuJDID8vW}@(P zg5IE^SpHf)PPX&^F!he{aYxY2rJIt6RLCDeET*qNg?Wu zlQ?5=MtBoeRZ_mzjwzngnMsq*%ZWtAP%%&gv*;?iS}@b7t~RliqD@d$kl^dU@4we> zd)^j+oZlVK29yOM+DQShjpJ4>7FVE89} z_@EO;B$p)v?F_N;Qx9(KQ}K)%23K1U2P`=(rN;KV$aBOuduQ5cR9@*}FlW&K1}8L! 
zb`N7B1rk)7rY;Z{y(Zf)Ts-3|1t*H@n@tOM*pj%ya$(KgQ|>h_uB;kQ7_kFoX-YG0 zGRnYWgK!j!bfhU`l6uY7BKIQwCy%CDIKOrqOy~gI`Dg}fbfPul6pG?fgW>zxamiNY zBc4Tq%~FU}?tCY8vuslZ3E86#NW{VeG>xjN?UWp*ee}pI!uG*LdUxoDf(y=;Y3@tY>H$2YrBJ(?1v5sb3rNQ{4{1aI%U z1=Q7p|GUNl4J)@k=Cdt%4~IRElm66QL!Brl_mkT%SW-=oj>UrjLWq~4)06uB5F}V^ z;Jaq1)U@H%hE?Q!zkvj72@*czC}!gn_2B+-S^5z;iGLeOj-`078A157EaD~|Pfav& zcd3+6f+`{~`ZPBEZxVD&4+qr%ZV16S;T^)-4f(Vr-Aa3B0mdEN6= zV9D_Do_NRY-cf3Ks1|_aVvUKwV1}qB@s*-aRd%y)6bx=f)~Pmxm`b*D?T2QzJ+3y- z+)N98fu3&8^yAYgs6E2wr%e6x!d5SXX`p~PCZKubv<*UB z$UexadzuG&<2P9?!@XZa_f?`9(1kKZA%u$9@QwFo9hu!o~QmBby zT-l`@Wo$8IBLfi5lo=(!gIHjOG7d+S&F&HthuEZKnKB;TG(?w2k8O8Y@FEmNKA2)6 zAFKF_iYIKsW#`XckWlspfwR59iU=Q{VeVD%U;6?AosI7H1DM@wF7K;pBN6v|*sl9a zdxzqUo_6!7@cBb`iBXC)5<5dD#BzH&phy)$4(kHr6SnS^Czr=w*2?+!sl_KuBR=XN3Fnt`e(`>284Bl1^fzvW5wH`k>eHcPd<&4yhA5Bgj8!> zsd-$+TTY(!)xWA@H;w&Xhdz#4gZiT=`2R>CLdixRH?Iq)^><6y(LGwA zo{_Q3)pL(4v_ESD!~nYD4=Lw`3MH%=;L?`AS=ud(Znc*1llm}_2U*zOJE)tz-|Ydt zOrlj?a{`Xj`X^`&NozP-YEZ(_O~&M?b!29+8w4kz%g7WJZLWO4RMMZK8x6wxhxD7t zyoiH7!#AWsE^SdX?4{KJ3O{+>IYSBPi6NhIjLi=lU>y3VN;irNja%hYG}}`0EZh*P zErKx^kT@g4bxmC$|;rhI4iRuLy%wITxi0wgajLM5EWo{y{rihND`YQOH%ozDQ zT!ykZ83jDR<%TBP)+i2<_62~$xuua%iZ1I!?@-FZ*FTvL!;xQZ9=)~?lnRBGS1&AS zWNpc2siLWB`PHQhvxyoZ)NKx!jhhmwVY-V25rmLzdgo}^D*2!Xy#VVfceqdMa&$Ie zmw|Ub&iDPVhyQi&JRA+rUZ^JrVzj9wQ>2;aEh!z5pa0ZE!CB2ZdN#CLuC%+oyz|o* zsVidAI?K*(hao7oEh;8(ZTS_tGwZV40KLWsVq8y01L8)0Zu$m6^)@-QxB!+v@m)f29i zVW6QUj;M21={u~^w@~i?aGis9 zE;QxIabUT@Z+Ly`Id%8I?&>~7%f?TtLTu?gi^S-$G^v6Qxs1VUv-#gFfYtY_jm!}z zRF?(SzDf>_HJE`P(U_(1lucB?V|O7l4i{9^$)x_01qkv2g!UQXuh9H-rO^W3(qwnjpn|k(8b0mh%-e0_YiQggCl6 zCnPwR$VFmfM?emLY~->eM!E10hBG>qC~#21b2jl6^dD04|8U^p=qa@B$?^OSPwMu0 zrAf$UDY<8z5&y(Li8D4d^IG%_i<{wir@*0~aA1GwQ&XM1_Gx+&ck#1bMpThWHzXP7 zr&xwF+##E;c1-?_T&!GO)MzMXtNqqfY4F>z!>_;% zos$1jWM5D!|BMm`z#Sk4fp*h|xC)Bm_5bDYWq3w(CtZ`9cWG|ymxc}x_iy6yg62k)Rh z=lsMD7*KumBZT{a={M&oKP|xH6AYCJXBS4Ea$+{81fSJMrx-#c>Q-mZo{loc<4L5+ zQT_W!{Z(Mz*5T%)x9JE!9bb7>QuGg$#Z=XP&I8V|h-9g7`dAeMt%BArMR3o{Cmb-t z@qzw1GTo;ri=)7qtBXH1>i_q0G{SpF=1fB7{xZXO9on~)vf^jWJBi;A>&;fNN0vEN zOvv`9qsLBgO=nUAR799^zsUir8Pbcumna1KM`6z#V|Q1N%#-Z@ZC%^ADn1SnqmqS!o2r4 zYnMS=7lvdpaz#U4MI2#*+`fkBb~dU3h70B6{e61luC6_cb?%u&STB_U#QHC8Iv5)` zhjomZ2za#Zs?uL~IOYh5s@5ZjR&kkWRpur3C^_hqN$;XHNM-LKhN{S5*utxu)aK|Q z1}9DqqMPk1W@K(D?S6gh#5^c7nMHW<&yc8mij1WOm=ZZO(eI-%LDRJd8#2VKZpwdz zK*xMGdAW&&&S&FGhd84Y{{QYs0wV4o>H}Y!U%Kx}FHJweCm;=S{MH0~Di)v5Nv%Bl%|M@D-^g zIJqUY2NH)=q4*b8Is`5mgqyIbuy+pdm>jJ>x%EZ6DwEM2g0^aQcBczKdF)z3`KlR4 z#V!HIdt}#fRL$4=>5^LKYze;JA4=sW#)-VKSTrK+qsEf~K56{PwuUH%0{)g8M;2`t zrioh?#3~Eel9{2*L}Hp_`@U0 zi1GNh_lhC#uf54GYK4~%M;iUN0`5E~arDDuVuAnyDouUY7#suKdHm1^6&fr_q(RneF3X){mQW`LSX<0$?0EZq zw3`FR;^10~`@|+a610~E>&J#eJ`MbsO5II}K2ah{r4x1Y)kvJ?9Ci-atdd!~y_gPf zCT~iGk=!L7@!2}biaYpp{2hXf1VeFn%fIo&pcHjA+cY*HiJ8oJFJG>kLI(EZ8IDQ6 z-6+r`@=oGXwsMTqH8P?4l*G;vQl_`6iLEPSH3>92**dJ~eG(9Tp2qXZ z$GV{BLtH#EKB0FZMbt_1!HboHB(k(#%Wf2dHK`VP2^3ww4RZR8RasQvK;J?WU`tw%L|8`OA`Z0r7ip0f864$<=T2J^paOwSU^E zh|%dN>YEmCb5A$&1;Vxd5NtnYuhpOS5aZ0_lFP4zUixgfwqwaeoxle`wF>_QFb5;2 ze{KdkXcXinRE-0b!9u@wynUvK6a$WSb6=+F-vmVO4;;5F2U^dL6|EY53r^M;z>*EN zdK83(D|VqANDDf}c1b`H$7E+r2Ln|i^`c5!P|V6p+SH@y{cz3yX^s8AHyA&v_g;`A z$+WIiewL|_D4COol5;Xi%pbGTi(*jw*8%bPcWr8eO4Yw1Fd#JBw3lE{>s#YkxdW5MhSh+nD8HcD0?i&L^OAaMN)gHnYvg zSJ2t7a|hSicHr&-!m~@RhGrWe;l5uvhIe9`BMT|D-0DX*OqWDIpKM^3`n@mZVW5EK zVS#Cx7AG0tkD3nC#6;pG&Y)W@C6&^UOk6H&MH&`jePs6zvHJwF@x{ddV2Lxx+hFbpqjZl+3$OJ1ntOm=UzuJ_xEHs@@Il|7&Lla87 z-Zdg6f!b#bx7 z3Vv&1*JwXJhyK8e;yb5gf_^Me!_yZEm4Xz@6o`c@%1ldJ8+zB4>wFLOtEQ6y;z>_0 zstP!8Y|Lo@t|fPNh*V;o4bm_sbaqX-0SdAQ+zc;o!`3z0`K*Yf 
z5dukNfr7LotJds6pDY2Nwab6fan^9LTQr!d^8^9#%C4EP5!h>U5ebrfNGq`B%GJJt zb0xCqwbRS=NMzMPl(uS6WC>&?E>g1R7uAGU8c`%tYi1PG?S1s|G;3zZ+FXc4OLD(|cbZ z7+=~et(H6(>V`p&?JH(W3|6j&8S7i+_7e@C+@x^6RH~JQKZa2>cY1H)^8@URt*?k#mHJPiJ4*d za<0E}%=9m4fkIvNcsHh-$vH)fobZGO+0s*Fq|POmG?+sA?7x#n73v|7gqW}9 zTF6CURDZVS^S0(qT0{5=L6lA+ppT`L#N5nJ{cK5GNxsHsR^p8kiWP@EleaUUxFPe1 zzv7RY+2!mq^=U=rs^~1HVsxZA@{?0XlT~vV!)of78Jz&-RK66JOQrjwS?Cj_d+F7HdUXsGJ-Ydk{&!k)ohrVMqk2_TY0u7>MVfX+LC)GA6Rb zNTUNwW34?JrcEW=W>OIap&WQT%idXMo2>;KXR&eSJC3BnwfcF{N&ERDS*WT>`wY_N zkQ%@9ZM34aY$ZW!Tq$3Nbbcj0IMsz?GM;|gVkf!~pQAk~n9bYUW5?!|BJgm^jrxkAkDavwYoQH!-Qq3Z8U;HUNrNSv?;S<%8RLX89$MrLQ2Kq)as5-{>O)QKI z>j7ixsH)!KfBo18nn50Sz9*yV7q@fstZv8445TlD*SJn!qQ?zHUYXsRw=CkfHNfVA z{+Sk{3=8kll+tJ1a-ctOW7Pvo5y^~wDNQd=9p3PQ=QSsci%pm;%m_N!5noVf#koI6 ztXtM!a7K`~+J zJ157_{P)2b1TuV&mQfLQFlKT>B6f6cadj3+teD3^<|Tg=j;kgA+5Q!-=7w$7M4^*K zEkiBCu3VY;2H(8eRaM}WtGXAqX^C+_#VpB!tDT(uCxOb2N}etmX*+IV`w|qKto}TZ zuebgs!eM-9H6c0u>i6=HD-q{AQ%{5B1X=R^Jg%M`!@#+W<^e)g}w57=)D znW&lgA~?NZvNLoaSwr3os(oVS#(`tUkBlLwy2T+TICfUlwAxJ4WcKEO>NldQQmyiL zs*KYFJL4iDmr@D4!Zf)qq(VUyV+beV*KTObdhD}BlI`Lk+z>Rv6iWN=%A;a!>>vUP z(n>9d&0KcHz6g$JEiZRi97Ax@m9uYR4gE&`bTj(>>O+*_wb|xFlJU*PA&H3ZGs_Gy zi#NidxdQ-fV+uty&Npr3S-6yt%MjsgKH`TN14m|`3C>`qwRWH^dx5jvIm#MQ2n>sL zVvnM|(vSdg#=aKMHS+5$ic@_(>MC5S)3eW(bOnqs{gs>Ej2rB0cGb{yn=Qy|tIZL3Z$RoL-;eiST5zC`DBhpI!u4kLOifdQj;B zBNy4*TCOi#VbH}Nu+`F9s%g1f2^bKBa0sgzODvv1F5*fOOW=@v0EumK^lT)cIBny2 z=sEW2-$WKCBpPxbMUw#53R5@GEDxbD08)vId`3SlC{;&B9d9)SfRxKHE9dbT4I(*H ziAm+{^myDLNe=T+u6~XM63{7dxEUy<6u->bcu+Y8CbI}zid|g|>wW&sKUUrMOse#} zPP{aU9${-DP|1gkH<@~OeNJI83s;NI(QnLu2nNJ1b?Xkvo0}uJv}X1~wiGF2D&K9; zMfnmdWWVA#{6DkrW7NyYED>*yY=nP(?mLK+rG!RIJE&lh#FXV>iNS&T=6KVpmch3b zR0`PGiN^6i0YJ$ixQZ`0Z>;QTM z7Uah!O^S4Figb~E^;zR9Tlh=2C|f2X8(G$KE}zQN>BDQH%7*`6M3cb%c+vNRD?iaF zh!&OVkytjY-J#5ZUN)yH^RP`)S)+}p<++61T1i1N-~iRU$d%Al9dqV}7xvkXyOV)^ zXvM1%p-#;Agaq83gp*hKlQB6C0+WS4$f(K@mCB6VFNr>r-~!}ZvTH|Aa5IlgZo%(! 
zkLYjVZ<~B1pN5F_BTO2bx?TTS5o5uaST-=CB=R>JqGkh*p{IyKmGUgi= z?;y3sy}rxztf<#8cgU@~;|vQW?g0rXaAI>Ccd2d2dp)gf z9I}q`#W8YlU8O+Cc75yu-192f^SWm8R;KfQMmsth=#;Y=L42fA;E3QyD4FNiWXDVW z7k_iW=F5=>$0&amY&CkK83&>dJ&kF65J6=n;b3#3>T|ozKPyZe>CEUekD|S*2&8|~ zVMH0q)d7|0LwCor8Kp86KLhnz zx*C_4Q~%FZU5IX#2Dq@8i3Cj9J2Aa_8>qAS_OL&iXne+K`GH#h)0GxbC>H4exr^kV zx@hdbM?Wa!aFaG~zc<{Z{=09U{&#~?r4B0f`tl*!ho+<%5&B6V2X(B2oPLQVwV`lq zW2P>Llrbb=94_#a$yHSD-w>;>#qE|JCav&L1+cM;LBTSFy(uZ)o7)|@_4eV+lD~Njb|JXU|*R^jq_$#uD}x;2() z&z69PoVv1}IFg(jd>`zQpb^?AlUed}#IAL%$(;_T7+)%;eMeF|G%T)qsdiT=!a6(K zmOO@99xpVv{Fj_df}!$p2J#HdjFPun-NVBj7pv2X|HBh5Sx?Wf+d8!l3?2txyK~k6 z9|6(aFY=&q+->2pYFWH;Ypn3lc!2I?9&C$uEg>7&U;VB|3cW(ji$E`v`fx=TM0WMc z9cw=z*1#`5Kx6&y1xzg6r~xEc=;s$tsSuZtE*MtS$XasFyV$ox`fLQP4sY$lH_=3s zabY$3Y!de!c==V>Xk59O3bB&|7W|vz9~(Hik%B=nwib|L2r8zRRKz!+ zqnWfTL@}@8>1#m#Ndk;paBo$*;vY;o@Nhy|I38ys2m*+2@jh#%)zzb5qBo1kKzx({e&59{YL!}wuXqhWsH;Ny{_pO9Pmak=KS>sBR{nI?#A z6H_8I^5=6w2hH0)!_x(E;;4lQ`$wSof5JCcel8UAF8eHngp(gBGcs78>X3iqo1ebp z`_?)wZ(gBfSfs-o?0gM~Zf-H}7~z~6A2b9d;FwN9vm*tQqq@S^Y5Ti*EGkV?jTSJ@ zg>4&i0_#H%33f)Q-#x9L?%nU_r_FY0*K#tG z4;~MpvAzbPiPoeSiF{dpA!yI(;;q04%6-Dh@vpIW51YPBC``qjFUe*-8 z{aM+1q%bU4B6hGoieIY{fX4AjzYso^fcT-ud623cyxw=+WqruH=)wvw zg~ZgfBqr%;vYTwh9b-5BP!OGvRyaytMgNH`hjE;rQ~)aqmTUMz~SOGX?E_kM#7+K{*Mfa)>r2H~i@$NtDLIPMoFGa4xmG}YrR2Ro%`N(r9ta7% zzeF?L2S-=Y%HwxTd5#H0)Gnt*mxwf!eI!`gK3xkqE#7C;LOe!h zb#T5Oj-~Fa+%GkM*i8S^zS+C3+Cd2sWqi2b@h>bPGpTyxw+z2s(LM38&MK$fyw9Fi zCm{({kx%KyjbP*uW&pZvHsU6J-PC<1MbKq;tfGp7^EJE3qfA314cW}Xu%Kg0rt8ES zv-Fq=`k@>BBTaLZ!Lqx&&P;yMW#N+Zc)WCxJRL6fh`|YmdhtgC7Fz+A@-J1_0_I4$ z)A?pn$gQ}|FX=dOOvxw(xr~&H=ZDK8jAEkh?ngf!4Efmj8NPXv&MTI%Je8qAt&hXI0p)qf* zc9Nw&n4T~xC8wwI=ISkf0WZ`O%OL+!28N-?0V+2fK6a@;&mUTXlQ~s)6AQE13ftPk|m)P-M!Dwzevl zwGQ<)BL8S+s7o`YmPm6nH_he+^%qp_K+;`4V!}+Pcsv%Lmp|5nE!ZS+r{e(CxLh1k z$pMs#m>B&>rlS@q;Hw)oN&llDxnztFeL(^@Jj5P)#s&m4?xn%id;)YU6quODYJNw@ z&imnutz%U)(6pyG)zr!@pE-Lx)H0}UtA;}ACC;fcw1JNqgHZEpwRC#}cY+WXSg$bG zFfZLNN8jsekivzJxuSGGD;x$Bj?}1lv`-fSF#Ex;B=x^p073rTp?|l?(`E&$K+=uw zy?R|jOU za_~FD29j?JRvbkj{8iwqcixx#c!~cXODkBoswKk$G50m1{80`PLr6k2(Yqch-Q_}VQQ=HbmE_8&wsjz`n;yr|6p_cqM zof^p4D`ijWCReS3!ZBBpY~H)9<+Zp#|B(ZJ%hfGHLNz(e+VYQi6Zc8by^y3KyWI zAy3}Czo-eA)foH6cq2s}LC+N7iY1-#RH+^4^^9S2TrU?u6qBXkIw(AdCu>b`z=Q6^ zQ|M5I(0)0#!0`}eeA75+YlbF#<7YP@|IewJcW?soj!;mE<3m9I#M)cF?zXY^p+ zHv&?)t#VQxsId*)`tlJfxP=z;bl4=uBhV>+43c=Z^_rA^HLR%MF*m0mxRwp!w*XBm zMzwMS#?ns>%UyNr1Cj5)rVpa5*_5+UQ%4~<=rE%~Tcm>*kKDC#E9B6S5tPHMyM*?H z;K@^Y&{X&7LCAGNSUUoNDp;6|5?5d>{zK!*f zX1kTu-q6*U+oGeD1LP+uq6_G}-?&`}2ycV8zemrsiKXstZ%3|TC+!#1Ecj57(5hB_N3Cfuq&p4%) zuB+zP7U-~s4I1BaH}l&y`v|;^_knoterDP&UhMmH+H6Bx!&UJAyr!pX5{DxJ#nALT zq1{tl)ixHb{rs7$Bc5uFB!-Dy=aQfyq1R0;q19cEw@U5gRT!y7IR&1a~DdCyJkjr-FN( zZx?1PuK6!#phVI9PlyhM-VbQscQ$!@Zl5a1g#8~R&xI!Jl<2KuD~yL%3XK?1@;JMl2`&SQ6ef&#RM^TS;G% zYr^WAMYwF@SO4|HkFQEm2%^#yZe4$g%HiPRpo#IPJ4=EU8F01Ib|j6+r&-4hFShRC zzy!_!$JC9L4D;S&yS)sporw(bQ3x8rR5Nnu4W&Dm6|f52f%2TN5JQ_C+% z=+A3}U{@#=-(kFKNLLfK*ez5e!f6(1sZ|3t<-pQt>NT^03QG!0K#jf;rwy^bk;tpK zQO@m?uXo+r@474c_pkbD^DdxJQ$mae{N&GoB*js21r;m(l)ql>K;$XFo4!9! 
z8C5C?`xPgrYCuWMg_GO=AlWvjAfp!&*k5~YhOJDjind2qT+b?7z&r3 zd$k?#RPr#lA`;DNXe=hw&?sM8SK`#v;n{u1i)7%~6&{XxT?^MAZju&aA+S^Wu?TN| z>NA+TEny!j>cPdu-#M(;T}X}iukNS-T(kwr$N3c4q1aY`cS#loxiBEw3N9mU?(_X2 ziV;!bXE|HEb!@_2mCq}3uq+nv9hv=SIfjU_&92@BuQPa;Yz&}QdaLpusg`3WR<#e|QmF$Dj`&X4c%G#Xd+t&!Y%15rFX)y>ylxgC0m0jnD%fOaG zkDVr5JW-hwfl?#Jl|Yq-=|p8wMmSBQmAbx{HWsvtPzzH3nJ4AF8CzbX03#AKg$b3d z82#c-PG6!)DT}=io3as}4FA(l>GE8u;-OOqpE<@++z?t`-j|tUoy)?2yW>lj79q>#C8a=tYpIQ_=j7zf)c3!myHtzqTD=+*Zhi4XRp3&V{G9<`E5e9Qki z10m|%u}OhCL+}0qp^TEK=#63f6kc!?`7knY9mrkGT4pku@%qvN42deap7Z+}a6oOe zk-h9X8RX1l))YxyH1rL9d)|oOUF@w`(EoHu{xrQnOMezX6!H`)J&sx+Mm}1N^y~F3 zLsB>Y^g7)t>V+ZOi?n>reoc{HQoox%+i?v@O-iJzd2O;F6ep+oe##x!ibSz!f?Ly@ zIJuJf$?9ji;CT_wiE}o0ZO%6VZlhmR*CurN(A62U{kQhL2s`_OqpW@V(qhJ8JA+Z) z54k5ofxF>&=RLP;nIaFjFSn9K*Kti+#TFuyC)m2!kbP})wH4eP*w98QikBqr7@V@P zWmK5UJi*>2{~%~7398sg^6Vffe8-lLaJ=}8>!yRoXUWA6T1S}rD$cM_TkVR($CB9!9c(6fo zMN>$=fb(yWi9*D=1KgC*JIX&Kb_R>!bK(fZlQ7+Wp2$3Lr>j?`oT5OjqDp1)R9n*> z!0*JR&wh+R6%+VT{LBzi2Lukd4ExYernKG9xk{Js^eX=ZX(a zccdIbOqp{tf4PMjKn;jS_)ddE>ougyoyQ)9>uoilc_TN(-8e38>IAKp4rRQRs9~Ct zFhYOPGVb@w$Lps-*s&uvCe?1m2oFXP9Zv0cY4STL9;;9_D9WFnxcz<{bmr3Jiyy64 ztFRG$v`#1imz-py@0SP7-d`vF&Uyyid8ouwE6i#CAov)IP+Gd~EH%%M>t1y=1=l-k zS-lM$`<&0Fkos-awd-(B>9-NOG6j|C}ZaGEu^KeI~&W zGCC^#7M(@YMFVdz$zWxt>fnx@7zXD6nA;8OP__{Rn}R*jVJ3!?@!X;L9vk)3?sM4 zA%{+^*i9(=iQIN#j4-4fc%AK2yVFQrvGLJa1$3=q;~*v;cR2#|D?YVg^W7W~I zOVWz7(MDX}Kc(~^JxfB1b`pn^d7-f2N+sGk$v;IU%I*vXy)eO%X(tzYkQKhgi;SvN z#3BYiH1}KtqMKt3Rp$7d`IdW^*viYRk&pW_(~Hfn=L@a{C|#nUz!8ku9MaUoGV`KW zz2c%GN>?diuhu)B$DuCGnsmjyx&7Md{-uUzaZpsOEO%Ly5R4YQ?$pkhP``1$XMZl5 zSl|7X>>~bC*BI}HsGt*@GW!51cZbO!=1Yo4dr`=4vhy$VH1FX^9$h$unPIgq6hQlo zEROUNt9}6`cbcS`hwu%9HhWw$8)W033i}VSmMy^@oQIr__sbv0h>a0k!7F_9c?rpL8Q#aFT;sC>jIEpY;YYK3XkXy6W;*~BkCpm`Z=NYOfpyf{ zIyW*f>D_RqRgePHmRGTZe^v@j|Ef z+6l=c3PJMv5+gJl4>>-bJ(%_ z03{nhVwL|&TlUmX}r`Jc5 zPvI;XBj-`yjIC@Rx&9RxkyZE|(Nz1K@JD5qQwR$}u+Ki2op_cxpIi`|F^@@i&bWvjW*a~|{8{kJ9U*eNb~2I=2KK8Px{ zke&&C;0wajI*|5ymqU6}ZV!AT`#W*`+H&n@N-MIrh!BN z&xv6cSXWBgF)u{R{iq9wrgqbu3B~2zBWJ>hGxM7!0+R!Gm(lBA{3suyGf9Zq@cbv# zfM_sjpEc2WmUzTIb|29{OcS?bk%0Hjwtb?;-7}k=azsn7ht_^GZ(j}oG|b8J9I#Tw zg*iBeoGuyHWBIY1QLJZ(UOGCK312OQdE@y73qMeRM1or`gRXQ9okC%^<`=#B2w<9& zt;r!|8s>QHr);01dK|d}mxU=pe4MZtwgsPN=yYU|gL`P-&9Q7ulztr1f@dMz1s;D9 zo^UaO&d0JM|N85mjL@q~I3orklIo+SMpp@rIv|m0@#W^tThTmvG^T6cYH~FTMr8k! 
z(&RHWqj|R>Axs#dKOoDtZ4DR8467gCIEEyTq(la94L_E%dJXwEzJ|6r!tk1DIFnhw z+B86waOWQ$Z1qTG7)(!vE)W(r1QenKLyG}$@xP{eXpAfzNDSD`yo#YB*LwcC_%-^B zSwrVz?w?jDD^yigk zC?0V_98|oDN4&t7m4Wrm6KXwr4h+|{VtjJB(~E(^N`Qkjz*s_mX}?ic%=k#0H#?(y z`n>tB3LAs^FI`U~b8px~Z6G5?koMSgUM7DqL7afIisV;Qwj2O`TY%B922G-Zj#30M z6H~b~kE?)%h&i|f4oX`m;%|lNRk4^cc5))zC2G{e?*NfeQB})MzRlr2MA$b9=zXGB!jq&+8cG2_FlwsLW`t!Fvvl>KNb0{{N56u8DBn#A777ZgB9Ia{x z&{YipGrGgBamsJ>3=RVPY<6OK3ssm{b$_5AoL4jURw}bd-u+VJR6dIb*N`Bg zW^r3Hrqp5-r`(g=zjSIhQZnPAE;vK^leMq%#8iq`L<7-Ikho7RY|>QB{^e70c9+_i z<^`GjgbFx_4I~qN{8hhKW?hG*NpfDxWtBHg&=O_W*pDq;VwU@le==tYv#knTCO06H z=>sh8C6fF>(f{u&bh2lsrr?A+!7CT|t(@+yxiRtm%v*u~KT-<`b5vltF<_mgW#Jj- z4NI)67qR>oJ&ntE&$lpG-D5NURBrpAcnmvvUe=O9rb@X9XU#>5YUAdhtf?6l)Gs0w zGrA+@!qaHt7k?zOC_HJI5<;11lzQmSp^q-`=BVv#020%K#2HKWSsB9$z4I&GM#R#3 zvZC%b`Nr6kV7?(NKr{p5_s4uaj}yeL@F>>!Q!!31KI|f^SVGm~XI9|n26{?+9E~;t z)JF}%tu_?Vd9931DCloDB!_A2P<6uObYz`woXKX9p2k6geJ^YCJF1Rza3C!xb{`aK zYqRBA7zC*ZX znILa#x-B;sqTc5_qDz&{0Q8GrPP_fU4fjzVsLI{iU{2-(Eogd1 z9Ndh(y!e!dn7=)meLR6xQ7BlQKyeNkg)F<5tXTDXM3!DRL9Kk*3UUeO8&eq5tYcC* zFcfwdL1X5RflEvYkz#+XX#Wof@qT9jUASPj3m5E5p0k{;3+c6x>y|$a0Znkca*Rbc zUa&6d92ooyLfvuRT({RPUPYcPF zoDWXP??|#QWB(wG?yFXLq32X6Xa-0xaQ=!20lyCucPk2dLUh_*lxLyt@X?kQvj$9> z!%N{py5ozD*rNmK^oGR+=Q3eP*~bsQF8t5uMHPu=p2u>Z#swkRp+w z5i-gYfVBicyjV%5ptVSkmbKuIT)Q@U){cO!ALVcgE{GtIt^r&=?4KJZce|DKMdbbk*Hl*3i=||$p>iXA{?RqrbNT_mb}#5W zxBmkXvE4pF_+QexrQ&Q6q$nrMJKO_nOs(A_T1?nTUPi4v?(z1ETti7jq53O`)5TMs zLS6Vqet$QSF8xdn2C!DkVd)CD4IeG?_(<@>4pCa5Mc?_Cl?@2d6K9h4hsDxC4`RsB z(Fy&>xX4-=-gI5=HA~)%cF05hpsA5Qn(!18oq2DJ3#~F zgIxDXeC*RZbaS$oy2)OCSU} zZhuifF$fQ#2mUU767pS9gZxTbCK+A&tcfav4R<$TJFI@+T|vkddPNyv9un0Ew(L5{ zkwdY2i5H9kl!jyqTd(gf4>4`}OBcun!hV2omHgu!-i8hTy&+R&|F3Jeg;fY`@V@}B zR*d4@nm0ts+rMBJssWPLd14Hq$AbJDcw^>Z10LqIAEL79+kp)Wb(O`?R?byuib!b8zq>n=WylKM(XTLF*uL9)V;$3Yr=0nTEh- zP4RJeR`)@ZaSb!u@g#ZF<1nB8@ma;vM|TuxwA~KqvVYIhd)};mk@pY4Lwi|LYB530 zk%2-FDLe3$)K8JS1L5d{l2rtpoh4Ht3*LEnc0*5%R{BEDt5O@jo1Qt6yQ7^r6}Wle zay@O($lqyQ<0&~ZP-bRAy-DJ3d}2Go7?`r_ag0{mJ}pMq(dPM^!pAWd#*TkW1O zBhK&X`gAWU=z7)w+SdXZTq2(G!s#H0B-}0=#SuSw`VQ#VmMSxnj$OGp!W^I{hzaWe zRIGj(%jM$AMV?B;pbquyEnmmh^W2ZCtU_J#VPTX{Z8a3)t#L>Vh1wNQ3!@Sg3JY;d$m{u0N~EBf$zYhx#)F1N5;Y><#*$q`~OU7 z|2?h!&Csp6;{`uYV*D)#tYX0hl8p>S)J6DK8Nz(l)kSYpWo!#5E6T=kSW`oc_E3UYN|Vv9S;VG3v8d=imQ?z*6joSR-4JW%Q-$7|&cU$c8P;2WR@Z*qa#ZJ8*)AsksW;^)@ahs3p+O6cZg5qe> zorD@h3zlnNe9SqYNz7Jdc?`9(G>s*+z^pTik&?tJPpcthngIGX~L5`@xu?F?ZmbiQiH-IA@qYgF=`el=FJX-)=LoC7X6{!Bx$wPu93 zs3wJ(PZpY`64%$L-4f2Rng%%U3K@)VsAkkl4qiWEzO!j==RAw1w`o|78FyQw!l0#> ztMjrbBV-Nrm}uEAflrq+sg4#)m_^pR{$hXX)LyIcnyoQi6a3#?fZJLCa-8oPk}5AM ziP7kIOpj{3plk$&qAPoIq?SQ6AyvN94mo%+BWs-p95dJ^Eu}Qjogx zkLAGKow;$W+5d}+J>>evY~H{h1Oa||9nIcs=V&2g*%8xjv{p1LRs5{4?dhMO)CL-1 z)~(ixOJu=)c>`nwjfc?iSFN&=)p^b{0J7XOdzlYq5pEaq;xH!tJhvr5u{T)#d&4~h@uos&R%1nX+ptIim_yMQxrPn(Y#j$|P~YMg$SFK{x+=}Zu_mI~ zOr1<@(UVP#sQ2)rhSjd)Sy6G5h>HT?!)pw~PL2g|*K}#%lhPiDF|!M`y47fJN(p(o z&H?tRAS+?4|7?u&HfC zKL_zmJk}c|>^tPM?_2K^9Nyf4qfQ9X3qd{H@CWlPwQ@|{1tAD>l!CGR4X;8th zHo{L(J(I2tK}un-fz`P9q@O9uM0zE~Yj&!&NHohEHe4i{cEPygmBb8S#SKgnFeN@d zMqgBj1Ark@PcF7OAmNU99j+tzV0Ghmb-LRdKT*}NH*$PRaL-OE=A8aAFHPB6dk3b9 zO5JdpUokei{9%#7%H}Z@Ba})bePf9^?HaK$CS0P`%=XQstn@l}IzaH!evp?Y2#sP7 zd>y0!oP)dbg6(k?zWU#t?PNFz%=4EE^N9)2$;Shmt>sa+oB6Ps<(62I_Xu|{R`vTj z|M9h8xVpvcZZk69^F~q-kO7js3XPi~Y=>O))~|e0z?zV*0S{Kjh2fg)2dlk1csPT+ zJmHbq4DPpwSkZrNzt$-EE!T4zA-A4eDTO6x5dLT*h0;y-ixuFs%x^w?ldnMdC_&QZ zDdGqH=ZkrCeY3s(SZ?Jas*~HgAJAS9%dC=xt)NiGKqk|z)7a_(ES$irsvvKwdt*!HFiIILA{IGbV8I#Y>@#)knhS zct)jITF~g;7Ztf}o9@xLw|pkJpQ+y)j$N^LKu5z;7+20eOXpzQm!OvItFMLk>d2RD 
z0A#c!e~BL8bNUYL{o+m6_wsuS%A<T!H6utm5n zgsCREMcXx^ZueNgoE58V%4^s7042oMvUHV~m^Z6ngrsO?mtd21P_a>9v>=?~ifL2U zcN$Ow*D-OKlIj-2%~A=zIQPk({;hdNu~;nXM58IOil@lyzG$7x)jm=ZN`X)b#$r|3 zsgH&DV@%~xM*Q5~uas0&TPg8KnKSbKtRJgs9y`3!sQxg8Y(sx&^GtfDYY|RE|29gw zj8p;H??DfTp?&r;1v~(??mqu=S;Jf_iF7j}7tpJ`5YjMo|dANqH?k(m!%%!Wjw!&P4 zb|(@fSNdpAW8n+V0$t5rQ#Ah_@qfBmH1UIfOoG+-enqVt>4~q%2YZpx+A6&@yy-&T zQ}0*X0!l(WC&OR9xT~LKf6_SkhaEthxrlZ_MEa;OFT9` z`%i|Ko~0qCG)W@ZG8&Ndc3@>dy_V2zq0WKy8wo=}C*fgnkNBa z21h5=Y5gQo{OnDT1?j|8#9WV&?>P#tzN1+vtohiB%aanRw|ZS04xT!rPmnYKE#ulu zu^gfL0x7aqL@60uvmj>teVn}hFIU#L<9n~9{w+J>T&lm_RdpfE=iX}S6V7=f-iFLR zFmd1d^l-C&Kh)VR6BuBgM}kk)m{Pj0OE%ZC33ucb;TzqV6!i8k>*~EzoXHDAO;Oij z$29<(L6xO!B$j&ma+)SAtVa*nqu(F`?2(oCCg3Sd+N!J730xBKav=rA=&rp{3t3HK ziQ*9-b_EIk#&dT>y%t!;Ys^nQ5POdUYF?uf=TQ=hxiLwT_Foqu@%;V!zB3;*!H6R} z2U|~Ix`a`;S?=Z-+~;22KdPtM3u{Tk!Pg3 zgrko)`M;9_*){tLwrLDVNg0ZwH90=_qKh^@&K}NmTWgRDG8WBTSMKEA=yWNz=F&wh zS+&qv@s}APlB}H}Pw0(T!k24a(9Qd^(d;_gfJ+rag}k+l&JR!*;obuch8Z+lbupWv zOqU_rTl-++ah9$-Zm++2p^_@a&1h1tJK`bI?qEG2MLTV)WLa_cW|~hiKaJr1ts`aS zx82fnccV2Ott3VUhpg`iW4MUiOXOQPW#3-^A~KQA4_9{RsF)9#8Ut&w>l4B!@L#rhx93)1@QX1r%WmWrUtAQ93r`p+ii8$wN|5 zw|XJeq8(!dwCs#e6MO&xXgta6rBS{!JI?ge~;Aw!evS;=(F@dYM7 zSsJ>Nqfv~mrJD!Xe8`Z}2u!pW8MCo2+dk#@_++1SUKPYg#v*6K2m-u*g(Kp9zkQDS zJYelUZt_~0uPi317#6B8_=HQVHvPPN+YBI~ny_UMbCte*s0+Rhji3tLcP(i$r6jBK|g#3`+zo5dV7LeodPJ_*--q?9hK{O1S0;t8D!HB zVoV108%&kk>5ebWHvKBnTHlK8xgy|dB*FMUJhIyD_QkN)fZ^w#*x-Uqfac&F&GB$# z4XfnyvuOK=)YS2LN=0#o+y;(_{8@ai-)i$6n9+9MySylKC5!)_>5uCWYJ49-24NYV z=NrLRwa{XiiohrnS_OD+{?SZG8w`oeFLIyNY>84Cq_DvJxs|N`3YIa+h)g?i*4z@G zd0g?RK30i2-e$xAMa^RFwT^z9EK*B7-(-l4!JaGYMyDG|3r$?UjD5}q#jixCHUw{D zDBs2i9lLdy zvnq{R7i1G_azysYb*$>Qa`-!Gx`zb*wzY!Upfuh@b+7Byfskg%CllilHp<>EG9KC+d1xb)*!hS%{Z$2m_py&K!4$ zHhZzKu$fx?5q0~9hbPDPJGXjLF@5fOwgFFK9j^@P&nhrqrLRhj;N1U%q+)n0W3Yx9?=s(28QQzDqoW3z48^(|_#wqJw<7Tf%A^ny5QNjbC zlq%NrjgmM-Pny%TbOEOv2bhv|n-x41>mt6-QnR{@jR)J)h}d`ki}m zpTzHW6kZnmI)BOl6};GX zX3B6ZcUB%#ng!T#nc3m$+v#@iq&<>t00rk?KV%N9Kf(iQoyqE+kDl35zo%5zJtX>d z#nv6-fhA0XA2H@$!Zr~~Pl`oY#$Ep{C-}{d5?fjD32i*HF5#RV_R#cV zVTQU|*CcMJ#i`Sr*u#y2tjw;kjl7Vsg`kqyv}gREQS;nfTnW;g*F3{o`Z(*Ch!HXk zh-&zO59+fVOaqCk%Kz$P8|F|=QoX{bRV5AzS?F~L3F{4Ng}xf$q;fKyxUOebWZ@Z@ zqM`fCT}*WdK&xkvmDPud*&H=q*8TuQO;#Eg{=(`1O{oj2B{>iM8=*?{+Gf;Q;~YmS z3`Qf!j>$;wG_TEf)T<*Z8>CDAlriwWxVJHgGM;1m^+e~$;y}MUfoh2vLP^R6-nd0hEe0E8Y1qNZ_=!_pCLqG@L2>iVg*%9asghp*(qki+R$`0e2-=Ao$9gSPqMI?^D4? 
z7@Q8gH0}EpJ~O^9oc3YfbOD^uVY>F~=sISnHr%Cc!uaF9Vs>%o(67e>K5TM?CSZCb zKCcrs>)lspxr$lQG~j%+jP+PiqXb_yvM+69Y2d!tO^G#Xa8vOTE&#-!#wGE6Qdhg( z!w%CyvUs62Gq7_&Fx8>(F$DAlO-|ap#9Z>3Acs;9Dd&CrPtI)Bn2oH|fD$|5Y^^lx zz7OyE-LRjkP^U8sVVZC3Kf@G3Z{oI+sh_A~zaS0N^9P5##M{c?xR#l>(Wl!s+nYYqNKUk>8xF8!@nzBx5!qvo5_rJGpnG-LKV+YN;+h6F{qJ9+*9Z-MT<@1mEJ40C| zEb{=_)0b{C&4@j!;>9!mm!pURF`zuy!(ModouUKZP98E`{IBkI+_>g_oS{ z-*+SUKeGIfZ>-AZJVzvn_<7VACSGA00!0-TC{T|KOc|}(RhmrF!*px zP|zqqRhf4CmR-ttGBb!v18B~3E#Eir@S;mDkekF#S_k84{H64wvw1S)-+Lk-qRj%L zDX0;>PK&0y;@W*wVaR=>rEg;Aw2@gbidj!3HhNHAC)f1a{}u=soh1I;G#ffWa=EM; z-8M@X{#x`yRqZCmT;sP;dkHp*20K#1uBA+yJLqb;oVytGlxi)}|9~B5hV11?lboI2 zuN}sa`UpNIKOg&GODl=m_m=<@#FzmWt*|nO39Zb){k?wsis65G%=OgA*N496c}O7q zZebEOs7kcWD+Zu7aFgd0-AwR8 z976GdEH&Z~i*ILj;zp&?)2D{R#?pq2ccmG_hZQ;92Wd8W-B&bXwgkIy_#FI4Y#dC5 zlAmJ;Cwr^B@$19nEp6%RFBH?J2MvD4?UUqTd>Rp3D?JIyD?CPc49?~Rfb#*hHj(6r z=fW|+FB8DXw%G;G^R(_F zuNMzM91Uz1(lobmJ^T_*bi9>*_y-5r?JeHd%{T0R|G;Rvin{D?x)|`HN`ux3hCqMb zpyj_GIj=Q-uoTmH&)tM2ev%SUMXt%=C|yx)(l!%<9W|vfwMdgro;s-VM#(#+>E}bD zPHdlCNeyOmly4S?!h)YuVV`+HqI8V!c!^Gl|BHkB9)@8`ad)b96b0Xk zy888PcP*lh)%hZ95?ur=Jz4}dT#^kl4of9JGtdxj5*0=BQZM>5`qLlU+g2q?+NXSTFi^9{jz31Dexh@2%o5g@Tkdt!foZKXWSkt`w+QCE)>+$#49 z=~fSilaDo5WMQ_`N%H(Midox)TdI$oy799_#2-qie_?*QXKIdIidb`hUyVm^<~9Pg z`!0s=seX@RZweSVzJ7vXg*NQIE4*I*!^3uOGCOYFqjD%z3v5qvGNVHUc=a=sZb zl37IaT@yJ42N8|hN)*8kr=F0Xdv*9P>>l?0ukcYbR>&spBm5-PQ%2Ui?@~yP$CqmD z=q%r;Y${dJ-T12|+JFRt0U2_kNy(p-Vw8Dh4o~KghOFHteqY{}VqRfGIi^0WE(l3= z<@Q{U;#Agn&w#RY0~7ls-R)OVLHW$T#pULud4ps**?MY2nM!EkDeg#JKG`tnBv#Td z6@ig0C0rI6d2P;gWntL|&5;Bn!mm4nzoU}>0ifFm2rk&<^+g zdUvFf^y5gwm1@(MPw@IM zJ`=24J|ZK(TTsVlyccTEQvvT3ENN|M$!=rJ2=A>4ehTlA#0g?14${^9jFhjW4M}R z_yeb;lc~TdzA8h3J9XEuRoX!w%l&=^l50ZFguMf#$mo9SQa_>(3~`mU3rS(|Nb;ao zYP+B1aR+Gt#ZbuI5bKJNjy3h%&f01t=d-&%^WB|!+wKObpXscHED#$DI2Crcs;6g) zMtuyvCCgRGwuu3)k)U&YW%9L2d(N)o@x^pYsyq$~-Ds3Ee}MV8L8V^c|pyV{*pFie+bt7U`@*N3?0ai8FQSqkem?jvN{IJNY&ffiqmDwW${+Uh}drWGUnafM%4 zn^?B_4YPIs`%eGYc+ZityIUSgN0#6p?U}Rst%PBT4dKl2QD6+NXu;)#;XNP&E{KOb zIyoL;p{~;%AF}Q9ouEJ3LzZa5EsruD#g(15Hli39Uw+5=l-=1llg^j+_Q+`@NGm|? z=Yr`yqfsT!SLV&XTt@;czF$$lR>leGrsn9BasXI*B3t*?oDKAw(a6TU(YA0gff+p9p z9iXU>^LJr(k6=K$rj;m-@bMRhV9(ULY0szXVy?K{(cYgo+T>~i&~qWXuCbOs%V=sa z{k4$xzFjr~@nQ#f#Y^Z1=<>zdC+v)Zl=ihk_-~$_z_aM3bq1?%JP@=ta6Y(3LuP-0$%(_AnAWRy9orvPK?J_6=-i{d% zaS@IlfP;CYbp8l@q=J3`Ji=_F48oyrdyT{4ZHGg6fqYcXNaNBlM2I*0@OuBqXRe_) zQTrPPAJM&Igzfu8S@1fS@L7$%X5OO5V*6j9Pl6=0a_z#%7wV&8@~o4Kn5)_@r3u|N zOhV-dI6!l56stzo`FgSbPbzCOf)?#KhP7xbxv3Ny5=$fEc1lU*XBMfNWm0eh8Ky+V zhb6=x$}#0EZ&N46#bKwN$Y|k8cK8@pb;S|X38k5Yd(A&57Q|}W1hgxLOH!vS6k$2! 
zWIsqZ>%n7x6Hp?OU4&ULS04FcQseyy^EUDu_f-geH)8LY&qc7%Wf$A&S!k)`+m9`0 z$!i2_G8Tyt)Ra$QaHg9XuRqm_MoBgu+3fvPbPzI@DwCQfxnok(-EYw$vn4g(%g)&9$Z)uKvVI@LjND$Zb) zL`;Ls{@O;>0ZoNXtp4mxDhm2`pHIx(^8np!!+~ulVm?$Ed?p9afm@lq*o`Hq_mL(z z1DD7n-&}C7%T@}>@7qtKmA3Yt3e;=O%3RniVyf)ZyEbh!2Aw@Z9A>qO$G=LWU8LPl zRQA8hwk-g{VbM3i6NTK#TP1(?qrH8F1O7`eDLYF)PVrB)*7t$cz3C=wySrN;M-~23xN#^9DL*?8pQl*+p!_4wx)Q#)l}9`1L=H|=>^T^Sc`@LZ6%Am)Kg*l zHJ`mzy);)DCckl3m8ujC&p8jU{(&FfSyi=zE)=nOzumDTplhUMi^gd(lE`{+hltPF zHZm2in2L%7RiA4FqEZO>YCcXy=FIV7gTrh*9Y|V0qB@osWj=1NO1jSh&=G~w&w3Mv z&~>gwlZ0{wGZp0%pEd{5bvA6zJd5oNe={?=*=kE{{f-Bmy$DHj$pO!HeJ4($u1}Bk zv^2qH2P+I55WkN0@rF|VHx~dli*js&B5XBaq}Bd(fWJeLKA#}!v~f!-F0F4=4^}a` zbRfmw{kpgjSihZ4XX6qgJC)+1-A)RVK9;2t=}XTxDZ$T-P$5Qru++QsM;+Snpx4(S zUxBJWy3g)PJ9V<#MoBCcu6?&TS@-S2Qp~tGsK`62hDSNKEibL`Vc^OL$_jd$_~8X9 z^tp!bSwY?M8Bcc{h@gyg-}!u{(NOrYad>!w1Hz&2jb`bF$6IdUH&bv8*hSG~X|O(Z z>Zw2(ku{L%QfLJ#q+^qE=K^XVbF%-w=GL$LA#hEE;iPfpjaXPe>^N8c|C)O@)z7MZYzO!Gwl)*xG zxHe1lP(G}xM{?4YE|^S%q9mOc&&$+!Z@txul91Bs7IlVCmflFN0JeGI$j<2TqzAZ*v>ja{?65%mK2Am&&7SvwqI`?J79Tua zNE@3DA5tMf$x%-YqJQt7K7u=%-I?G^u@(q#eDkRq{04jIqE1EH4GI!*{CyDF$`6e%g1Q#!R@-wOfIsyl9!O_a=?k`;^go(R&;8_f zQfoNvgi3>7F#tgvLmz{LWByY<|G(~d+PQj@ggkH^elLFxV|x|b5oDol@h6^L?#Lfb6* zixpmN5R2fBZS$c6G?vlrBMMk|h3tXXW&eC4`^q(oCOk%OjdQ*0rlbYSvWx9o2YS?1@_J6MSzf)UiZF!igL& zd`jQikx3;`twFj|$hc{xYC(bZaI{A>Y1}Ry`j!tH?0m5Ui_8|jZ z0A_`4#Z-V0SeHxR9kd+hs6I*@FxMRxpo*(v;7qcw9=pBA5I|Ekk7`wcJ-v8^jb`Fq zQ0sAu5>@qs;Uj%ut30zg*0Xhnn8GQ;vWau<)Gk^1r_mViM#cjQQ_I(0ngj=;s z+ckDdke`MGAFesUe0&>K-S79g%DiQKSK&Jb>M3f(m-gILA(2=umX!NZ7q#ukGQ34( zWK#6E2;oYKH;g34>a$vqHY=9hG?9RkrEMG$Ros_rHrr@>(i)OO-BypW4ZUPgrz3cY zI_Hy9H_JefV!78fz+YA2u?A9AtN#Hl|G4VW-e66r%1yjX`;xX^*nPyj9n`INOqw$~ zis-A~<`7?=_4swo@2>VUOVb~Vff9D=cYdRU>ATSV?1HV!-S~%Q^$!>@gpEcfRL-!F zdKmO&Xr3X2h41E|)@gfmt6j)@>l~qAO;PwV)>_gTB_y>4F%Xfg-OJ`Qil7AwyY8@3 zHRF&Z+^3aaj_%+4S zhVIzT4q1J$z_Dn?jwDU%4VIhL@G0Hi@eLNJ6enoE^v5f) zN-p5mit?LVU5}7*{-LEq@EP6tT@`svd4s!GH1d#!(~C3lpaX(9mviU#L7O%+awDdt zD|T}{ts#QHehfXyil#7;Qs_L_SY}bmf<|G-SrziNNzuG5hpH4#W4%1hwgH$|D>aT#k}tX>^<4lH!ozMfCbt)kq_FkkCD)d*}6HMjXHpW+C~C zK%B-*ljnm4!A!8pp%3Sxj|oHl74gI;uVl{*{(Q}AAN)a6!PP&ON{Qk4@MomNnG`Li z1q*0`mLAPt^F8Xfn<}5`?AArrJ>{<>Pb)K>xTd~P{QbKG$0I!lApEMn4Y1t%6A2Z>RgC3u9!x?PQXvxRiE`In8)grD1iDv`z%H)oHV#g^3&<8sD0~sKYFs zfihE~4x3;YnBK5jC_9Sh3reyu+3g)jk*Z~iCx@%oG@7!UqGAb%ViJ91!aj4w4l`w4 z>AgyHx()tG0yW<|s{zuIHM`>G>6`ze%*=+RC#QI5llukSS z1$(;IH-fU5%R$43Bkhl+$ui1}5n+0iHPQ3bFNh$1bl48hI>dWaVUEBNv$ zBZ=XUF(*l><-aypN>sI4zX9(#X}Gy*IfmFF#~}~!NbfyobyQVYk~=CB|}7j z=;NP8j*(84q4NN?8zJLF5^JrVz`mi=!k);?LgBH?ro$EfLoQXagXEhIWC#+1aoAxs z%qdKPD+aMu@5`Q)d@OU@bU6=w(j_@`n`F3bQ%A)Ilin;aRZ7~85n+%wCM)|2T4UR` z8}QAv*b{!qL%l85eVg1~b*LsF;zO{VH@F&A7|m>fwdfCtJqdj|au-v{oORPmg7}Lc zYV|``PEhDu3u9ywiI$39(u9>(RmG=ZGifc6uFrFsQNX;lnCCBqn2wmkQVwC=a-wZF1bfuQ#^H$$bDRB!^l3gi^w1>n$x zs}D$Q%F{l2hi%K{MP6e3!f2t$3G8Ew(f-!QWi!J4Kz}l9zOsmd10qe7u$ZCYEmOE5 zlBl^pIdD|XvGBzLIB6B=eSdkOZoD_DcE&|s0YSJ)+1!@@iiu{1j6vB*{~qT`Z(%6@ zEF{~>7e36p$wB!8O-zyWaxU6(%ZvB8edE&?0DQi-lX{6~#pTG1+r zHi=i<1`{#~cCO&1=A%*$O{}`)qoFp0^Wso=_R5S&Wi*A5c?|95ys6a+r^C%w@QQFz zknV-f*U>II?raZpr2mHF?dxI8)&Hy?`oD#Pwx|!-#Ev8fP3vH_$NX)ZK)`5mX|pTr zUxd0K!@7@>6GkIqgAFHhWSvXdUDA^4j5!qkYA2+$oiG*%;VF#;Ve6A#+fsrQyCrlz zF9&-5J3H35*V97Y{AnOmR;*>IL z<(+3hvMzITMj=E&d&XiRPAcU}xog9J{bl_syK5$|YrF!j%=Jw5*RtO&FPf#mLU+p# zKT6a_W{dTj>?MdBxQ`Qwf&xX?NcM|B5Y1NdrIo-}iyrCTPLRVBE}Be(VVyu#BAz=*T~!Qr+=;}F#zARix5E0jL-8~rcw^MYTpZ|> zcFLYD#^le43e+QA=lT<}tMyZdezT--0W$&G8rKs0C7QCGc=N`{Xy5){6+Mr+Jq4ie zMm`$v4(7KE|K^IspM`N)A}&x%1Tk}jx^jQJB3b+ZS`aQ^WYD$>8`_@u(J@GUE3~3W z6OI%1D@fyaFRafZPC8>oQ2;vd61E1HV+RZNsl*YeF1 
zwQ{(Ml60xFWjWjIUzW+OMOsB{aNO%ZrKU*u`_&=R^?y%Ey8WY>HN8LJ-+N*947ZZ( zo|!8~W9g^eMLm8yac3w9GKXdU9v1fUtlg|5GsDgC7Lh{n^6EmE-TGsAo8*guP9Lh` zr#={VKY+3hP4L9~>~&Az|FB~X9U5iX9!ARSHjfL5<0>rArjwQ81O5~fByu+o%xRpF z<2Toe%#hxCa@Jl+e9m)f-S!*9cUQ3+wCjZumyi$OtJqM5*glZsYYp<OC;W-~oW zk50`v+2tZ{a5E5;wU-bcRl}bD5Yn3ahIH3!BJ1-P-jdi>lM3IsN{R|D6?Q+g$dsAw zi^oq9%2%x}B_eYyk{68YP-avp7#WID=NP&noxg@A9C*s{1<1u=)+x31M6!~5pY$9W zu<~y0l{VmTn2j&fAd(W-8h<$Wg=72c%>tWW1?zF*wPS z%zj!q0Fhe%k`g9?9eScq`qOF^sBa1%R)rKA*;Q-63{=2)yT+LDb6qZTVsRumk_Bhk zIOGc{ZEl^oIZ!luR|&9%CiS3;#(pq>7fn4x)>{cI1*AgAcf#{Q&HWyX5za8}96J$O zjJ5^e+(aS+FratJoAb5{eK#tEAR52n2X2z>*CD%2C1bOOB}X)3YH$#+addK@)%@IW zqVRB4kIBBmx8##V=^5cJ@54~!t{^rO+~HM+Sh*^9)#EroT&ACpP&~`G<2MDR*P2hR zBw`JAoO1t&V*GHMwCwz5Cdg)#6ZvZi=B*UwO^*`Nau+mh!{;tRJ zyD@3FKlS78R7c_PxJ`1z__F+$wS*>E#3A2_x&Y^LR3jQ!eB6B*3z9ZsHN$UcC~k{u zA-*QvVdpTyCZ=w5)mcc%-6<7-JueAT)cWH;MI@VxL*MN19p>@vye&fLF#l6eS~!LN zfAIU8`xDn@t^IE=D2#pY@X=|Y)2hb9r;nB1%pqJrs*V=?P=5JX%B<|yq|RwvC!{p$ zcPjs(+ceb{6%4y(X>3X)wlphV&BT{$RU0j+8Tvz-s|% zxGv}>{Zd={=YsPJM8#j@Pe9XcrTr*YpIBtDyI;KbO8cS~WMiU8YE=>Bxd~SBAI^j+ z@PO=ejCgGaa`j!IvO!q-_Fem@2sHTe+7K#Qt1@9gJfcv#SGLv~z;~I=2&piM@?1qw zrz9!_nL~9gG%Z)cw^TdDEfo7(I|)@Q4*fG~c`yi5b3GBS3ydMeL(QQ5 zImccmmJtUIRQWVO2+L&ul;U{P(~aI$zBtI^zbcZ!ZaeW5Wwr~a&O!VYmqX&*s^$e| zl=(1CcNUvXCE~|RbaaIuCH`k|LCR%%N&+jESSv{xE$fW4OE74U>rVx6_(Iz_K!%ok z2lk)n+z#q;60Cq2Ye-G+@?n6!>o!krmKL5nA0TfdW&}342qjEr1su0s=n~64z)Ot* zZ!$etf1 z35q%dOC=+oSm-F(2}nb0j2<>-v0!7=F)%u0x}>~+6hpEV#1+~)!RkLm7ort;nK**A zUnvSR?P7lk1CBk5vT*HsMxX0y5@Q2cdJ1$C?S(#C`Du2tJfSn%+_}NNN+UhjlA3=k zxc*mP|FLd+AKvXAGt9AGGqS(=n|HrkQ&Zbj)4j8^)LwZi+CGl=e4U(N4lhMa1@vT( zCdbCMK#C6rAyH6>-Y%qoUF6i%V3^b!OLQfWIFL9Kfi9F-LQM(dT&e3^huH<^=LLPs z4*`4kua76g#(wXG%@~xJ6bOZEe$w8YatoaZCVGy4b@Q4phpbl`Py8G+&fpR|9bOJbdWYU`$t(k9yHR> z1mSeR_TksNL;BE#H;F`_kBt0J_6@wf_;*E!p?~%<`vbhA6=AQ3Zxic@Yn!zwMRqid z?Ln(`Qb*!G)e|azaT+MxXEr~PE{KGGMb*Q8V9*kj;vNe7%E9oc$JhzmKM4fMxbt?y zI1J(`jKlLfcLJQWFD28mf)8rWS~V17S>dgo20>Qvsiq>MzoU5bzvFHmeKAdyB zyNwNxms~%h(p^(Ey=?QR@*Sb1!54L$fC>``KKo|{b<=?Qg;DCsKlo?+EEdO?ib5%r z>ShgIMlS8&Y%V!Ra8f*)0PI!rfmcw{RvKONp9H=5$wjIU3~IP&mQSzTM zSW20y-O3>vc}R>)41Iqp`Z`Ae*(J$964cFu)O&%6xwHdI@2-v~RWRTywz6mVKrFnr zAgt@WrAYnPlWZ|;Ax81pb%i0}dpD)fL~?%V?bs)NlMwd9Q@jn?a2D&~4c({AGN6gbsXGK+^EQsb|Q?=XBM>hQ#l;?Z+hZjO~=i zBm35Njg!)+YoyBrDd}bes3k;7^dFZ0%F|TTQn#D!$#^#?d})x0KX0_3ut4d>4jrl- zE_ouF9m|t@l+jEZ+wfg{RFTannJ3V_Mcqvyw3~jhfg0|+Gh5ZgdQoilVBc+l-sKVB z6r~JFRsF^4n2Aj*Zx1IuKBMHuf`{~2 zh+A$xI>vEBb-oH`eO_Z0i4)_e90Psl&X<;HzrpFIjJZ@@HW*LwoG&z=4;{AXKXV#N zdI(BjyY3Pi?Wo zPtOAnfdManIE?#(;t)k{OnU~p8V|4t)J{rU++@kiAp>Ul#9s*KPfmX`oHBne!8Nh< zOyr=Jw(K^W1MRFAYZuTND^U|8(ao{&G={^>&9jnbPh)6{@go}&{q?pFBh86xX%nOz zeFE3>8IdUCOH#DgRbXyCLJ*JO$c4LN>IMDrOoF}eua`NuI>^|`qpkYI?}d{vRU&A= zViZ9@tT}Fe=6sAU(1a`P#$2i#;f;DDN#rr%xevd1G{mx9N95?7=s72y z{P_GK(wDOtqKW!o^{+HLzVYVEFR|FH{un|VH|0c#+&R$%v_@M5Y+xzVl1%?9uVZa zV%jsa0N)wGE8IpaeJS~ch*eu{_bd}hVYtf4X^%kKJcRVAZZ|u6wciOiH#+QidtZVZAAiV%8j;9 zekE;(rf4Bt$T3+kjhWFIa0wv>p>rb429u|(SkXin@#ZKd$4@VDA%2Y~`N@YlX6;{a zLH5PpX%#_1rxf?MDj(_)pIBpDz87Xkaq8S)mOhOzEDsQgit8V;R$Oa< zP4o2qY+x}NR)0l7<+!De2D2z>1SNoRebSEp0&i|K%qtb7Xv&b|I0dT+_sH5&4dK1I z-t%`7iMr+s#$z*$((uk5M(^_cc~}w}hGcKhlLG@aT#1155>c{y-Q=KUergCT_V0 znjcfJyj5U7eL@%8!Z=Kgxp7CIk@_>Vvs%Lzy)sXzL_t-ElA7FJ)1!b1jXjHOsf!Kc zdJ}}#!2S!a*Ewo8_UD+9_$n48@846xOFU~~hq|1^%y~?@lTJ{$0@$YvI@1Koj;k#H z^aV=7RhR8d)Y`tsoyq9K6Mmw1QrL%E&xaP7p`0PWPSGol(+^P&#aqaA%CTJuuY@pD z({m8KQ02$~ptM$4R_%=lzVfHa=;Uqa zx74GDgNEYp=lQP|Kylk$%xg~0)HSQ|NX7+%JQzT>lv64uuL3Bao=qc#dIs<6Ap||X z6F-0ZR4{5^Oz^p|@LqL6t;F)qM|v$ujQ}$)JALi5g-j#v&iY)@i}A{_2e6WVcaYMv 
zf^D?WI~Cbk(cQKGIIEMlvZ+ebH*znKI?rq#EA>pf*fzn^J|!W50F-D%Cbq>=la*QIz*KM>;v;a#N(y`=aL% z`rhkrnYTUd#zU_SqtlZhTA#H`$uvnN(+4G z4u|1*<&OM6pnk-Y=#Bo^h8&*BAm9$3?`XOGLb3vG)ET`HM4p%)ihREoGK5_*ppV~p z&rA`2gt+HtIrO6hb`}xP5aBReJ zpgf-JykkD^QW9(^6Q$VG(Q1c=Bu|?tp_EQDAun72>uhqCK;uNN@5PP%1; zw;YU4rGUS|!j ztMibYQULQmGEw@>*9Z2h$iq*Q@H6pwj<2c1O*(}g&|DMaMtnyeH|3%MsKLARkTo4i zOe~N#|4GssN_Tbckti2r&NJHx3raep6!F8Uekm_bjk(1rbi5jPjT${RuCTlcX@|$< zNmH$C0E$!J{WZzXICa#88y0Ron&0_1Uwee+&vC zBRL1*CNvV%DX|Y>3XD;^F&DuZ!Z1bHJZjKDX)>h6GYRQoJYW9UpY8idmu(#+_VLby z{gEKoOxBtNPJx@dQY1@BI!3FYk!JU3#yoY|(srgZDCNVLU;a+p?8inrhm}s@F14lM zvAg8$_1AUR8Sq1@)e*Rs>C{y9{lUXnq4lHL#iC`4)`~UPbU~uGv^v5&hdy(+CNr-w zB_ukC?B*Z6P|zPABB7737`+_H$8PI>m+v-Pes6_(&*3UN4q@O9={hNoit4e{!u@1^ zJk)hNaM6%VW=WWX9MWXGz`F6T+^wn~ZMwYVi`E+TI5a7UnCv+^f3Kj|2m0Sq{K-v_ z@L;W0qlqjSx%jP12nxhRlCpCSVNh&BwtGGmmjiv|b|98u&JDAFKOioVaI&@>U_uGN z%9;7C2AfIU=RL(=o2f7*iyw})jZ6{UYKSW&wqnY>2qxVcz^+~~xWPPeFZr9rhmNU| zJVLo;$*=9ooU*H;cfp9sY*sTx^0ixw^0OF{!C`r-S6H1DN>t-Jy2l6nFhy@1<_g9m z0wsZ>;j4d|>1#H86wwo*Gx9NmUtPLISZ{5i$%Y|anJO;9hWiSLJ3e<%^}@c0&co2o zwV?Vj@Fz+>0DEBva{X_mCO|G9pZ;496(y~4C1%Pvc?SE4Ig1?Hicys85mj;2^qv?t zkx|hqc_zsSjqVSf1JFhjLq4twE_$LR)*z zO#|u1a~LE*uDI^di5phWdF15lvZp`5L7#<$DCrV*P8%vEfaYP-?adcO*Cs31Cb6`K zhOe6~>(*t+U95lo__K0}LV%i)sP@AW+fr#3Hy3kKe-k?7tg5*Yv7cY_k@yO6*N_$q}{Iy zF3O(M!6{*il;COKK?&9=>bQV6v+OmIVDMt7mR-~{BSp)ti!KiLv9ySaF-Pq$rgc)n zf_W2mfHO=nJ0QAYr%ARMols}^_4~=BHyDTN-cpqxvnl6}S)4aA zGqq_3Wo;|g+T!av3knI7hwk^pAGULD7RB)RZNR@mA>*a zcQctff(;SIc!RvNzR%TW-a1xzyNtq5o-aN!x5^XF$r;q%V;)rzw9#DjcYK|vm`{ni zp1x+5p4>~?n;Oocmu=NgGNL*E;T;UAn(Hv-c?*@6OsNLoy-nVgkPh$J3oS=UwaQ4p|2C{g{*n}x%d3w z$YA+^SSETMYI+gabX)HJ7&NICbET|{Z3+9TlaU}HVp$YeLi=%+Gi4H97(T83b--5m zfffI-VwZcjpC6)0PZwm;2H(dkIMe`;OB&LH*icTv$Mq2j5e@lr)|(ZNQIxtqmtIln zo7-kPk|M)}>gfze+h8neW6Y^^==TPo}$>cD< z{<|Bi&vE%e4Cc=C=OZ}vpR>P=pr-&(0fS}VJJt~h298rhhqYVemxzs#zLe;I2ps1b z#2Q}2Hjz@T-?|G*DNcwfS$Wq92fs;#EB{h(B@w!YrlvzIbZH*&($(X-`crKq381Fm zhcQC^e76gWuII*yA<&&RQ%;ddo%fGKSJ(v-;wg#fNu!;BFpLoYN769}w)1Pm+yV9p zO+lA~3p+S*e%8YFuQ4+3C}0p~lmH24dezl*p41YHWFBXa$Bt- z7PqUhyrY|0*BfQW2;Jx64!ht-0KT)a(Lw74q7O^{9ye+?2-gHU#bOZ^{nM6iicx8# zK^OefZ&Yt@8=tdFlKR|j$m#@IKs?l}F#2U3ry{kv3qjp77MxjCDTIVw4r*le=?3y$ z(2M%!c%4l2?+y3cYNBW?+a*`E$u`4^wT8S51;o~@#o?ESsOECDcq7K%NYiL1`c@|Q z@H%Np7_C%)^Mv0b`yxc2XDnJ%v-D@BU5NUcL)&qnse@FkT@Co1=wKikzJxX_y`F)< zr~Rl9uBww)ivh!WQB9D&WXMf2u2oNWStfm)?;8j8AasH#{GO=S6XK}k!T;Q;iCVF9 zsV*^v0dj0s8#Z`L9;c1m|ot;ld0+<(}Orlac;SZALdm+08Mdh2erRC3r%x2-6LSe^1#At4`giWVZ1eX%{zfqF z?1(nCwc(yGMaCqP#%HdPbAXLCnFmK-wSZ*O@W3NC{0>qsmpVl`tL^d)2!dqsun~|hEeGdC^oQ?LWJ71;iQGx zq^L@?2OLF$>98V9O}W^)Nv%Eq8OD;UKYjTrtjw@L^n&Kd_6gFQOHQ@E!85_g>kRFn3uu=+k%;`;pU5mI_(4A?H{QxK+Y5p`TuPL^qV`Ju&{+*4%5 z+Mn-7A)@z>6*unh1W!u3lf zuwWI{6`J3Fkoc%e;8k14?hnkmC5D*E+u9}TqnOISt3AIlf5obsMe=wbE5GBl9!zr< z7$|{EnxfX`Dt1@2S`-rppWbC%ZGROvZ>zMR%sYvmWx63pP3-d#V)@_Qx&}mgNhSK@ z?0ZjCeDS-fR=jIOBYo~OGQ8vWvUq(a?YW*?&Jk!KrKK53hUNyS0p!Zc%dC1ixdg#k z@mtbKrMJ1O%-h>Vo7;g_3($Zx2cI1-m+}v3GzwBI6yD~#yhV8T^{c8qdBsk(;>)L! 
z6hzh|)dWubBs(8%6AO>bG(S*|ONUxfi%}uUCEn0#%}y#c1Z8YyeWxTaJT4-_&yv)6 zvRC=e+1(*O%AvCh$wNah#H(0IaV%H($#_gH=*=upW0>NP&chvK33E7FWpB=}l3#<8 zS*;oa;L2jat=M(d)paR1+BF!mC|-+&{4u>|73hyJ8|zh7G;n7p!Es(i9Tjg@Q@qzi zFL8JKOb~N7(IRE;Q)uE28FPt|C{Nr4FS`=L3Ve|IgfTk%DRLxexVn}8@X<4_4x0zVp7%%>EO7v^;9{=Fj`s!O5h9!uNN{ z+B*vj+3~(Y*cJv4a!b6>Uyh8gna2bFev83ok@rkp>*bMe5Kp0s!&0-iq{pGY)S^7# zQRL&+Mz*Or_!jJ>W<)<-ZDR4t;^(fZbuM}pPDG)vA}$NJf@A_o%8>tTPP_Q2%8;sz zgiO2$dc%2@nz6K`Muw$X@b5jop^UO}DJrROHUXerk;5U{C9WAa!XvdDpz;S?;F>CI z%EipOlY|W&Tg5N^XUu|#aC>?=g@C?=u{1jBD@j})eq9v)wc-ZycrsHiIeu=AQ=$?d zNw{7%nIzuVXc`F?_s;g8RBWUJ8R3rOGzPj?aB!51V^`QeV{~p(Lt|L_B>4yZF{*26 zKOe3)iUv1LBR_nhJ0juBg78x>aU&hi$c(?hU8jzvzFiCR(|zb>zEPh;I@b|rs=jM| z8BD#VN*w;39@okMJ5lX?`14b~y=GE-$37LJ?|LOQw=K}=D)2>h_Dopwu%Cv6R`e-KE;KfBx zM?c}ismCA&Op%Gh`V{-T0mrFi9BI$5TnlB)wfiGlYGn+9mmU% z)v``CW#lzUW9b=M@1l7Ql5Aw z1XIE|K6|3{WkKCWLQP$sHgDnl0EP?ZHbB^uuE&jH9N>VtBq!OH?8!nU9rEu28p+5pOkq7{% zp%#^f)M6%hW?ezJ{r-=now~@F&Tqe}&O5h#<(AVvp~k&B>uH4;qT-!)Mutdq^vCpj zoDlakl2H5wu?k-)lUSFge5=>@#TMz^@Omy-usTWE@wRA z=PuPW-hS4rR8~H%7rq&ex-6<)s=`;(@GVekUDn4*qK&b;`tQy-Jqc4l%t3!Rb7rYq z?|z?6?x~U9sJhgp{^klE61L@~%L|Jt`m9M+aQ5Z5l#d!@%)~CQr{D^Xmmu*7bm%0f zD-Xq*tvT7fS99(fAT*Q^BS{E>hQs^@tz*uJ5)aUp!s%lv%aCV|F%h{RQi}e}wZ7)l z(-eMApa>wqKuS9&Qoo5RRSDDy?T4R?`(ErE_dsBgl1?eiOcQqzF#V=L0G+Yf;AhBi zGy)!UoKXt-nS9wr@;J;f72h~_AS}4Zvyg!csFdYT@CfB7e{l7>>9B<%NK-bfuz|A-t35D4l#Zbi}xDCFlb@^y=(1czQ{4afMEL1{-DxWfZ1`@$w`$y%fYp- zwM(8{B%h_Q-}8S3@c*--*XWZ3s3F!L%wXz~pyNNDGvjA-t6X8@G@JrzEX=K-wCjhS z=qvFmJ}Bjiueyil<^}JDTLgzS^kFn&7ailsuWC%gEOGAo6 zK9zh$^zASzptbQqv{tgE0pi?2;j9S#1t0IflOfG8P~ur-Mab}IG=H5O&*{|;`!#HQ z5Z~u>Lghc^lTrUkjmS(4iZ~{MhEkAGF_Zb~8lJctU`L6BLKG%?!nl`V65}CKD4_;| z#5FYEBV6fxn)m0FbBKwUSJ^1wO98msBZr|xhUjqha_Si+6}|q>R#&rP0S`oX*Nck_ zuC8g^KUn}cdqSfSzsPndK0(X`3n8#8$q(8RU7Bh*D%nqDu2%|$LX%rSb9tH-JX5SI zEjRBuhVtoa7ygpXR-1NJ_p?xAUPfaaXv1fiER@}J7VZ>$YV9j9+=80LWy)fUr=}PW z?$*u2c!M43&logydKr;7Syclj)#qlZH6Ek$mX)834T6&OY7*&c>BT+Q#~`)}i4 zxoB}C)R-ES@>2$+jB$F7$zFluvKEz>*foAir!shdSkonrzt<{)Gu*9z`qrf@CxHDW z>hvtPf6ghksgwy<&s@<+#wByku*eq?{h8Xue#&{YnkWQBh5M+w>>mEvsp$a zJxe)`%g0m8ZoPSTfj;1L>4xDPn)Dm6`x=Pa0D&n^_@mtP3kO)k@D*-LHe{B8%i0B| z+qtz&`h*MdeG1I?8qs2RHQ){b1RI}vdkR5pe1dcLOHiG!-Y`|O%JsrvQ&|D=0B@K6 zno-4f%U8p~u6Hzb4UXSC$sKkQFLNKrg^x;AP!!idJ28c-yre>j=3WC*Ez1)M#oh;Z z6tP2tuyf&1_P6dAzjc!SLu*fjFX;E?qxW+eAGla4%`AW+v! 
zGHuHe2b<>fi)(EuX}q#r;g3C8m`mZ_n6YsOBCO5t_#K#p)fv^ti@y9QgP%pQFmz(7 zt#u!oks1o|cLDm>38}z1bO!(5W=grL8{p%h(8q40y!@j{jq419-%+&X)ZP67UmC^$tEixQ%v^mhO-pB{yraV^%ZEk3N z*9ztCDDiteh1-NhpVuSwkmbl^fycNeaz=rjnDbMTSUSfgjU!L@EQX=&2U6QB zJT}JHBnVOkN3_ddr6iliCth`#-lA;vU#N{w)@2FJ6(+Tvz51`IF9KH)uCQLz;LNJW z8q^c?ERMskt@jQ1N~vG>*pkpg2t5<~uZI1u-fzj($pp=#a!H;D$QquZA4lby1@q(C zJ{>B_rD-Qy;p>Vl+f%nR%FxT=E9=4}N_e7EBP+j~O0##Yg2Bk*Zzme5nL|WQT!W@g zIBd7%1Zo5!P+#RBt6zH)BtXzdD(WY!SbFR2H3kM+HgFyrqgfLZ71;Pfw57bPMhmjk zBM=L2JW*7QP7orlBg>6mNq+`mP*w9NJL!@Yxie`r(lJzlh|tJl@+syH@*gEgV6BTT z8&cNqjpKA7BYri4z3QGC>S2>r$iikW3z;R*a-6kKhr3sa*GKVjYtcs>3(Y`t`RrQ+Q_H&DLh>Jt2 zBo=`UNnF{FFfUcZ9*s&ae$Zq`2PL*H<8^STxFibm(+J7&6WqvM@~x-6EbcJk9Dl?M z_R~#g;=kJm^i46Y^QDlc3L|LPpIkRZ@EgvY?IA@rSn}Wv;}4YC2RhJ?(3I?Jf;2!w#>VrqLC2sF5*LG_O_EgCd3FYS!8d zPm)I?*q%p^&Znp1qRXYU5}dMiZ$SAAx|5mxoH<_?_k$Aqs%elF5*Cw%KM<+j8t3Z` z(hm&K5_jS|Q4x-DCpn*3pxAI8tG-izxlI2}19|q5YLcxuu)oGbp!OOE`$z6sOHU8((Vjl; zMyJA?8D8!Nz0I(C>o)5XGgVcz|D|_AvcYcCVoCGpd0`}94xWRL2|0p&YRN1Z@S}uc z#6%>Ec{%k)4IveG`E;{Bbu=L0C~#TQlH;q5U8+qWye_M*&-IHPFPhDOO$h0$&Zm+F zjli2!?}sisoKw~j@OPBvEcB;llaHA!nUJ7euj{i8)3J-s8|dLI+(*Nkg1ZO^&jJ`+ z6`}xc+%p^cq}FpJbB}WOuND$z>tsesx(H}sJ3dzD0@kz)q5`6{BxTQ&054+De)kxz zLF3iFAdw3tG1t!>_UF9#n5a4$3SbXo=VJBfbwLlJ#N^(D<~qYR)s%WDlL*U;pUllR z>ixkAiVzBCy%~^bb%--p zwSSzgypU}f_gajbeSIB=sQba~d9mRu?&hh<*Jcg#Eg&Kyt~6g&w7KWl$X;h7IY&H5 zA%pel;15ZChFDvK4Rn+JZW?c5)DPoTne% zY_pUaPye--7E?M6b*htyMf7pqb{Ue^|M+xju_Cuv?w1rS3|&srtC{{$WLDjK&B*0F zG2eEf(<~^kb|SbB4S4n(n42% z52z6YsRvZS-WX9|moV45gL+uEm!$1S4ihrCQ|85t4GxPFYK$ua$^6R_cx zRR=m$doq|_nKE&SU?#ygqM3dWjo!_P{3Hfz0JQ~@lPFjvoyDz;+=}53Pi`F#7k0OVUG1}J1?j+5f7SaTR z6*rXQb4R{P1Q&TY+lGK;na8g(=|t-$?F=V4n$?tIWgVg=h+e5HH#}bB5?&i@w6M9n z;I?qN2zOtdp`RC}E$@W_yVpbCDEXuup@YBo%oF*$c$RLTSuTy43E9VEc28cEx=6ZW zbYc}yaIURcMf8lvRyz@zaQ2dpdMU2 z`l5^KCOrVCL8pNU+bW9dX%WY>3;BJJz|<&QW7#Hby-309kcL045Jr?v_Ypmxd>L=P z0?e6E!fi_@JVeyYKx&1w_b8`jV8_d zXioGQr;6Oh_wKcP4TLot2|$~iiQ=|ktRv;yT~^=xPSRgWw`o}BU@yesW~K#kMfP|6 zCm49Qz-Q7W&}cc*1#?8x83tna+)RWmRS}YVaCA{;E#P*b&rQfW?L~(Lk@g9R6jgwB zMX4Ke+s}TviQO;hJY8%24^QXdRq6M?{p^!D*|v?z#>uwpgvqvT+t$=fO(z?ZHJOuL zlb*iMTEEYKaIbZ*z4sf}^}76P45|8re^9jjJR)__3ifSf>O}0zbPgnRhKQ1ClGK}w z&{lV3-<$%UwP`GDdCksx-ugrcI3uMLR3yADMqXRgQbA1 zs@K6{`b?%lZZJ(GPWfn!+4Uo?N7`T&Jj)=)rQvL+sJJymcFLQIL0Vz>mKPM$rc~rG zJH3e0_yVUMozT}{h1)Nv)tb{@taxEYOQ|LXbCa;qi;>0tT_By#pv7eSUl^jlR5kJH zB#Uk~RuZB8dp5fzJp~RGPJOxI!jS;S63n61-ugVlt|D-*ZT{je+g^z`a?4R>?g zhtL_v=JN^rRrS2}l_`HH0QHN7Lkg$-TZjLWd@$`83Ox<#c`flOs-iF4vub;U*C0{N zK1*jJ{>SyT-+$5%^315syB2Of|6!@=#ue(ZlMX2*22N!a!Rgd@ljQHWlMga&Bq^qk zLjG(83}uE$+~4wvCccm=Uun=#KMiXjn0_DneZh9{uFmFE5}sWm`v|#>3k(k$MR87i29-n2PEKIq|Wv2 zw>@#0fq`MP`U;_Q0!?yv(ON~f<*g4%#6N|**Hf7k9zL2AS#kGLu`l9CNpNzUtU<0t zh9#`7_lIl&i?UaHD79X*`@{l?3qMIzf5BAE^#%{kWv#~LQzuX3hO4bk(QqT zr*QT2oz~}ISztfD>~LUVBWb9E9G9I^RFk8V@iHm)H^n%iFD4@-6$St&9%0P;M}@cr z0N7IC_(D?vWbaj+?PiOzV1|BF0khKeM z!<~xAyZ<7DAAf8we>;To6Y^{c9GZ&ty7tMy+SvtcV2CwYaW*}KrRiISE;7oOGT2*6 zmY5tK$F)Uja+(-kV+aoQK0olL& z*e;~;#ozDz6&3ZTArF%RarqD^8a40lApt+CSxOyDV~N{s4<+)9#RruM-J(&b5(Y!0 zyIeaK4$f+rQh9u~RX;HNImLD$i;8Kd6KV)61%Md6p)m6n1iy>UZ7@gNuu|~5U@2$> z+W~QbmQ5?e;&{QE%sINPmp}H7n~1|g0#|LLq}wxg0@L!jMrT(~h<_Gx1%)?c zB&-Vi>ASkd-&=Er)Qo>*CFi+n^1+~s3HEb*MRbhP-7sI7G>NJw6Gf^9E9$Bg)5N^f zNe47w2?o{UbW+$Cc;>E#kWdBMgnFnv{b8!!rrt1>wbV}B+c)eQ)Y@oGQPDOg+qr0t z^oCTWGIq_d@;SRY-Mbg5c{^UBXSG+QDkJ`aoBwQ2;-tZy)p-FhFd}7WUh3>&d4GQ- zz&#?$OMJ<9BHwxX^^MBRV@xn5rE6O$8*QY{u12-tX?$14OXY4(KikmrAoHCErqK6P z^Ys_vQYtYn+L@56T`3YJ4udWfr+j1C*cZbacT71KUf^xd#)(i{nIy0OIrsg;xE!+F zwuyz_NDKZ^2LZ 
zN5>X6CBe|Z93mDSDr+h|30UNCnUC>%R&kX%4L4QC2#_m477b-?0R_u}snJB{r?f6> z@f88;3>TMTcb2+RRZWW;>KgOvFCt;uC_;NW_YxbDLlqo-1k9G;sYNz7ZOagM9}g-j zz*Hmqcedt!RWSSN`t5J=F<@uEQ_b zUX9Z$zmF?2AV{a%$!Ab!Ps0bQDwU93qPbch`+f}9Zh`S5ftB4;%PMa}g$3#+y@*kej-Wo9BiJ z{6Ao6-)kGEMD+xXaCFRZ%jBQv1jgvG;&2g^$vP`&SIE^Ij-!Mt3dDFgE=uVZ=QrYqGCuVy;CzpMaVHsFKMo1{s`N5(ASUf#TuGjSLU^Xaa>}(& z_xL({DiRY@SP83Ds-LMI-@F?-TCprwTiZq=S%}^Dsl$5yuTZ|)hS>~LA*jk}BPBVH zty}w3;VCl5F*{_Ho8GG;pID&db}>Nz7B_FsuJ0HA0F8a7LS~9p18fn`Al!j&79e2> zX03m`9*X=93Ph`dMF1sU9_hNSnfYA@500hxWyu2xhrp5;?Ut?eJN* zpa6GKBM-gyEJ7QtZly#d#$6Mx43n3Y@jRP}^Y^qMQkdV}5r)D)=a``!^(iAT)RrOn zWIL+wx{LQ`6f0o?rru*ju5W~baTy233surp5dR&2ij zP~})dr=0ikiHqTK?EHokX^NNqobWaBzhsr32r^~l++5IIU9%o{>6&a8AlZvu5sit; zLTJc3X2}qG>QmE2+yqpo1xNrQ;L+y94r@c*x?{;WPgJzH^ zjX8PLU+jM{z@?LXTakuY;-dmLVGE|H6u7-**$|jIz2nKI8w)4}3C> zE$bsF&7I0`yzGhDt<71Gg^L#}W97&Cbz{)Rk8mZTd7v8hL&+9pSklpxYRB+7Qf+mJ zsy+`pJlN(F#ehZNhYm!PFs=3?|6??!aQvdn=^bg>vC5(2=KAkk0Xg8H-g(z^50@ot zg{%u$Bm_sEYcQ4b&tvZQip~+=w@foT2<>ICN}kb3p-@kR0-83F*>-UrG$tR+Rfgb& ze1&`z(0q2?3~qescTJ;`>8Le}{PnIT_{MsHpqT(_Lo_gGUg5@qER`Ee zm+U8JJgFlk2D3mwIMKrEd6Tbh=KP@k{QOYp#CGvE8gMloNv}JiFOT{LEL#6H8_`co z_^3OwZAs|ExJBwWwFQEEM`9zr9mV;%6MqsL?oh2d>lz9BOXiijMWzAxHYUVh>M6rq zx&Ivc*WnOg3|8`hdwP4opPHC}e` zo-`i+mLLZJFadjq_MP=`>Bw}ba_49x>XGcqkQm9~)dCW5$OI`LU5E-Euyd_rR>5Dh zDw5R9voVvx^Hr75HMftU;==T7-$>dt(d0`!lmap6yO^}3i%PhEr%Dq1b_~BUCS-iu zu}x6Ak5E#fONHVX2k2z9k=O0u4_E`4PIi)#=gI2sUJq$G8O;5lHN&jZtk=SrDzCm^ z1q?+KZ^Ld^!1KP*M=-6hrJR}$$zJDOWy|3?gt}BwmOy4*q(6wG-K5}FkmVK$&4#vE z0>bt5=nduu>eH?n5GhMC8fSucP95M7w`18#(#MD$9y&FOsl21`;`1mne84SgR3hkS zL&6Omy;p^B#?ZtpG+M z$I8>PG+_!-=h7o(0y6pH__Q;r+DZMnUi~cfz z$J6kN0m{`)NRxq?Q+p0XiAvq|eFB!Fih?aO;S;P29Z$v zBWMvKG*ECTt;&cLs2ScVRk7(iY>P&XMO~~CYb+jlMxU1+A7-8mp>)0X`6(gR;TgvN zumCPE5Bk}0H~uTj@2@KFREzHO68O!`EVZM?V=&|Q{^7fX_)q<(joaKxBy$oX^6hS| z2O}_~)?c%6OoOrinD-Yc>?}Tav?gD+X$Q-Tjj|8|*&hesx-iv|*A|7OFDX}vNOj(g zA2yk_=3fLZeV6n;oG*oaWl>cysWoKu(=6mN>VVu-BT6`BHoq5aF*==`bAH&IZQi3S zfP8SUZG1FJ#X=8DqJp7dW5~W?4SEn&vc)Y$V-LvJJyA(zFprfaU|Y+gMz&f9UIx}1 z)mhhnC>Uc)nX7d_8HqJhxLRFazHJT&2+dqxgX>))CSo#!w$r6QM zYhO7*CRuxz^*36ce0U#-;8kGiys)y*m&##!^b2dyipTY|--U)PX6_wZt3!;fx#B@ObXI|U)QI6#UvA>|xhxL7OYbDM zdGulJW{@nc01mH2`8457aj+w75fC@{U)Y?$( z?omnLo?$b+l;3ow*{};kI4u8YXJf>SaZeZ?fd*afXG~eefZzA28pkvSinxX?2eCNT z?UMvsL>lNN4^q=9_fgfLA*-t6@LgwHJRjX6o{T9^mr9s)YEDsviSal1k{C%->?4Om zY1vg?Of#Fb7WfnyvY#KlY!cvnIiiiKX2Y?HL=sD!@}=~ErxuSS1`@Z480zlZ{Rx^P z3-^pj4{4rGbwXQBplp;2VLHeDS{Ssr9v_Na{9|?~6aYo;q+UxSJbPJLK~f_^g!-Ui z3~p=?iqQLyL%{wwbS52odY&=30yR6;9qp<8PZ4t3vUMR12SSnTmOD;%>Tf$B6(7+6 zTqmPJgo)Dy7k6Lvm*!*u6O36T5f>;Q*gX;tG2t-jX5T2?c1f~>4~CenzuYwCdpswej7>y2=pGauRrGuH2$6cn!Q%u zyQbO-%7g|%IEU^U|Gek=#p|>^ z+`jikMiF89KFMWR0OYJ}FcfimO4*R8zl$oAlbhN*Cm$%Ep4L$!X$M;4`dTylcXeX1 z2I8ro)DP15eY|xF)2lx5GT=zCmh?z)Jr+g(e2(*y!A`e|CB9b^`^k?YpD}xK% zV(6!R9D+@=6VA-sk2Q#LKH)Mzc}#HC;y0Ath_k$H&GB&X=+ipMeuqKDroYN3O9^FwDKpK?t#?h}I& zLx4U%Dp^Vj?~sKtS#5JhC9-wT@&w=s5QqKcl?w*~n9^ey z(LMBIB-rk&9Xup{&2kMr^IZSw1am&HA?7duFf=8 zEHn}}w{)-Zp#1)Z^vtRp{(iy-pi&+=@yaLV09E>p?C3s6q)sMWAQJveT3t>~_Tmn2 zq`4OSOxLx>5r7N0Diw_?{5MuOsPZZ!8tmEoB7EkXf4~aQB86-1rb`cK=T0z`fWhD0 z4)YF2`W0<2M5yfr3IPA6UEn?L5U<{xoEAWaeN6kNMOJyvBL^vF9dbT*8Ur^48O*PN zE0UNxvE?u<3B%Kp?M|zz07Ixt0K7R=rm%9h<**Qo?k&;j5*5tNUlY=f!|nOdYNdT7 z#w$@azSE+f=NeBH=D%X|iPim7dl^EG zZsGQ0jGCw&IB2x!Nfr~?ela`pLs^F+OvX2d>|3Yd{W9Cz1{Q*jF)#Lw={i0OI?XF0 z9f+(}9Dc?CH>yr{z>hJBC?iVKi0w{C0MGhhbwu%T=ZfbJAeUJ*(5D-Zs=nn&{2?d^ z0Gd=@yWShj3qrM6ILuq^tP`4fRg~rbCaOY(Y*_w(cv;a3|LLZmv}ZPHu4^O)s%Gu zXZ+k~o^Vl%iNR#EzwUYUWmeLz(A6)WeaF^(K3K?@hk7hncT+HpzLq=LnEKN6h*NSq 
zOwI}^hAwMRP4#EC2V4bRRT5ywQVnvlq-_>omOE>P2sdtT5Y%^T4K4b(+xjHa8EdZ( zA@8NDJtqRoFhCVI_ZNqXnR};5s(j9o3)?vk-0OEb^>loI)p>vOt;NiJ8-(A9Z_TD@ zFa==s-exF?#K$5e_JS-97o)x`O~5MSuEwW%u{rf2@d6;)GB+1@`oHjqJg#oNBn-*? z>Fl~OdWgUq{9E)v_34Pt>llTaE0ZepcJ#1Jc;E|W>GRe8ioGa-nzF~0X$50uaGU)a9pm`5sNKRt z(Etoz#&?H`8S0hhwzb#P8Gh)fAC%-=1|XYET52Quh=r=iH6DBZ%eQig9wq0ulgJ_! zg%k06mhL9VRGg}k-PBqA;(L1bqr_98flROlqNr;-3W`=+9xJtu%6i2$iw@6P61C{6MYWR5R-sVMm??(MfNb}fM>%vg_ag%6|kS{hY5-86@-czA;dsF8Zl7k()I>{X|ELt@yX0KT<86penr(}>;2x5k*+bpNP zT%U~l{?_Yz7$JdFJfs`kJ3j4A0V2LEjxQ|R#cY};_%Ds%!_=C1hnRC!^^aAqK*aNk zrMIY3S}7q>nHH9t9A#Z^G90orQ!XJ5%A&m_59Da&VpVpB+<9VRRlLy*sndlt8I}Dd z6!8ez$?gwbCY9t@gB`;s4+~?oD0$*kO7Ww8j^5JjFE)u8Xh*M<^k*2;0Ijz6;$v1# zGG{mS>_Oj9nyW9=)kx)2=|kg(X-BB3!o8ONhN!GWaAv*5t-b=!5CR&#Z!Iw8Rg?RK z{pfmUMGg8p%m4UMIUj#(`H90nZ1GJJj_E+QTtg zHz+vfdP>-~@8bL#^g&|N!P3;P5O|Q?oIDZym=f56k%euC`5=4DX=<)mL(~-CEqeWOc4NuYso{!AslR-sk432xRbI!Mw z7`K0xz($LQo-W(OO+8Y&6*fLf@Mzp+gz?uNE~Bb1mc$ew_?JMN=ZqKO6-K4|h+o1p~kndlm{lY4|7=o3mJPLp8UuR@cps%`>%ls~JGWWZs& zYF^dJ#+a$@?yABtj$p$A;Wm_tixnD2fhB@v>j=509?xx#stcQG>NOKDbA2%Y5E{hJ zL|lTEur#6W{$?V_9En@dCQM?0fb(6x>`dyuG1*$0pGLzOFucMGEk!rZdhsPcL%zgy2s%KW-y61qKnYt+P{qJ^pLlN zx)sM_!U|`K=q6bv)7?Lscz?_furfM}lrbaWQYC$la}|ea#hoKjte?@-=Qg~yx0>aF zkg^{5z93=yXV#80#iOzG^}8hLT#j0D8$VSsPVjQ&d`1#mJJ-J5NQmZzw`JZfY)N}5 zt^)#YfDN})5;a+0iUFSq1BvWVzqgn)55jKyxiLLz7xj3BoVE^OFDFf_dmgEWHWEC6 zW$8A0rHS#z?IBHGO8FS1oO3c?(75TyO+=+xP#6CNz?1PQSH;Vs4AY|%CuQ|inu^+ZU8ScyU)znQ5K zP^v@v*o^TBOMfpTGZ{ahI}V+BwqQld`c;DSMm4vc{qH12<;jJzLX6dAJAIorZweTu z6iyQ$f$-0s5EW8mus>Fw;1x+%G@9{dzo`PObWdU#lT$^QjK+=*^aMgnuFzim>XlEO zWo$ze&ca4ZS|kNHn~p>zkGi_7pvw>K-c^#(N%u)49fvU%hEnh{S3JE~!ChTdmvzx5 zxGYUlYDtwfdeBeXegr)eGeisp)r}T1`86-l;+wu*&d|b=7EN8}wx4t(uP0#1+=b;- zxG$_`!<=6lVILICYS0^m2j7)_?`8AC1ivmtOwGobjx7%s7{NkhUfZ*~2}s}$L7<1N zWFoU4^ew*WrP!UWJ`^RU4Iez=BW@fpr(wQ;?QBywfuqBTo38e7Lb1PTj58ls5+S@= zF42kAbT5weJWQgEKn4%~&wb|iZ~g;D`?2wAm{%xV^n+3~rttmp1O9bJ^mh3g@4de_ z^0xI>?|C+}o=t)8%U52Vg%P`?&acBL8xO=R!=1PP8nE}X0h^k4=1fBIaxkw` z+GmJYbK|RWFj+JUf7=FHL2h>~E}3SS;a$1FsK7TAy5T%VA+Pcv?AAN$t>S zfQ^G49g`x_gpWch3}8M4j53`Q8txU-acCq--Cy6*k&G>h?b67sIlb9v1SRX-!#2BQ z(VFcBG)3;WA^K_21M;_|foXIrX{ofBLm=6oFn@()*!j8kfRrX?29ofRtcsq+!q!>s zl7`{is{PdCt!IC)n0q?xaPr4YQ0RzVUA*gSJTHCLjqS_75B(3=*f^%KWnT1pBA9IFa!~vVGJUO zz@SB3ZJK5IP17%n$gAt{F*O=yE#MxA$6k|#6#rKS3~9c=RR*VgperLKo>6YQYtnzb z3>JrhFGNS%=;GVH?7iZC{m~l)4>NL5G^}O*cW%oQt?EJPvi3}k%0W*=lq7t6FR`0| zw95}O@sG@ng!Hxm4^dWKt78H@LH}lijmdlJb=vuZ=Y1Qw@F?K9UAUp~l%W3%=~wO2VOFSRjTVwm7JK*evoY@Wazo*W zwF~J;E94{qX(WxAK*9)e^Y~>LdN#g>sU5DRqJF~L5Q#vfjkZlW7JVIG?AWYRRSxUq z*knAhWFVm%J8q5r5AS{uCj>d=Es1oS54mYJ0BXz{0wxoF;HwqiWt6#!ej$=HJq1Eh z8yttH2;O`>-+~xNYnoKjZ9W!olbUcyUp%RUDzF`?4x%6NBI*0`2cY}Y`(in% zdmyX$Pg@R}EC_8QFBBRL5tVHL1P?vqBx7)D!bAnnI}W^8ZKx{cWfgGGL}C|Q|C4=5 zys^(!7rlh`l}6ISg|j)?j|FOypI(NgYcIuy#M~-W&fA?q;^rq|v))*3_}CW6e)aLTkZ`d*5&iOT~-GVogygj?l4HKPLzAir$ zohFbiLZj&mq3Nx24&alJP^9frh_%*S@9jV3FnBmAs(}K-{WK}j6WK%!=-AJ#vYg*p z+yc1~>}FF@2!;%X9bK`#x5sq*RlZe1+Z!^K)`sZoOQF#Dkn2Rtk=cC?mmCs}>O9mgM ztto43YlR(gLal5278!*@`6nl@nw&EnSHA>1FWgVx;|_l}U!FXT%%Dwq6t+#0N1;Gv z!`xDfVk3EF*#Z?2NNSaefke+LfsYEGcr4JU4;uu=A)ibcYGIRwiF4`VT>zm&ti$2; z{T{n1cCA0d9!IRY>G7CH?Mw=GyH-3bj%TF8oXo@0(woXj-gxYx^T5NI7|Jh4U_{!>k5_<)Y25N#Nju>DQdYn4ip1-dAxEr}<4cLbVxp@3aeQ`Mz z2ODq?b=?v7)FW7!zah~S^!OmM4kv>y5aSis*dVb{2`2Ha3p~A62WfK$PgcPM+ud>V3Z_lR$W_Rx8luzXO7J8ak+r$D-{CUz(s0hc--m!AaL*VTkp-y` zH~tXP(S_nd9#-(g3kPF#ci__tVw>e`&iF%<*>9)z3d&%TRC_@1p6ED42&cKqMcPkr zVEiRwpr^p3$lt!r;7Cv3YBtHWFo%d@ z#=*D%8Z~9F2z_7vvh>mLmsGBmAQG-@ku0RP{8bQr>a~hW@u5)hudG}vDypncGF^H? 
z7NH$(fCK^~nd6`8`WzW3%AeKF-w;4~sR42qQ`z9Gfr7LTCw<4<-vKAEh?C5g&5|q2 zo=9R+uzJDhw5IM;u;2@UZ5r%^Dj;G3$D(CB8;x%pHC5UtLc-9~44qOam?^7uHD`tA z(!T0WnYcfRE-S~uVxekrfa}2eUeLytzYUWUnxd63qPsGE-Dv{gXq&zwW~Y?ex##Nz4!xY+U}EIM z227o@a`>ArDaqTIv17@HPl@Sbi@vri7y84W`$e1DyumT-v<);Hxt zU-J%+j<<^;8FS&GPFXHcjuZ#6!&v)(O|>d#tmiVAYq0mfyV2qKal$8WLYsEFm$G&b zr{P9_Dd6Huv3}D$mqV~XpZuvg_>C*LdBk8SAM?Z%QD`eyk6`kSG0h+4{`+rKtY1ab zF|z4~^5IDJM#Wi_@HW>U`)V6vD&KBVQI?Q$8+{pPp=0&%*&iGYxBS-V%x%7+guo;%QK^LlXVThZ}Twyuq`#4*Wo^?6vt0hVa)Z4H(=6G+fYKY#_E8RA^!49 zW(*%Wm8}-{GhJ>3s|Yq~J$a%36+}_={E+N!=4-@c zrD%Tyqs4ff((~Cnwl7BjX68riYc6i6{}P}iqg*SQgW|BF?5iQkl7SjS zrDR5JBKnb(kocvp>>+5|v%DSlPp8%#O7{Y7wo%Ha^}j)d4oqFi1XDHz=T1HO?~7%; zcE#ST*n47mG2vHtVfK113z;~ybk17i9gEXZle9&@Id5>Lqq?o?WLwR`Kucw>l^+Iz zBCLyw;)_zNRgfV+$GHm=X6^w8iOr|vy_-26Ky3wC8uS}h0 z^xv^Vg4bYZ%SzK!&N;0qb!Nz~e!9y?aNw#nDM?eP|0SYO1+3tLIQ>pq7>+S9IzIjdEDEe zfZ*E!NO&v*^Ia*?T_bj$yyeam>%Pxg*SDsn>ggmmZ)TI;;hNfGOnk8ZqI2mBwl*?uoCAuvq6J_(xJ%;^VZM|r#*p*s9>MQssBt&nldVSFd(U{ke?>+5zi{+tI9pUAQm>H)K4&Z*#jt z5q?fy4)_INehopW#}2^q%A64*jkk?^T~!!Y*dr54W(}Xl_GIdTSq}88kz*Ct%y3yJ zZue1fT$YhvK7k{uB6SkYaTom8AJ_oznRLL=e8cdL!Jt#eHZrgWD!uv!qr!S2I!7lc ztHm_CR3~Zr1PG!J%|9nsXor+ReZ#!bN6Pfm@^oFQvV>u_hat-JZ3xgILnmY|_nQvM z!wQ9};7bIjvUpv_%UnqwI5gg4jazGJ#kspO0BJo6S`L+9yZdU@id3VmP!7n11d?rr zDBvZp3UgLh|JQX_2LJ!AduIK}qlx}cDpXjmHM^s4#5|folW=`zSX0o;QH~V4^6*re zR+!e;RJ;1Z7r?pA)IZ`I*fU#(;oI+pBj-n#7T6UQ#s40@_Yz-zW*S@#5*C2b{O-)3 z&;rg_cRyLm%ktmo_^{t)%OR{MiBj=(+IpBmej+Ins>&18isCRMBd~q%N*z-01zEzE z)>I9ko5VT*Y1Dt9?rZVUP4E=w{W`xCABkYf`7X}`X?V}?my6>aoE5@fT3dSMkXx$V zako3~dpYk*$w0Ui5#4gGE0wnhm8XQL0o@T|W>(APAZ9Ktu@Dx|BgW zG8J?J$us-Ah%B-QlgjE^n5#%YR@XUfETmu&)K7bR+)@0o*q;(_J{H2O2x^G8} z-Uy)zMkrMTJE|*-jN;kXA}2x>ktvI6f|9`{WIP?N+lO~)p8w30TF{@zJ+*}#6|zbv zJKZYzZz_K_jq|7@Igu0pY}LO%Npf@=OWY!HHSp|;Xhh!``Sz~F7$*;9$mYh2hpEUa z1nKJeVPoQwGH$eQ;%3G!b=+>pX$*3IstFNGp9TYjOF;%9-;*Q;^lA!z*R0MhmG|ZzOz_yi#OswFWRkFDr4Rd)+F+yDd;Nhnepz?!#;Ri&! zf5jSizc3YG1@L|@s~;e5e42J4@ti6NqpYUH*pj?HRQr_h{6r^Gf)njOm@ptIY7t20GaTC|<|-UWdcxUWaFDE8jwRPMoLGR!kOiMI5-TW>~eRn;n$M^fQ5YcY+!LdKQ0AuqcOGvp5BM`X+Ud;rynZ z;QUy~(%jr~z&&f&);o@{8EHWR<_ypv2VD=&fnNuc5b#iCEuI*b8%y4J<^3hS#Pyu9 z+34YGkpY1PDpG~!|MAoGAXBrJuhYD^QnXKO{|xS`@GSRA@dhz?Wf zgpkL0vICVyFDXk&6NG+Vo~JpsR-%|mm>t{2plMijb_TJohKl$B+5{q^PB?U;CQGX? 
zqI&y9(oH%vs)}24EzJkG{H9r}VLGl*#cqJfFts55JXZ&H&N+5m{P~)H--g8NyAUy- zSLaQe&ncEyf|{xl?M8L4)@GISM=A2?iJ3$q&xNQc=pio$5rUO`Lc2`etpyOUHSpt+ zlgh#FkRon_8Ee(N9#wAPEMdYN(H6NdKCZE=^b|i=8AzR1_=@iTfQO-NEp2}p(DR~5 z6SzRq#m=aID6{-4>0)p3c}tVK&E}0&R%oh?`mV=FG)0ENCS2}~v&QUOZXA|!svgQ% zGYlZ;LiQ9A+xC1Dd0*lUL@OK^O#b6jgBrFF=#ICgMfpeg_B4CSvz802pHwTY913$3O9)ZQb!}9nyC_i5W6u8wv1hP(v3MF6BnFjYF+pC9}!uw#;$+yivo`tfkil zMR>C@op47Zlo2}KX2%h*`b13m#lm1RTseN#E=rC%(iMtLE-~65rONyEtI+2h4N}Id zZ5(Y^F=f|R-3oOJ`b`)Po>?R~6d~b)r~;0p)vf#JmAyMWjse?~Dx)8kgWUfefxcmM z=}Rz=(D#K_aY6k%rOR@j-|`Wu37&9Q#@PUV&G-M-+8ziX!b$T>NAE$`y73a<$~I4> zhx`KQ>en+_RXy~OyAP3QonG+;!o1L%2NPW2bT|FckaCq0bZ9-j3=N1?UEIaRCAlVO z97=oWd5D0d>l^a=#?|;+=Jo#&j->Ci@NNqI5>`?sjfx)R*a`czgP;xEk<81j0dC}q zjdN9sGpOvbq8x*zp}J!po}y4BNZebiO*JicXpT6o#J^dd@nTY<{q(2xe6w0*JY)qBcSSatm>1>rNSTU2DBba?nRh7vCbM;2z7z+5Rn$_8)Pjy69 z_(Xe9v5VX2L7-=|s7(pjiHTil<4$mY){6On^i~KQFdPm+a2Nl%AW}OphpfaKUQ4M` zuzdXY*DBS@R4ZM0N$lZEe;)}Ze?~GV+)mrKVq6_{2>as>nygpZRXgpK!1tSx!{-3m2=s;0Uf8O9JL$rIi* z$-twrNDSe61bSa`D#mKi3embtsU1D20!||wne8ZJi2xrxqR-lrnrq=|MNqgH+i7Ba z$%)Qru{~HC&A9|J`($lY>GAS)>2e$EgKa0HoW_?q;dUU{jB}XPP1U$2+`SZ=i7Q)q z+_OaIS74f20@~!2O2frAY40nuL3UpQZ`CdT8G)?v$M2k6q!wE>{Ad&m9hYW?li!Py3za|Xqz=p<5SQ>{V zPACX0({N5J$25d(R#7;<5=(BQj}vKR2w1Of2}#K)%0}YqBXiZ0sTlLPbZgN-3wPbc83a%DIDfmvn2ekrp=~C+cIK#Xg0T@h@jie!( zH){Zg)Gy?gL%m5erZ~Q_*FDlW@=if8;=7{?B6ZUnV3S?-iTS+(pZ}AUu8T>$DyMzm zp6syn8vU~@r5v2ouAYidLLD+U!Qw6CXq{DI{TM%yW*XJ;;lGtiUi3S~)M-nCRRy&< z>6lG%mObH0mF7BFXw-OabTDywbooRC6XNk+SMc^CmPAYt@?Hg_1)zWBKDcB;F9Ur^ zi;!XclN9pTfST*C7%wyNc7SxY6!46{S!Euc` z$;L`u3vJxCPN_S9fA&GWec()FX?D-v=73lZBJ9=Wn!S1A?+xK%*pogU@(BHM9lXZM zssQa4;$=-6*j)!lOogsqEwD)j;bS(OiI~P32mOvYEovHTOc5Lfta;&U4w^%zR77)K z@z*OE3td$^K=&wm`P-9qICcdwGeCZpEC%NsA#1zz1a>r;J@qInen}KxN-$VS-{LP4 zkJBtYXS{5VfRDmROD2`nUWR9`vcQi@%>GuyjL}`T8a`j}T`#^-Xf%yH>3{m%WNI_xAScYDL3%Ow5Aq2Z!sV`36SS8RNPkfl5eN)+X; z4uWj-o!0gbPKwK1J6D3S|+g5UhCJg9T19QJ`cXJ*LCev`Z>>q9*24xnGY~Z z>Z@S$O{PNbuoY%w$OF43L`*33sqH%5mqsWF#k-%WJggvyJsP6LDO1X*C=}Vl7M~B~a$1b5>LH>) zlKkWw;qRhHL+>2lun9v%g%z*@6cs!mR<@{Ty}BXvWz$asLFh_X4pM}f|3a&A!I%oc z%koY95p+b)lld4~CjJ@>B1gMENV1s67A= zd2XY*z8oJpY9!FR=Jmhv+*wwGXBtjkntBLZWeQvG8bDFlm3n~*2FKdLEu#bU@()__` z(+^)o_#T@A%GrBj-s|7V_x?`ySv4Gb4LZB^L8=&ei%#5agvkrb4wHCWhUqisc89}9%f~$B2KZi4X`2fFP%$?7 z9<^}*bs(A0%(deAZC-NGl+{iAhT`*Dl5|?BAR#yPAg7~%HK zs8n#;5KwnLoGGXVjXDM2=(96O-uM<=4Dl!+@}(hS!A7ec<4%Z@rvL+}bA!x(tB>#n z{y(P9!K>1+-NRvL+xD&|+qP|+Q_W0GO}1@L)?{mE+nCzfZZb~aS?fFN{14CjKKFC4 z-}Srl2*ye@5g;W?p~<0e5p}s?f?$5x4s~d+p?jj>xz1aZh~%9iq*<1R3n0Ui@bx7> zzhr8~hrG>f+__tVlzs@mW&=iGUWo*_ylkM=MGYS;1PI;a-(4levA@Wo4QUs0r=ny!I2kq9WAV$dHa zlb3yALo{DVdClWjd(E6&LD~Hr{j008dC#gKWSu&*{(n%Eso5{L z^>-=1Yjn{+m7pkxmm?? 
z9DzKe0T{j?Q`q@NjfJ&#QY(36OfvgrcZ`JVC__LiKhuMtUdXIv=C+DA5gtC64 zj%dcLeEBgm&>+M?u&xgVORnOU<|cyZn>H#rMCr zLZoQK`R3YSREqun+70G7)z^lM2t=Lb##l3$084WyKH~8st>Ikjh}0pHr3A*A?Rzah zm+mD;Un9w5SR5>=9fjaQe9|}%I~hMjpk0YZ$(0?M4qP@{2dcx!`hwteJa>t5`|=(~ z@<19czK9m|2bNeAu}1WG*;2pI?|5-MzjwrKeMTiO2DqBr#F|m4AYw$_u23);m~m!k z<4N2qfSMU~Bn)s5a$Isxl6DHF#7mYPX-yW-ef@2Vc=ZTA{j5^D&&rkbm~zMMng1j- zVh+D5bN4k-thN?JEN)2Ry1SF=Fo0GDEX^<=4&-)OJo7aYAaQ}vsXg;|j*4xQ^TLNX z>&F?7kZwqZi9O)PJdZ?3Owq%nfxCTz>GUR#Y>C8b3|Z9zJFb|vZT0xLiGYjb_Cng?a)8>=MbiDN0r8QDizh#eR}?>!&?|Z{$CKHZ=sO^Y*R6q zi;*T=YkZGy(xAiCNiV6d^=q3xCA2}pDK&jL(-G z)Ge#&j?eY1|MUh+uHUCzWMU2@QQ{D(IHQnCV(NF}iRnExE%JKeWBhfwsb}9W zDeD<~@;*x=0fr>$SUaP%qkTr;TAVtW)IcdyR}QqaqH0zID_AcgOCKgcFqWVApC~{d z)94%EXI~EcLuU&|pss-yH)F3fBD;)~Z2vm1C~W?i_dwVDSNnYGg)dRixI`tez9zAc zZ5<}PX>1whb;%g}b!hu-cQ@ZIcYHI70+g+RBnHkrEF_QN!hvyre^QZVJ%L8ftADtK zU)keR^3J>a@?Z_OnMrW}Mw#+3L7z@{u)T<8+T6a#q!9ZiaOkMvq@l-um|7_r$+KdDXuZRxQW`^uAZfrf9PwVnpJB zAA9n=aZOUG#9^o`$?C+sM7FlZl)U$vs&)OBL5{cqqRRw_8 zV$Sw#ReF9{YAj#MOnquTlr)c+(XGCSR+31w?ypoV_Vzm@Jodii8Hc*fAA zeO-U^ll9#y(&`fQ<&Wlf=$8LzmNZ$ZlS(6}6QJEt7M#xBYn_adXz@QRTW`gz>>ruhfx>By3@2|ba8FAzg(>A1>zd?< z9P_7xr15vJ?>40;A0FATn5DZOow2c^8)^!2=bPUv-)Jkv#TCFEb96^tX`B4=DqVkP z5Ml4BxrQ%x*vwnnZfGu97@_~>I;hE$NwGutC_G^9q&5^W`eFG;b(t>&~dPwbL<>Pam^=1!mC@m^FLIdcE55nVe;!aJ0$w zxCrSlYSCM{Te7Kp?czrDsv!CTE*f@})j%_-W$!QMY9Bifq@bnnw0Crq6WGGJCT*C9 z1b(S0z9NR0Y`XMEE~a*ytxC(oMZ&e3i}5Jv8grl^>Ti;=Cv&X16{tWQG~%i;plqaz{J{ap+eClYCy2X2u~^!qya> zyDrk#!*2XH$3|PR)ku#E1==P94t3vXS?F`?ICJjfB@Ve%hnsvx8I}}I>q^3 zq0hMA;(=CcdSYd}SdTH>hSK>4$S)NC)b-x}{sOXfm1RKOM%|g{O{&QiPSbp8A7)w-#67b?`7y)j%LVWM-a@l@2+nZ1s z`%T9ipNf^Lh!?L@B)TrqNyWUt0= zgPP_!@gWW&=qFMzx`65OvuKTe*Uc~ zZ0V4K$W;Z|KluBoy&zdM^O7*G@d%LI?;+giBj&AEZeC_4xO7$6CYj*61w|iK5<6w9 z=N=YEY;V#E?hi9~^!){o=8ZaLZp*?_mQ|GV#i`_}A-mS~opzA5paO{4PWtpWKlfS` zt#btS;0F4mJG|?70W$l)oM?twcA?r};ljW|= z^a1Xt?jy}}eSI@>d_2qZHDdU4%@@$nPtdCjvwx#=&C*GAt|9yjFJPmm%z+m#pd=I1I(fPQmu%Ia7+VwGa(7Ae z{@K?*)VPA>vS;pD-5qtsme=lvc1Eg(^`;V5rebzEvW(461wSr>2q_Iwfd{v>0SRU? zkX0Mi%j3(hb^gY=FGmzS{n0R(fYc@^3RSY&-d1^n)mLTCE}tn&W8Po?xHjTP29iO+ zs7`VA3CT4tLA4wv{z5uLZfrJbZ;v=Y z)YP#DW%Ub;W+k;J`l^q%`{B_5BFE?_s!*m{;>pXidG&gkiQi8OdYqVMbqE?xm%c7e zRlRvB#zwIpT|PQ@L{-UVx5IN9>%i4Q!P#0P;QsZYMeL?abBCQGf^={^i`yC)Bc@11DowvNYQkqg!OV26?|$XwNhkqt|1uZX)+ zn^0MNMl>@kGC$koQb-Y?HK9^af($89PahrR>GO<=vy&PD*yxmaGpjShe@3@c68vnW zSZlmG&O)QaQm1W1ZV5hZ*dH015njFzTYq-l^InJ%pR2T8kN@qY#@0>n?i1p+WPsoq zN}MRpMPtziqo7_76sC9EM(k3oM&yNUY}+&ruNcSP>74#@I(axVsv!G+Zs#J>^j*)u zT!oq>q6Nl+L?tapk#(Vruuv}i9RYM`c=)ouBVBxK0%D^D^|RJ#{THu%our;Bz(lbk zS7HzF0e&lFKloQz&Z~(z%Eixki9xaCJP9n>jM)@R6;IQ#GaCte#*|T=-Uayj&=GD? 
zrhPk9NDUfH%^XC3HNlx=|tNoNJ%Vea zO3k6~7rIPQRw(gNI5djeUs0zj?m3vx=vX&m!T5L>&0Y(!m zZ&*HXI;rQ~tbymDz-Di?g?u^E>e+g*3PhF!8Z_G0PwY6Mcl^!!)RrBROHaBl&Ksf} zD{^sYdJ&}m^eSb0Fu{6WFh6$S*+P!}rNOedR?<717$sRc<7-#v<+((5h*P zr3Mx?t(L6Lb>kFd_NH-)CO&<`()j#jpg=`9gLli9xMSMrGsk#}mbqyADBDDGi2iKx zSTScS2#`#Cc6Zq}#xkkmi7i- z727qYr)FAD^mghkZuLcSrmG3q(BDKZG0+Da{>S)1^4az~l_LnwKpNcZZ1R>b$o;1L zZu#}MR;3?nH{N^y27KhA&1S^y9I?z(wL=hJZ-Y(#Oxo%*WJA|7Q?bsSX!MsK{{`#J z6OLFLbaO94n)5 zLjBX~3yn@##wM0P&_9jg<<(uo*-KpBsB#b75Pk-{^3GG$WXcog*}*_ymgp=Im3|Y0 zg0sRDm2^|9n9i9Jv#S6}#-P|&1<=mJMZzRZ*F^>!vA=HGr~MkSDsLg!D($^>TpY0( za4z;;;0nqi`Eps%=@P?DokhUVnS=5gYq;&wa~1YT*>%oG{x7y$k_)LIXJmG#?fLC= zR>@wu^tBa)?%9Fwdo|X<9UxI6IW^U}SiZiWjV8lh)eX z``K1uo)iuGtTE4*6u%gCw9T5jivJc+(;B8H4~O#+NFILQEFhac0>zO(+chYxxr+UWWG#6iVwo4nErk&A#?= z`KOt${Vyz}LozOcv2^xdDF^RYDhahYYv7j1>lUyE<0<@P_~MiP)+XpD8|N1NE(~M; zh&X<{R5k+rUmhNrn)KJ*_i=|_CB(N#4YyMU44ijRdn~3y+&!(li*@^bAz9b!!zR)C zdz!Tu)4T@rK1Gd|)SEXGE;|ghHa7PCMn|$0EY#(%t%v^vZeZa^^pG{HlA)|aB_JUs zzFI@0iQKWCxq^6lo=`S?FAUJFiV6X381>r0>TJas(&*NY)`0F;FrtpNPe9twPo*

    Svp+TLYUIys`86;Rbr>hWT`Ge+EP30WD2IDo%>tNlGBK zva{8R51JAeNbK=J(z&|9jD-=Qh2o%E`K!vuo22NfwdFL#n^!Gy$S@V*hNrBg6M)jbgdYz+v2#(Ud%dgnn6f~(l3NfF zM;D|ri>qYrA%VdbQvLuZWX$4-X4Aw@tU(2WiWX91A?qR&oUIf~%V+?aX(Dnm)y)or zu1z0ZfNGE&_k_L#9<$p^cZB73?t74tqCM)mCW*Q9skrf_yTz@=`B)&p&qb|FC4AOg zby>hdfaC4}0?Wz8do!sJ(sM1r-=X#Qj-qH?@WrGi-KR~Sn);u&?*Z7q0ya&1z{dzT zU5wY9T#u4Hr&ir8;bgC)st64ty?Z}j9EO{TC#JJSJw`m)QL!v`!}`V9n|W1Hu_ibS znYxIMdR}8A3NH7=9 z@9>jDUVg(dH0b*xwuoVya)K;c&fMny5f#E5{;m3KhFuk~i4E#;d$RU1-2e@;rRGFj zS77P3g>IvN(}!TDO6q5ma-{(Y<#Y^j<*Ag5X67UZnY{{sV{jZBQ(wx7x3VxuAo@E5 z=xs*lp1dNhl7ldL?ZdmC7kQh199r%{^eo|f`%33533#7Vc`LSZKhm@7La2Uz-eZO* zY&QHldiIwelWRn8Hlsh}5d63(U^2X{R2u;X^TGcxE;yD|T{EgjAZ|3*OjHZLzXF;N zJ$3PiwG|f|KG>pWAMplw1@ep|oIpCRfK!2KL>HbxIolY(tv^sJ!Sa&zGKjwE&~p&Y z84uh)MG_%oanko{AJtLPe!}kFQUY;3d6IJ#-_jE|%Ia0~E%;7fO9GYPe`peB*~fEw zxgfzESg|m-T6Y)D6P8=p#ZV@W30CRER_@*2%b3qBKg|645as7&Vf zILKm*ut&5SjZKb-;$v?Qg+*XrsVh~s01G#p;9;tvRa*_p%{o)*qLoe_G&p ziBY3j3{aZy4kDN$b-^_P=&ze^9iG=r<3$qiq252$Bygn+8b&JJX$16`D5Dx8MgRd{ z+VQREQZ2vA11*pyoD^V=0o)JPRI&r+M&t;GC6Q)KOUw!-N4kQ}G%)GGu_dn}+dfHL znV5S=4L*y=itv%G{oCt`R^sEoyU1oJ`GO&J!59hJ*$8nU+@yrUaenLzA^-A8<$aJh zz{h>0ieEusHJ`9BpP2#)w-CifVYCzb%zfERPC5-CYm>4dD~8F{rm<={nE_4T&_6lR zx^Q}W%q+CF;CnsbAUc@bek@}?Fy0ZyLQ{|Hq1UNH(fOoN?Rn#;*NrS?&sN|(%p#og z+^pMbb_u75#B-`6&n!)o-PnP67(4r121G&n3VP}mQQ1a9LMr<1DjP}pO1K;@qHWzs zQQ}p!Mw6@pT9ck;6269P=loXSM;EpN0eF|eVrWgf9gcgA%@PlVuVwWl*7k@AT66_J z%D#`b?Ko~lufGhD-pYURZD7wIm}tQmf(e;XfG|`@1|m32+rpA*)^A1*yA}>w`lqL2 z77}&%Bq2%-XA5lX*Tmu^2$zeJ#(Y+o&+bolAwlkOdoXzH`6SJvh}#(s(TA$N8E93t z%jOJSRa=8ExT?Px=x)lS!TVTl^q~pgU^@@`8=x*O6ykjE`a?5=)j-Kihn}Ps4>i#V z7hA`*!ShtFjxg3}xz{kbsL&htOOrg4BZvfmdHQQEA>-&lK`7{@!5)10alr!id#&_C z8Qz7S;Hz`Gjr-T-bGa!56u_r2D&_iJZAP9YS(%h(1h*+lA97@CtihekH`2`lsxjK? zEj95XeVA&p_#V(W&8v}`I^>6`ZkLUQMAQod9lGbQp}6jhCtq85#)NI|NZUk_`9!R+ zNMrq;;ZC0`*b$KRq=5m!ji$Tjm-&RT)X<<9Q0si8@HGi*GB`OC#V~VQ92Pafuf+;o z70sKmLQ{e*l0#Dm`LjRnKKCh*=*5Es%E&>Kk1;pXZ#eo$B#i~l#kGC{pnb?8{`7kv zy;~85IG#_47_+y4c~K!{srN1^MRic);4RRST@_I%-9?c|>ispn*L}>a=in`M`Z4j{ z>a*m{l+EC+?H!t}dyCj5GxD;-2TEnG-r&1b9hf)-PdH3Yc~iB~BRb(s_rW0<5e1=U z^3q_E!4wF+@?{6pRxL*RQNy!Y zJ8w>e$rXo49sWvlgO<;K3h6$jZA^APaqRW6CW*o(10UmAJl6$iK4kercCGGF#9frR zN!?Nqx4d6{cQHSwetZ?-cl-(Db?(u)vT!vsfUYX(h~H42A9LtdM|+e@Iu&X_LjgX9 zTs!=a{kEGt3ohr068UbG;+Qxd&*DBn(MY9Gs=Pa)JcwY}9bi@fVxqLLoTH z*wO92z&WcNs`!Yp0>`o(*6!N+1CnTJ8cEhD`d>H&#Pm~eB2;jCv1QkVmTF4ZVXvfIyzoEQ$f|&)uy=cidib7V*)(ba?s{4CTtXP#Ed#U!=N6 z@eYYZGyw)~NXcJ|rvQjRJ$EccuLk6C3I*4ZnOyVTe-t^G5YADUxvBRwEh7|b3~rrVGDIZeURxj5u)lg0EKGRH<=dB0^obMH5quWPOdz_ChgjwVM%(*GkSzWb3dxR*MFcM6pGhU%Qv@an^9^!wiTF#S{l-@g@e(7RY zQWYX%>FaZo7fl8sFw6H2ZP4*+Qf;Sl^Xog{9wk(G#w!%?MX|!6YFr`u`w5!#O?O>M z`2#B%$vMaK4$stx`17|YbVd+{^h*3P5+98*deass+xF4s&=U8a$>EYf z8U7_LQ^B8gSp{8{MnoxFf}9~i`w9JU2nM>B%v-k4pqq-4Yxk%7Pjqrm23OG$)&+k) zfqA!C!HO_(q^O=FsJwtBN~;^Mv+@d}V2?;yv_JO!#%f9BGvtRspmq7!@aMN8`y#7T zLU{MeZ|1+1 z?0@?+ngffUGY7YG74~~qBLkTzC<}k*u1Dkis8A&P#d-~m7YC*a8+M)7Kp^Zh!_KqI z$y7TQ3l6z=8rL8%fJi|PAUQ3y;j1%9R2!XBKoj$PrOyBVw>>HJF3(`0StN#cgbBIn z-*kA(UJORN00tyIIvab*&r*3~DCH;40G&V_6`DC?d$eO|6V*wp(V9;WZcdt!6oUrC z`ySdmy6|(INCeR#-m%wKwwkk8XHGFwirz!gJgTw6Ktut zA$45OzWP7gk3Jj2HZ>PJy=I_;P_=HMVR=1qzdL zE9i<*|{<dV^t~MSc;K_C@zWwbseG>a&csL=mPmXFgZ?{o{@1;px6#SX zubvujp<&&5oi6-u*p#|;&Vt>-R4jvF#+K~_zoliz=a}fo8wDOvt>hH3yd7(=dA`sC zLhSf~8Odp5fowDGKN8qN4GBjmLSAI4?(kV2 z^bLYrg9tLd=%{XMVra&Dbggtd#&7D``8m2~jZux+-e=df~! zC3SK}V6Fx^`3mjMP)25*QlUu!sqwGC*x_jxFvkz)ohfC496|m7gcNbA2n>(wSY=$b zyYER2jp7d7qIuZWIA`n;$c2(h|>MzbEt8z-tJU) z!XSXG$CG7~0O@8|#^~AuM(qG~3L6x7s%qMf*rR8Ze#a zV*DR}`Tkw-DDp

    0-Dw!EpQ?_TJ^!)-B2I8WHnbYwvUNKJs-lE3=$w7cC2ydHxF^1Q^TMh-z3! zbPN_?WYgv!poQE*asz}=VO0a6av95_BoJ-@Lc%r@0tv~Mv9qc&>t^0p-t*XdeKUea z#E6J>%0eK~AuAg$XJy^H&)I8z-<t1Td-VmjsZPl+Xh%9fgqn<%K><~jo8$q3S$w#&KTCjxw2!XmZv|r zqDMX}PvSTQ*s_xQtGF%4r?)rw8~j$rZGH~mQy4x6@J1u`Vv~2l zdy8AtO-rNK!bY3`vutFz+4sztcgyC%i~UhHy-brK44TZPvQT9FDN_jg-jw!%=O7`J zabAk1T9_Kb>gH{N;*F(6MvnoG7i%EH-aOI+LBW|N}FFjM=@|*X_u*%Hvxm>ozPuslGr&D0lkRYm4eJZ}U>FRCD6 zrEU*cr}dP1Ser=8_d!l+F-*s_iR9qD64S;IyS^Lga~GYgW`q7e`4f zlHsi=&|L7uptg%SpEg%%&%a57WM!xh$>uUmMMyaUn#7m8S_Q{%a}+H)x*wY`nU9Dr1OW-=uT za;1)AUGd)g@8R(pK+JD_=R5eN@Bcx3`2Gj@#z$Ynx;)?z;OXhP)P8zgasZP`I=6m@ zG93#UbN~4G0+-7LU%vVhzx$&f0;0HF4m`bi3)_yLeEt(0IB*;bfA{bG9en)pCpd2> zKK$ATc>n#c;)~Bd!4H1xckt@D#u`C>Gpqqj?xe+pU(%?a_MLPdMi86Nyq zNe2X3E;(vorFx`t`^572&x{g1)&W^Z$?J7#L3dO_k)f26Duo6)WT0BYJeoZ>9GREC z_ZiWZ*-Y*8Ob$gzyN^o@NS>c>Ve^m}PT09|=I^@CUEXC8#pPIV1D?(&cAzns@Q%n6 z>nPKBudM{tma;_2zrR&=E?5%Zwb?G^O(UVCnFZo^TRM3AXM1Qd;tIy_X{v!4`?m@3 z@;J-l!8`8YNGdy4-i!C}z3Y4U?(v9^eBtF* ztPMPx<6=8rqHQn@dHw0(nRG0yoZK6LGdw=5r?YBi~5i|30hxofQ5Y+7Y}wR|xpy>NC=rEbkMHp#$SC z`MF3rl`5eCBN}zEpbHzw@6EE{TqOf4TMx){s*U@|ug`Z?dFG3R3Q$Hv=Q%vkdk}F9 zp0#a=##D-Ida3+FH)C2!G*X|(=6&KLm4~UP84+p;Z}$Ao#dg&huBaiFc_=by=TbP% zRHu1@H>X|0vfdUn&>6`;*o>j38b{MNPf-`&8$KJ!TPWZrIu`E^{a(PfRo~5|%exP7 zGd!bf8DWY7Jqvgb?K{Y=nAcFL#=2>7q#WJZ>{Gp(102brIPE<^M+##MnOddMkZm1F zQc-dZHV8)KI~SD5A?$3#xY)Y!D3@kMtXMJ#)e%ecJj@NVX*YA$Op7{ZM5`8yA-Pq3 zTdh5r5DB@Z!b3EMeQxLk6=_SPI4v#1>gvsi;O2{)~q}bOCpV^AjAYwi48_p z{h^>~ipwq9UrHX~Q+*jKI5%3O;@@3)R#ZedqCSwy8kM$X^Lv7|RchA4VyW~hqf=Q- z;#rP&-O%ccsJHWM$a>!-(&vz>K^ZyT-aN3yN+0WTb}q{ndLV*oB2j z(%Lh?!JMjD)EKR}=&@R*57u2FW8jzPST5(xG?fHA({gaNvIk6MyLu1!3jWR1z}0Yc zo@0t;6VV1GsjQDhWeyazVd5lk``&k|J1TE{L=TgjB!qub@vad*Fb6%NgO}xP0Z}CQ zcH939^}hs{|IE$a{Cpts|BNB=*WK~YF~va8l4}{tA?m(r(2xfhAtRO%?*)fS`681Y zz>9Los0B^_jSI?ZyX0DtJI+3GX+e=0aosQp&g(feHoXA=7GZa|9no2z^ zDFO3JucySHedfkUGMz+iNJwXt!81MK6yeI(%peK|srxGaC zAOTQa0FK=5Yy(aa93DfOn;2dlj@JS_$;M~58-9F7+Qn}@zsB$E*Z6Im_!JvIhT$~~ z&rE7Sr^%Fue-0ogo5sgxQ-^DMq+*mToeII22uC{8Y+T_1UCsNd*I#m%V1qj$(6|r0 z(+WC`$!nXS!jbQh>adR2)+)3b@+m39F0gMgY*5KkVfpt|yC|6cYsz<|rQCOCxeae` zuW}QHoY25ZJLo|S(*xAUX*XiPMGU(lx>5Eoe!k*4B(gpih%2Nt6LB$JKf=eX?H1C7rOlKmR z4MIioo*)a3vrlbnwa1c3g9u;Yu&?DAu0rKzJh#~SjAWe}e^0{Hoy+x#ZpMtQb^?Wc zu#g!2nzfEHqBq_jV@iN?ksp>xE2RiO1+5`U!TO{VLn;A}cX~dL%Ng}(1@|M+l3oWH z0Mu>PLwGdon>9fV9|Tq3dc_0Bk_O%pk`*hN4XqaIq7BQ8WoNA|GNLK-T`kU2#R@Z7 zOVYBqGVT&>)ne^Elf3ffRSjj%=6T7cIIVv#zw#cw@y&1ITOWN3?|tPxJY27M|AY7O z;^m7_FdmAR*9YkJ0k2=b!g+hfl(GnQhfcZU&9w)eu>ww-r#y!cz(X&)vGV? z-uoZmP{r^3=!bZEd&4K6{1`v^~bK_qo>L5IauJ`mXr-*huVa#Y8KOxN5cJ$J|Aqa#+bE zDMu+HM1~6K?(D@~x?=yi9m(u9`O|x|k8y~RXM4(KTQS$r^RhGZte9Sr zz8G-rsCjlP#L-H+)R$&Iw{ut`zYExZ0>^)3=Ktr<2NM6c0}1_WaQyS-S;tPVZ*K9W zH=YD7$sPxpbj`+CnrbvX8fe}9Qq|_G8M0N2?S$EwrL;@)#VQ$LY?!#&pA#*()!${! 
zBnncj$t=J;v#bkjPMI^uTVwzsc{p-2UeTiY0;>++dpATO%eCNC*vNcWDW4oA>};qh zttFMVChPoX%06Qah8dR$53Ra@6UH<{D_p$da(EgkRV0k?<7aH0&F#5C>B&I;{?;*{ zE}e#=LuS86B$d$%$037FDQYvk=M=M~pfy8D*{Ti+J7rni#4+W=yIrhYan0X{9%k_Y z`D20DK`0evRMw!~LydH~E9H%fvE7s=ZKsmQEWm!?rZEqHJ+#sWbRAGEtV>+nYb~fM zjup%aSxl7)Nj(98JCzKo8Yv-~YKqj*?VTRe9MRbzl>J2qC^qL8@VcD>WV z<~y-%S+t-qgE{$yam?Ciu5CQ3xMWHn;qF_e(x$S4fw_BLV(Su|Dy zw7MLXL$BRqHfgQ`)#ByFwvp_^^!O>sQ@`iAxI-s^Dit~l+|DJfg^CSdz>4O3x!?lB z0|XZcUIO?A7QUe`@X_&z?>@Z1cdrll77qM^KH|gUzLz&R5v3+qWlJT8cx zfJU^itbppC3Bxq8RK#Sg@H+5Z%#k4Kqh!=ESYrg)Y{Z#`x7x;9!RxqKs5q{SuT;l2$36r%09%0##cMHqCMUk!&-e_6AMR)T zgSTJcclI-WbUxu@yv1kO_)>sp0B06LCVW@({K`l{v)ycw5mh-&d#3l2YbpMtQ*@U_ zwiS30?!7ff0!@ntfl4Fr5gEe&s0!>8?HM*I{7`ok*EE7A=t~asB)L6QpPAiT@@S(P z&iC?qOAen3Z=C~^{jNe|q*3&pC1sy6Zh@)h+QW8G|3nkzMYQfetNS`Xv-P!Rp4oWG8y^DnBZy(XeZ0Te!-=CNLIaSc@6w7ex8<5 z$#)ipQ1%VJR48ZWkPo#;LBb8)yyIR68={zI?t)!!&J9M6mRL`Ob4V zA&^-+m;smT1s}Zs73^&|pKtj3*S?1D{h?pN*T4QXh&Vp_=v(;cqiER_0V5gm16mxv-Bbr#oa!H1@ z33tKZZKKt7~d6P!qUzVC}F!U{hy_)_R@=NJkLi0+0Zn<$_iQ1puwwC_%j?^d9?eK@HQv3sNK3_)+$o60@-m0Bvb_f& z;~a*YxypYdz+V)T|7tTL>E{E9!I03)Up3gj$f+?E3-!fk?`0_oLo3dr2w5FT7dheD znb_!;0(H2xP?2E0wBA>=6M;RFiiMh*Q;Co?c`FL10(+N#iWpld>1#@R7CMaqeZh(v z_NFW+w=~-EO5#46$&B&L`7SIE9Z6ncrm`EcO4UZJ_+k<5YD zBS1WBt&A{>XXCg$U@0IM#UUC$Ll@5TmXg|p)27Yg$ZddT2ei*nvaD3MSTaAD9I_b} zU^}=Iw;P`J4MY<^Kc9Gde#(aaj0oC&HYklv6o*hbEuUG{9S3EwZ8$AGt6K-8Y?SmP zN)M8eTC#hvl{TUjYnAj9^Q^N0XO*$#9#cKB9kwAhKso5__ZKxp8*{t3tW@hAjz%gj z(GLyystCRX!MEfA-_)1*`okl>b$!IwE)V#Hb-_pafUlm47w))v3@UVIs!Gzl4`>a) zY$(7^ZN-PPkj*A#*I!a$vY{z;Kf?VTHmJSB`h(_kI0s5Ao=-zu;znXTISN@oSM2fF z2AkdT9gXLAi{ez$zzwfuqgBG>ZAmkhc9!Wp@C*QiTF4GCJ618!%g5n1`@ zJ2TzUdNOI2kY>UTOU8rJ{(M z8{)Yho&^7q(xDFc(pI)%0dfvtFN-{fVdc3tK-p>GI62-PsTY-Mzy^`OOTEXm?d33g z?>GaER)vtoo82o=_&wB}Htv{mzF=;0v!GoFTT#l$YojgRc8W}LDphB)8Kmx;F(Ars zw4zhf{^`^_ILGF#9^~*bSSOZKL9H0{*ze>3#@ffv;(4WwE>x!J>z0{x&@ycLK+;-=v3N=bTBPK0i(q|8Y$R=!Pcoy%z25QvGbu^V^Em3 zBJY|EVyUiVR4Lgre5Z8m!$(6eQ%LY`jC~J$c%H}vfK?=3L}VW5?YlTr_vn9$J2-4m z*^tsw1?we&&kTp*SE6-i0sAx@x++};*29`(aqf-V zEo95@edR0o;DZnF;Rhe!y${~Qhwpue_da+JYh7UO_~NtA@O*oV@BYFs;)4(0$J28t zAdbrg0>Mvy@?-quC!gZ=n^*Yp?|zKu+bxzv#|3X*zrxeg6ZTF^JAWU3Y{sOauHVh) z_-uSe@T}YCEs}#dPn(!TGF$`>62#~Yh}Us=S}YcJLRl-_WU|wo)@TfM%CM5G97z+R zUMHFkMOqJdEft;QOjSdORvv@3pc~|1>Y{g;j|UO}Bi33AR4ElX%lcEDvC0hzZQE{q zo@DoEDz0MY-+!h7r2^~pwp8MAxxOt^yr?v-(ewWy(Er#4f631C)1MC{Y9OJ%0hd1p z@w+%o`ZUY0#fj{U$I3~gNqfNQyQ4&bKr<8oPm3B|OJ->RiJ%+k{pNzHG|nuh74(6v^r_z zd&IbFZPc8QEz^pfnqV27HX9PPMTa7zhX*p&50V3!mZ~@Ff*U2zUqHC+b$XiFNO*0% zIrQS+r7pwOY75y>#Oy&Uu#D5BrGf*_DsEQ%d-3O=5Iu4OmxC6Ss*{f-?n}nVa_i4c zmLwqU&=uM0SWrd4R1ZyiMK)Ig9LG@+Q7XXYcnDc1JoC8~*W-%Q&IE#9VvvvrClF~U zVv6-}jqLbr0xSzgZpNbzUyvgTynQ<(sl&XI65Ji9J8n-;xa|!)4Y%`FF_8NVLbcf~ z)L70@7c@Lsr@mBsWg!TNQ+yQ5Iy!_ObM%hr#^7rc`-Fe6OkDSp-ioJSp&`f=~PzpV-E4 z-(KOj&bRo%`39fh#24ocpYJz#3&$&Qyh%nxsLod?K{Cb+JJnAj;U7*=n-s?<{*Fu( z5ofA|!F9Fdbv!c_14*^SYT%<-`sL9L7-giL(kBI89<^C+ephW<&8$>8dPr9`&(N$; zr3KG+OZ60?zDntR$ah7GlvK}DR=$}^*GfR8#F}c;e4p4{ud*)ClCw$~AbKZEY&B)o z5u^avfM*7iMYT7gUWAfV3=Q@b6-~8av=n`mt^RmrlmO^%_h0#D_jAyWJC{c*)R6NK? 
z%V6B?Pz4sqGu!%z^p0P3gh%WQ%WxfK0dLJ%=;Z5;PcDMmX*?dlimEyuXhTxksL# zjDckVFda2Kq4hzTVLCoD5yMW^jTR`}VN+eoF>%37z1a;_{p}j~lW*jzz$x&xOKIfe z8FKXb<0BxZ(r+qspc5gfh*S^jiKn`?&HQ7GkTOP3JCIi$kck)O~XB+ zN9}}d;5=_JsBpvWcEX-F3>&^T&drSTxn=XBhPwmXZeQ=cbEkz#Zkd%cfn~#d6TWuxD=0-*WP{bXMAs-r$JDEC`jaO{KcODt>Y(6M( zY2Y|s0(gYtnmS4=x8@y5jM2!J}XB2;e~kU%6bcOmGG8fKWqSR>YI4 zD0E%1WI_C_24Z{0)I_^WklBL@q0~cu(7~H8uuM=f<;d+-&2?H(u|J=N+KM zr@is&e8L;wc=LS2E8OrF&-fhA_!Nd8;lyVGymrK}B46Vz+SP?wr7E~Ld7TLdOomcr zd?@>d+)VP!&6!21M4acm--%@~>?h!vSFp(Rbc=Umk^ks&EtLU_tSXqji^2vb4;7jZ zvNwe~#sPhkCM63Z;~0Ce5$ez3AeN=EXXCO4s-2~{ksD-1zE@vxIrEh3l%H1INm&47 zK%^Pni`K=o#C7G6g=~hqB(7H35tQeWjFAM0Rv@MiWr%cyVHM9K-l8^kBYMusNHW}| zk<5y=Ji<1tB9e9h8}&+=liJ3r#dVS%2RMfO0t22sV1k;=Co&oGxn-Te&o=JbabH*J ziPi!sHlaJ$8ae!4nRY|Fs&vE7JCWezig&E&P1=-FWjAE?=AKcAj$U|5OS=b?A=p3s zy;Chmi1+}ZveqMlTzs_xX(baURZbNXEvOiES_;_2SgKtZpBr#gLH^hTMSECwcJg(k zpXmTEj=1z%GUw;3Orau6B8LWKDk3%Kq?QH6WN)D_&`7p)pbs1LVEWXqtwzIM2EqND7m5KF?yWs8}{~Zg(j2|Di zOD1y>cI!FJXY4(Pm=$y@9l;tO_kg@tI!Y?1cPn8h;*&#YVPB#>v|Y)hd;h%;@XLSn zkKn5xeigs{J3qv4{q_%FwsBn#xZQ5}^3@l3`+URR8<)$%!hzd<&gU1kP(mtn&7zJu zHyds*)lfI)RyGY<^oR*c6jWP=`uFNJLKZI8xUpT^Cv^He|wm1*a(feLMGm z@Njm60!e=0NGxocQcOk>Id_%dr?r2F$QLS^>CFS7oh(4;q zD-ZbEauaUCOFEC~8TBWg595e)swgqWGJ+)Odu9NJOy6fAFx|xUvUX-|^u1CSCYaG& z$3_xpp`9J?e04Zp7|e(ym=iGLrjVr&J8M!8G7Yp3Q8T-n{)KFPhx&v15#f-7iD+R~ z4oIYAj`b=Kq?ew~tI#S>*`0@~rwHZ#E0_$*8^{=)S&U3-595w(u9tPhUHzU1IJXt1 zScD=Cvb1I7Rd0q9id1yw)k$FV{`xo!llX5rVlUv9SW|ihDNnaqcGvQQFjRx*fn0m0 zu6biP7(mE|eFXF^A!%V(Fz7mJqP--3wwl6@kXj!rJ-=&V<#)M`14|b4LU^U(SeJy1 zMY6{+elBc74~=w?%YvP;VG2&css`W9acFG4j}@s4BHEj-Ov$NCbpYpi;$|oIJ~QFL zux}gtjQ?)Ku%9+=xM4r8d->_Wpa5-?( zfb&Vl5pFk}0N$RrX#Cg08{7ElzTwqvL9PlUjj!XiPrvEq4Z*YrJEv0VMu*LJXQUOx% zRx8d^z>nkkw_%?^KL1-Vr21Aq@i-&A)q_JBM9RJS8N3_mNJz{fqKveO`jM7^J}H|h z85^3PD^oMLWFSz~d}HZRTdUwv8QfDj$?tUy&J*}^rX>gU-S%BnRcS|gqE#ne31En1 z5_Bwd+bocb?eD)KV8Gic37x?n0 zufsl?n|8URnlSRMOZ{t`Y#XoiCO_l%WDURpk&#)YUI{o}HZAMKqX7$ELS+bs6~33B zr4%lOIcNMm2++GHL0Y|33Ol9{wu zPoG~$laBP~2h-w%8*iW9zW)7d*^u>LsNzp@s_0ofmk-%HF~N!~N=wC{vd(5P z^Ld@1tx`P*&Pr488fI9=M5?SLn4>eYdBIg)Zc5g?Edt2Nws^;MHV#pSQ$Ie?PyhfR z07*naRHFYbon2dWtQv|`(uMmyw$NI3uRh^1tOj&M+#$*A*{mvaXyRbOJ1Ui1`N{}pW_wEHtek~jFBPPV z#;8T)+3)AJ3U0CYf$ zztN+`YZvN?i;`tg~RYPuvM&fP^h#wh87eZ&RRxm}&WxEwpCkGB9u^R%OBQWQ zqB+0L)FWLL%e<*TbIrt&xNI%3!f*+4`9^>v7l5sbCm2oxPR+r#w4AwD&qF57C2?*f z1g&le`g#MYssR;RspQTcIE9s6>~>>oS>20qQ5Vo4d+o7hb0XEqfvF*y&_*`) z#gK{1u>L%ZL<0g=>4C0DWvk~BjyocWbF*6$+Er{DG8-Uc^Wq~Gn@wjvZijjb+Q!ea zp)R1+M2A`}hwPdR5uhdV1x(Fsc}m|c@6Y8!?%r`5)kK#!xg%Xt4asJ>IVd{$EIyKV z$gL`;yyl&_&WkOp2;I%gw@pM-*-EFF+g;tEqEs_)vh>w(>2IxbtuoR zTF%@x3w!`A?mC&0^!)sCIJ!qe$0najlTI(o=bVz~L9{O$izF?+<_DG@e;oio8c1_} z!mW2Qq_|jea8QiOU@JNO7|vt5;}+m&q#iJ(3(~a>2gNnMCqGLXN!j&Mr^@8f&l|2}^Flb_(TPd~+{pL~ipuip$f zyx=&ZAvU`Kx)Lz#%`$L{?;9&Y-ce^OgJLGj&GUjM!SzYHYS|F;YY{Hq51ql_wveC;*Jq2yOu7Es!U zWa)OYP}Q~@=*T)p!6v10a7of;Y$V}=sx1Lxj7)sGO+I#?OADTdQZl^UCE z*cp|0?hV^kJcj_tAy^K#6Sp0Q>HN>Uyywe3%S}GR9g%tad^~D|x3qz6ABH0r{n8UI-v`!o1QNVbQEmox^CyY4KB?ssbIGKTjqP zWxiUnR!J{?Pjqt7EF3J4IAcn?Nvx;xhj)fH2Sw5%z-@SG0>}Nv$TpsH4PT3)w6e$? zcDvNf$62AB_Ehf_%kHWbRt>8KBPoY+X<1_&)=E!hH=bpZnZ~K24%z)v%vLYNqR}h4U>=`wU$8gE^Ss-n@FruEyx6F_HS~7 zl9k1%@Q8p-ftL2-#1e{Xp3|fy#WA7%tz%^QtY{^K(H=t;02P$dntJpq5k2f?yr0J5 zq5~uyDGE9gOrl{<2Rhc>H#t-u(%^9_dieQSDUu>7O^W%`7SbA?6_d)El`zh6(Fxk? 
zA)3^gwmLN=VZzmiska|J-|Ll~y?NI*+`;%7(I2T2CVz~c6JK{yyjtc^mae(kWeurW zl$Tm*>_w$GBU$f_5G<5^MI)6V`OrS#;XeGiRAT60IUP9!k$fkGDRiTWMAZ;d!?sdD z<}t!S6k}I?pQcc594;62Y{Y3#5mpHio6@Sv`d3G|16K22oIlst z(|yNm|7-8P{L0^adVc=9zYhlz@~;Twk9cihc@w09fK5IhNX77w=F7pu*~WB~up)TN zPMP+QB>^u^6(3)omWR?Yn@P%@Df3gtE7wcOgaDV8CN`;a{48qBbUtut!5DFFGHfX- z-9+gC61R+rlf(AFip$6=lJahfwj@)=VWHwhJLg8O7*)bUpe+IArK4oP?gs`mc*3`D~Cl|P6{s=BgxUIIciTd>mGH* z;L@aols78RvOssz5X_``j>z3l_*2(IcgJ7D|wHM6=04&i2?|3xXw44Ox7r zm$iarAPp^BmkTbJh3j#_!@~m}A0P4f;sstjTyb0u$Xd8u4_q%-T#koeWE=r<)TQ}Z z$Fp{8QP8jg-Z0 z40ucZGtxaUH{@~`@3ks1E+vq(a#%n!7lCi-$F1Uj+pwWoeFh=}oC}Bh%EkmRiQ>jw zLIK^A=76bsLLT$nTi$;MamSIHA9WU<7S!y7q0(k16@i&&m982S@(U1}HK{z|z%iTk zl6{s1aKOT37REB<)>@VwRVnzJPdvR#MNi9AiU~#xZG$N9pkgXDYz>HRuwLrF8$e-6 zwN|AO)L2>j&&h4o9^l}T?dsSOu}ROE$l9=S$WF!Mh|%@Y9IChXOf%Tt8J);?gWhLt z%Zs$Sx3}_S5$J4Mx*=7yY9J+*5lDLuqYahtzp`fL89Ii}o;I&q%+I@4WF(_#s~E`K zsKlsFIGJfne2=QoFUh^-u$2(Hg}dlWjA2-nO;&Y>4(?3>63JD-kXREoTC<^%E`aIY z$>t&LUS8}Q^k?AB*&(-piCy(`^DJAmLP%RAXj3h^d|z#~)vx9yxf3c~XDYX>ltsIY zxNtUgUc(YTlbIWkn|Ln|IPDOU8`PckUt2C3;HTsFA6lTis{J0e=uGp~yM&6ezOZhU z29OX8E8ewE515BDEcHK1&QaNbv}`fXWRQ^(QPT3XXLb~uS9*ij`GT_E2J^$Kap$w& ziD$dtqnXTUQtxxAvXVVeX=9~c=E85+nWf&!1X#u{m)f{_C_F>WCML}bUsrX!$l#vD zd5=_elVig;N-5>mP~p^&mR6K}f<*3?4?3brRbq+r^2JNMd3uG9fBfUv2eFxD9nNRw z+&yHo9uzxcm^nPG8d8RBXFQNCLa4MD<#KT#X=eIE<8|0^JXb98yuKOMnx1@u;P6A-jf) zTQ2;}vj6ak@$R4Rqo(IEmW!sH1G*aa4Tx?IcGy>_j40!|bj~x%sPva!aBH3?u6TJn zZ-4rmAARHLSAOMJ{`)tA6j1ksM9^o&0QRTA7djR`VAPo9KXPFX8%3r)Qj7C&xxDhH|=BKFj3 zODc@qLHp4#E*|9cP`!r*eDD~T!zp{%T^vh&_P(u-z<7ysYE2*$Z73o=?&H8=#;R6f zGV;PzJ2!d)Dj7pnS8h^^6`jzLF$;HvE++VM+CB;hCUDcTp7?+yC^r##;PEFoLs6jj@+|>74BchVgyGl>Ny*IC-mgJ*(6UC@Hri z;IJA^g4=}~hR((=)sJ+(Hwa53xJbJYDn$gncP8~7hEKRB>t67q@!Tg|jD9#m&fFc_ zP=g20;lp{}aNcgPbL06I;K^+}@6#AWkQUCETlcU}plI>=bF(l~UAcL-pn2zm6i~-V zuGo90o+i04wsE~4xLmLKE(%_}e1XS@2dFN*eEEQf>lMf4iWe`gP!(J+4_NDf9>K#< zrAk7o5KvJZN>E^{Gc*-4oZ{<>?{bkbkTw@eL_1YS>T|E5#n?LdhfXVRc{u#`J|HRm@<(sd}`*?R0}xoe)Q-eAaQ~|Gqpt@XK;wprbjkSbOpPQ~){rR_f0}2_NY=?l0)|d}zHMfS2`xD-SZ0@|h z)PfNY=%p{rfqqFBTd`m^*a`#bkoW6inhF05U0qC;U_pl}fiwDUJUbP@>n_C@{*>nz zdIy1){OoB9ntX@!0FA^RNhmDmklIZiBU8O$$~D-^`kBg{Xz~a-%&5;C;?;$gTXV7g zi*f{m;m1}4B#6JMa;k+EVeE_~HXPC?if2q}`V~ z?cVoMkZNf?VJll1+=#nmmO}D`EP04s8+FsB1zPW@amisi)lN$pze|sfjKOLAT~#e;W2+_e zRB&N=Kzey|$*$N#j)SZQWql7i6>~Fcb&V#FYd&fI02Koj_e#VrYksf)CKe0k37Mu^ zXYx3*=}w*vftO6;@i8bgzt6CjJRg>1oMm!V&f5*!47cYKXOdw>)6#wJk<{UXA(iK` zO{?<#`AUV?ogGg*LK-^M0@BL>)DW~e?v~068Qc0&T{(IvC-)=(AGpsLHY*s}NrjtC zV>uVZc;~&TnncR9QSZL=pR`2ER)v|^l0zn1nio@r8Mn<|NVy!+ReA|l7}y2`no9A2e-9|{gr~9_xl7wYD-l^MY2NEb#fBxgUiJC;>7Thh z-~RBQ`8WT}U;7*X-QT$V{!mB={*;UUa%VyJ1zStJrFT=O-W0TA*+(R09m?97EA_MJ z$)PTWn~uSmDA-cPD$3pEkA%Ir3rsCwhSuo^^xIS3<_xmVKZh5b&*^NQemW#jlG4N_ z4Faa0vo_h49)jV$r3|}^P|9!XFf5Ov4ry?OHsC&?z6r}(FDK=jA-fD&{sb_Vab=W8 zJha0S}dfMuaEtRFCXY zEIF{Qm-zj}alKrENdaIj!HdV2c=6&TmM$EZ1M65=$AO2-73*@~dK}P99Jw5qfLCcY zhj56b?PFcA9%7z2<#6?_Y5v0>bHKMINEf*Y4tF$$x>|abz zNa&`~&rZGz3)O{6hNx&nP5I+cAjsY4m&I2p_q>}1A8IVeI5QH&n;TRPzC-bqTVrXB zg3IWtXCWrT=9UZ;e#i9*@%&FKn;wi4oLP)R#!84GA>Z_DYcrC{lP-iuPWw*2=Q&{1 zvBA}8X{UtNwF^z}LEIkv5syR*TR8c3!J>g@hA|*|iApz%rB_p0yt9aI0o82dd z#JjpWfM8_O>y(LbqCSY2_c5WKVuWD)K)s_&t6`H$2Qv6O3Yc*paj1y~DT2{-BTk!7 zGa;eAy??}neAk@EOOaVao1r|=cX%;^J~!D&=~(7)gK7D!th(DYBQ^0(r9V8Hj4h<4 z%G*27=Oo+^dM86~n5VXimx~YF>M$&5xi!qbPdbSw7pVQV z%A|b`9#{^-s!z^(eBhosP;x}O5EPpOcdBK&%aM% zzxnCU{6GS-{-itpXis~+pkBTC8`JE3fi$fVDuR=jgL5MjMLV`PqXEoqJrj#&WdV?x zzsm>GP+nDWZY}cSSDktSG*?nnPAMfP4bDOiD14Q#YZjK0sRQxS=d|0~sp$jyVwv~s zHn!m-5=lg$+j?@JO@wShV`g4%f~S&tUS#7znitBiX3j6V8DO5hrWcU~1vl}lLX=Rk 
zSISLFO^UQOX7uwik%P^MOUocRtTFMS%8*QZ@Vf?MCTz42Oz-NnmH~3G(DJeN#O4%K z6Z*8IJZ!UhuPG#dOm4yp@-Dp@B_z@a8B3-`7qh!IO`;8l;@w_vM#}!YSdSt8TQdz3 zQ+}uu)7wxTAGUh8rY+~^E`wyx2GuyhmI=2)u?$q2u^~*Xq3wt;P`6B>F>G(!0T5B0 zd!N|niSyieKA-XY_8GT*!`rvdIL{Me2J;*2bm+mMP!?;|2*l6QvF&qGq;v7)0FG+H zs$~xaMZSY;X>%M!VRqtJip#?Tt~r>vTn=0xF1S8i@%Ru{jv%I5+v5ps%RyVCXx${VO@sLo;ur7gc zVXD!}o6LclcGkRg4y-AYHHeLcz{=--^(NlydG3hl!?oCILS4M{pa>aRTj!Owa?R&+$>C_u%J>~& zNuA}jP0q8tkRv1cOG@eH!{X${r_eQQgFtPJgJ_?_MeD6OnpKTaxdrUtJ&fJ<)*mZr*#l>r@$ncCmo_ zxiqM{0(fOc_YR99huo^QTr0kY=MdJXim?0uWmzd@u+Bk}G$?;Wxyy(ot>2aL6Xa*` zyO)_{WYt9W`;_-)e#Yj4bsVts#C9vLmW<|{;C&8yYdb!Aof@*|`5Qc`xuo>up^Wc@ z$|i3XM%7cS24tu}rmY>F0ystly*L>Q5%xi}#F+S`GOUX}I}qfE_$wKWT(!b?Dh=sIKpMu?piCcW zB-RWE@roFOuK2TjNR@`7pyhPv&2nu(sc=|XAIr#PW{qxRfg&k=k|bgGq?BMLM%DNp z9@XCYM{$r*L@VN)Ytv8var}|e0~Jp5(V4^q;xwRaWRmOLI?ip9HjFd>Hf9!fggZ~Y z^@ZvEJYo#IbbQ|27SvxgrbB&krRmiZnuXyP!x&^?9Qb9S88URO0n$5hw204gMzBfx zyJ0y5(vHQt^Mee?j~1t6*r)_|mZeFzpOT&`#*E8iOZoZ0qIpKY*@?X!CnEL2%<%kt z;`aQE=i3wBK84+bv4j@6OE9;H-%Q+kOn)ut(CLru2t}(bX*0(r*?yk|#H?wvIaI@% zajYvY*9(rzf$KW(`1pX!<%;Vih7!l+z~yqmak)TLaajj+1%F~`Y?XBhR;ntND3&UY zwII5n$C1O3eCLvpG3_AST4{dQ2iDjIr*AWx#FD`dfGdYmr3`3sY;W5~fkxGZy7s%y zVFw0huow~}IYz1JY_DiX!}V;Th8P7D^YSA5o$r{H4Q&qb^Yu>cQKqUIpmZ7!3&tc` z*YKg)KHSn%V%c1p`y>mVEjm6b6V$MJlyvQp9-V!7i&qXO$u%Ud?9vIwhXEX;>D(^&|HP2$xS3e}kcaUjmu%G*`^SeG9Lk;eS zm!Tt$)0a+IZx5flNL@L^E)2BJ!3pk^Il(g;K7Xo>l1Q&M(v2u>MtjI-f!jdY)qvS7 z)NOdR-9|SzV0!hOc!O!R{w%GcHqIgrlH(p&s&^zbNj?@ocgvT`>oE%Mg_#yFLdRyU zwx}Gc+kmlp#UkWqP>j=N4rMbHsjRVO6sD2o-P&A+R=jrc?gf>EHmgF$K$stWyr(wO zUXlYaltK`PFNON5T5Y)xB~kO3s%fluI%_%Y^NcukAAa*z>dP2Rdc#KR;v6c8YM{o# zU?_X}5qQC>C9Hh)2hPRKNN^GFRG1vT767)EMqWyr($$JrY8fi|Oj@BOxKmMjZHDs= z(SGkRZDcMryo2Arv!3Y-N~Kvs&V4LH$RFDtX(T8=eXFEt8r2*OYbFd7uSWQ}hl_rQ zY-))Fbh=(dzIJ*AJ^G>E>uBt(ZtMM{ZDh|{tfEp!(p(O$bTSSYHSZjBCx_y`-NyC$ z0E6S{cEjG*$rs)+wv28jR9|dF?eY$Mr^oQDd1ym(Z$$yqxU}kyQ|$l%AOJ~3K~x{_ zvb(EFM@r6&Nu5?=sijCkua9Iusnp+{54|Wdm4u3qS%aqH!*}!|Tw-hYZ1L}{{y-Tu z%X6VbH}Xz=WG9kIo+G>$DtdjWNJ^Q*47aRPIVV$PogtESnn1lJ)5u^P%zdS2E2WK4HzE2_3dBy(|_U5|AkL}?bm+o zfBM;FNT~h^QUAx`+J~}Mc2Qq^KFGp2_lAqNSl;ximWRlc^rCfS}2gxT2(S&IeH@oQ0QmOI!I3 zweo|*mf-Jh&jF&LO4?ny}a!Y;O~u=*UD54cpfb*V0&ZV zHlEKLo}Zp0@Lw9P~aZYsB^>K8)}#g>wn~?^J147<2%UNEEX8g z)f0J#R!xu)Ne&&{TfTZH^6bGTEfL~cT!mt_3{a1ZR3lBodq{z~;UYwhUT-Vg90#!X zTP__0;SQeRHvK-jOe%4N2F=P9FV2LQf~=a9nYfV<(v{Eu8q)GEJV z<;LvaIZW26b{O(s=@`sGH*DGPP!&dPF!=pU?cz37(dThm6U)Ncu_ckVRcH_C ziMQ|+WCR2d4Y}svUhjW3Pqn9SE#J6PrA14fjrwfDWhZ*Va$w`VZI%HsUuZCGC&_W% zrIk$M=!oyO-f3GXsQ4U91)Yo^mEXfiXcm?hKaArN$>m!E(T7c^eEM0Z(WaxVGo&)X zMk6ITM5>{+>fI2e(pAT~?yh42X|22>lS5l&;-lW}prVW&o@^jY#e%0wnaqG3UuVr8 zeBZX!RX3^@t>JnN)n(NE)`}=D z19fDpa2Yr36^wo70He}3W=yjs@E0Zs{e zN5b-Ed&Iq$wKe2^@W@)z{q8Q5InXDKpW%)5&$e2u5@oFURJ78?@oQz}s;J+zKGT-A z&GFNpeu3-tA|HMG+yBC!`*Z*9=fC=^zxqG^j4~wTPl)?JQL^7bGM;syWJ*Iv5=q7R z75s#mzP0Vu_Vt?IZIQ}DDIJef39ix-7nGDMJ(%VdXM=pPDCzCt?~d)OH(i)p6y+4J zUWmbRxQngoi5EzR#-Oe?k*12zXathQ(A3I0e7X7TVNJ)XEkVP-YVeUHRvV<*yXZH+ zgE*EZ&j&Ytrdn&Fa6Fz zm^37B!es8NnwC(RH^I9&9jo)V2|T5hM#kT*JZ&77jOZR2ikOP%Y^#bMT8S7T30)(G zb;Lx{>&9koC~hMDcJW190<%2*$P1hq&fRzh?9#9kfcgA%!?~YvJ8w8o!|mxA&$k=QcMcmQhmV#jE2efh z_C5n_CkR_bUX0s3ixI)zyL`_QRc$w7c~0q4=vr8^a5)xqDXx!?5D{E2k9c_TfQRD& zmt)~_y~a@D@_^%5c(@!`3C6rWTyec#A#y+#a2yAY%LT`wSjPc5LZN{blIXRL{QOo+ ztyQr?{~qu1p}AGkjJwTfNA1LhPK{}~;#8sag*)sC`L6f#D!|x46ktxIR6ja|4K+X= zMS@Hll+P{8Y&QkayFj>5FIAiuH^pPqFXKJVv#&v*C^dqQVVk6?%cVC>!fg@+w)74b z=Q;-IEO4Z~z-{{P;ZCdIyzF+!Cgu(nIaPCWV>CVPNz(3f7^R@;CeerNc2vVA9WUq| zRZAP6(~78}>T{t|E$-PRJrHol;%YN=s=-QFooEBVWWbj>0Og>>tJy*)G*CyrhYc+G 
z0ZT?A)MO^e!XZsQ?qsf(VKYTa2DKpuVx4R)t3SIvDd{5#0KW&q#u{Z+@|R0*oCSV2 zk`wyROXkmix3~CJJJn~_o8Z_Uwpysv9a(o60mstKF*n|wERt*t@$%VYq?XOpvuc{9 zmlqWkNJV8b5qK~2=Pd~^;bOWDTK-%ObXt3Bc3v!`94hRgrtlG8sU7*Mtb@kz-17|7 z6?~7$C5&fE+e$IxM4AzteTmg@P44a*>7lIjuyc)Y@2K|-Jwj;plT*6yR_kA7Gne+{ zhD*lpM%$~j+pf~TSi87;#ko3W8q9#6OQ)8&bOMdi6KY=1wXYmXpc~?P?uwz}BgWCg zvNdd1p7J^wsVQ<75+cKTIus_kP%0Xn8b;e%WVJiMy>wJ(r4$t8leU6hfHfdmI^|EL z0OMt=^YQiFJ!h%pVL!t`7RKQCj`QfH^w5l7Wr9v8J9yavWMYc_{tQA!p3n4%N_r3< zJf6{OP(?AJ_Dq{YgJ3bG|1~Vm+SjrVStD>o94!a2RuI0+KN`zPk^3|kRc-L^82%KV z+%cAaJv^W#GTXzjzo=l_P;v^v+TLuGJ40`OcGVB2y{VM`m;z~QySQ+0r=@0D6i|rA z;3v5_txEFZl2`$Wa|oA4_M@s;MSSo+qfq#-V1X?4sHg7#m<2te$RcqRQNt?WH2NhuF&!PuyF;W4o zny78s_TQ7;I_cz5h_r9Yg%^imkuOm)a~5$VBWu%(=^Hky2C}Egonx!D1<@*+MYJQ6 zSXi3hduLnII5K~=A)xtLsA@@cwWPo;lh@qVM?4&*tXi_T4#+}nF3L-(y#*z;U$wVM zRXeYasZ#!)jHko0D zJjn#^aVT9LP%o%_<2@bsUa`t1?`b>++^j%%#uN~{D=skO)A`&Xr`laZ)F(`MXYdca zCCXgR*2bbCZQP2guihkyJ;EXu+4Il)01?SF%8bcl2UoT^Un0CJb+7X3~s-A zh~nHd-b2xh+b%HI|jv`pb~nk^mPa>bzlQPrnbjaky|;~?=N*SI0fgmQ?A2J;aEjZb)tY~2WRrLXNoX;GmT1cR zJGb5vPHEPOMD;xt^C<>+kUWocHeQ)PV+o+fgmcTd_hVtOt&d`}5!)B2g!OjyH6%9S9!^+OH`D#|qqGujr`74|!5G%MJC(-N#)6q%87Gn(JV_C$FI*0j(wb@AqG| zfz34p7)%NssZmHc{w^Da-ZN`_V_T102;z73yTlg9P%$!Any1PD9=7$W6>=T%K9&`% zTUW=&PfMO+-6oHCRRwY^o7JHnV^9^3K{v6`-o(x$RT z@3;vu=+MNWWh$eOw2O+a9(S>lRiifbb!YV}}v z@#ShJT0buuRy>{Tjrc6voo>w|LJmOYo#LwoA3!KDO&o9Dyn%?|Ti^WFSO2}g@aO;2 z|NJli< z0lli0L|89*u8#*VvzkW?3maEuL2C_)t8D@+mO+pXxzF&jqU{!J6bT!PKWk;d1}xVR zqd7K=tina#*;Ht4B#=-qM#hzNt`0|S)`>GE%=+)DqtFXgWb7RO!_>}DNSILJ%Wr8$2|A`U0-fYPh!Ds`mbjUkPX=AqOa znKq$Ce2#y|_QuT|uoJiEjptiHF`v$7oKMeqKA$Uobem%xE1!9ViopyqxER|!+o7mo zw)D0Qc>KFnn0O|^xMQ2wRUDiMjln{yAl6!V@%Vu2#T)Q<7WR4Wu0cSjHCgMtQutyluQ%_8@F>xG|3fx!a9J6%M$W|D zdZeP;0$SXX?;@Qxe0(MyX{vWR6&7_mlp~hs{$S=L6QiV?-0We{@M(vW1~hwlf32B&+GndNYV(5n zcV&n*y5XiOLp(58`rKqBBcMFzakjcyCuEgRUYgg05vz^~ltkOcGLHOSLDtI)BLk}f3#V72oP(f3!IlKfqNC+& zmgQk#6V%A%kaMIdTwy}7@d?UD`QUFigp{e#$w83s6oN8W2v>ut~210gWIGSi%>47 zYgjjHxn!i>F>!b&+x@V9imT=tT&AR+&X=M#rD^2SkW6rhq-tK;G5ho&~l%fglhsF!sPP7-`W&R0bY@J=?^)jrz)q>su>5D1D)wzS1p*GrZd8RcBY!t8oYC+ zzVb6E^1P*wM39*F#U-HAkj2zrm9NaClV5Ujqo&`_0IB6owseySALM|VMtmCAfJwMK zy&Qh>xR5k8CVsC-Z82N%K%QAk!rl_wgWT%)gg6Q^DMmoZS&PCPGc3*F3}VYRZI6pK z%_-lYl%9ATtSf)0*4`=7uj#XG+e{{{1ulx=Tevkd4WfqIA&JCv@E6ceb1P21lksAi1)`5(3oR(06a0CU3-`*&Njp~tc;;AO$m7A&U~%hGW=ozXq1qc10{ zi?Jm5{03_mwBB((op4&+-m#u>I-PJ{O?k2OW-+6E!MZSdKVj*X{?R>Flv_hzY$&oU z3z!JKv%xc&K^`|s9*JkMRW`KX<~cQuO=_>~dGlN_xOr0svj#=SS@DD|CC)9R;1jef z8(rrlz@ykg?r9sVqH9MR%Z;9;>EM~%bKc?lxhPX2L=hSdG)B+Q#+k7g=@B6-oiwY- z11N9x1OukprXf=lYUomccS@OTCUU^G!Ifk{5tQzQnq*=TW=>9w_F~JL9$a9|`jBZ8 zR2DZI-Y4VNeae`&$oe1Zsiz@CZF%d>EZ#=kGvq2dYmyxZ$y$&t1<)iL32qD>!3h!H zmbV@fu|TdNW0Bv|dYHC=oW1!FLS0QUo1;iW z2sP^^GnqOD3&Dxe90XPb9bCT8lRR4kaGUnomWqCO6GgeKlF3SBg*tOY3-SbycI)oa z&{$ltz|bM+479f3{(Og5FJIt`FTTL**ROF}PPiRAbnF&itZ{d_6-fov+j>K*nrDM0 z<+?qMIh7A{x={{&89qq2_q>O_$5pJzrfq7*Xj(GNoAa4nrYMW&RC#a7{n zr+?1lPFqb3e5lzEBwFf=;z4Ml*fi@>$)Rv665JX{;&ILKMoh^vxzHr`=3k%L(nRq* zhB=4U&TnxwPVxgng*__eC(=TUr4~R%prQF>?Lt_@ni{#Gn2IH!#F#P$5fz9E}G6WtET$UEf%724&?jhl3{MO zI90c%E1T*^r=g)v`rnN4)RBX~z&2Q^U-rOBv*Ks`)Q{w1P79{#EF+};9FrYsQpt&{ zN}ZI=eugn=F55G{HWPl-fw4MqNnGXn467SwYmNO$0qW#@9>um<-0YV>`SQDe`M>?k z|K%UUK%)On|1~9*uAssB0fK5n!aVYW4rx=6@oCDfRhi&<5lJ%!s1sx$z0_zUL5At; zb)RK4>O-xN6+ehL0W~g}SI!4y@IX^a&zX`P%0fUpe9M!m7YYsdQlwoFWq6ZJO`}Q1 zEX^Sb2ywqR9BvadU_0DKdK^PGZG~iPc?1WQgi4&CTZQwXeYAP7Mx%11`lS|@DWKl>o`I-|Q&l|2I zRdq}>^|;7=y6l#vkhfdbvx{YQQj=t+yM2`swjQ z(~#e@g8)@s7TN7PZXg^odFWy|))W}?H2ZmC4bQash8(aE3&sG@6fzie3@bt*a#ex?;$Nr7h@p 
z7o3(AqJnMT(P)?@!iP84?dF>l8@Bbeqfz(izkMHL9}ZG8TS;#XtuI!{vc1Q>-C^i~ z#Q{)bKOi`u%N;}q#^`wDPjTtLhRW4Jrvs~;a2x_{f|ta&-8SQEH~@4+{OPe{KX!;J z?RDa(fbpL#tZY7`#%G1oisXh6E7vx-+ZE;v+FZ3EjOR zrrhULFq9%HB91)~(9bjK01gL(X)$mxZKGnam~va<=f(OM_HVG?BTX$E&$H&ol&SC> zlo$_RY#eloDGLbhNV|kgX+P{rEhU|JIYl`yjawvK43oa=bT0(dIkp!f-h4{BtVHbd zt{67Y6%SgxZx_h{YlEtgPmq_#8xsj2oLkuBIQeijN~btWtey>)6Y?AnC5gC2B^4U+ z86Ckm;^e{vEH#s_0#@AoNo}>Evtm9x)&SEmTZ?OMnYFzAYJ(Wt^n1ZhIdb(#X@NE` z^u4$t}sq_Lxhg^j~CB3ov`^ty^5=-siaT~yB2Z4tLG!g@-TYU0xWk=l8 zsxv*8-TSP$5E`P-x4>H4JoUKVI7?$W184ul^2+Xg0Uex8t@`K3XjqpO%j&AL^BJGK z`2?SR`WByl_7*Q+y~dkQ-r#OMqb&{Z-+#cr`q%#ofBt{`Io{uHwbhLV(qZOSlN|Pr zj0xPFfKA^-KTGbK9Ov78rB}=Fb?1yS@op46X%>9WQeps_^jF1{CX~Gao9U8+Oky4$Q%3U(9J2u7~ zutNnz4j_xLsH-n@dXB+!)b{oeu?){0GNj_u$rIc ziZ;cU=H}i{2tff8}R8e^b_0&vG(@|Jd%#J z9-M5C<(Ut#k$s8cFLM%Zxq)b;KQ-*{@U6Zm(t8zI*hny$CPN`&^E9J&*(cdi1d26W z7RgXxIFJ0as>B-!7T_?iIbfULtr?(}>PYd8l&IRlD>l&a9u_a|AMpD1 z8?@drWXEwkaEL&TVZKaaEZlL3fn6RSA8|W&j9uK%81gmSgsbM-$1PYB89(*vhOIO`{FrCK+FVH(iyZaR3JCIPDuH!wNz;=g%oV{iJ8I;!yuY99O7| z#3$}8Ha4084OE5(2gas`BK5J-VBt~kaHdU`}}4crK)(-|U!_RztLr+fswZ6 z#r04~JFx8p)=^xK<8ZJhW7C^`AKYE_KxlnI<1_Z%R2lnnM0ywYZO6VJSmoE)Mu!{( zq0h2bx;uU2P1=FedBw2{_G3#JnH<)*h#U}Cc!&<{LvW0NP+RWX&3)j6V;nZjnWcpy zIEFQ4Bbp(xXWSYd@tEF$h;$Y&Hjw`C*eDDar{e$sAOJ~3K~&rOV`sHTwR!r6ssKHP z0#cj*PHAOG;W+_gxU3h^n0lcUL&@}Oxpg4fIstmLhOKe=c7*^X8+4I(4;p7Ml{zmZ zy*4n{6mUW;JxL(x2c{e>+q18vR)=R1F0N>R4-kq9uenhg<|)o1kT62_I&`9n%X1eb zPZ0muNYMm_ex|cUs>;G%BI1^)7-TYeV-%ZJuH>nytQM*!-Oe^|SVF&zfnnt#h{$K1 zGBsNSJ)V>PbEx5;tuVkN3V2G`n}|-yQb?PnEhoh8OMkp2z8WzlkEoR2!TB*lIz^Rg z*-mDam+uWMWy#`>N_t;mL~WMTLKa;xXILQyQ$Uvfy*B>^kTH>`-)OPf994W4uqnS5XoBjD1MO zaek*FN~V;CxcSr*r(%O|&pkYKA{q=+r#27?-n@Q`Km6)H!jHfF2|oYyQ@sA<4PL)~ zgHK<-!TrMv+`qiTX*~mA+yL1?Ccm~PeE;3|_|>n!#?OBKGc;GF*kX#3;}n}*)guA4 z3D}hi5vf)RX@jk#t7=5$S@Otpn55^EC9`FaqLEZCTD7QB0ZfDQgCOY#bdW}V z=lTpxHJv%(1PL8TL<@)afP66On{=I3=wkPrhJ{W_Inn&*>eNhu$j)x?vo-!3Q9G&I zWZ}vJ%}803l#8o1)286>sdds0RFt{3^@W=-4vdV0j%Jf45nI#@&!FH`mH>8EPNa;{ zY=zQNI~6ZLGYK!?i&75bQoHc9I}~qGQ|ULH=~h20MV*+oq=_v8^@dq6{N8LV7+TO= z#L~{sefH)$k6W+FZGc%8GHarz#5Ctn$g{M-5YKS-L}9OurC5dx#95~W$JVLFO$*oz zN_N{GIlm^9apGqkF=4p+TLK)Yg$hWBEfbov81iSzD~OB1;4f4Qz8!*9HKqlI4-bd; zt%IOl{fG!_|IDE&#R_q$!(5Z(iF!N%sZo|xnymg(pFib-xGZ}G+vI0zbiusTt>4|$ z9|~%U@4kPBzx>O8^VR?OKm1Q${dfQUfBW~pR|APykf{1rm&%(seA?C64o)5VEQj5^ zoiv>h&8GJ0EU6R(&D{I=LVSwFn$4|0^}a!u${mg=@+7Gjhu}y`?J3s101<``&kZkX zbr5BVI5!5wHslWqJSjT_2`SSPRD_dB)M+A*YvyU2I>Xc=&2I{I4`XXC0rLOF)|>FF{?VoiJHu zYm#l@o*V6$sONAZ3U+6vs8?QQ)3QOyoNNMWHdI4HPEV@O&J77^Y>H~Y(Yy(i1d>S( zDU?wP2~VJD;8Y=!Z%4)b=}joN^>)@-(Ub#n4)7aKH1r_Fx7zsl**;<$ucdeq)q+fh zYGQ&`CRsv4@-03}qFVFc zruto`KKS!ggr93>q{L=9k5Gm6A_@LOyMx97V{7e*6bt;>{;-ak)Dccn^wQ6*LsLW5XDN zec!Qd8@_+{9iASaN{JY}hG(#@DPbvVz}PldGC^#%BkDT5{&p5kgF~)7Dh&<_GlmFA z40=jpgF*Aa+Y+WBis@z>asc9YFx<|X;J@Yzk}@XLHX;(2d^1dxQ9m~J7qJNFsIRmV zFR0=!iXjjwHBxp|_LL6$MNN?^hoCjLEr_OkUefNB8%TRDB80?$k4SI`b@R-A5Yh?! 
zf%Zt|1xi{U7pu`uwlw^dvwOZyl67JXK^nd8ss3x~3r!R?EVHT)wFYl0GNT3)cSy*5 zHrnBV_a(Z1${;jx6v_nj&JVyGDg(~Zx^{@QnNs7Os&1v? zA^DI7zw>*qr@jZ z{PFjENW_E*NvYN_`aM*#N$BW3a^lSf4~Fp0YUWK>Oyh_N9c)$=gj>3%2T4i|kn;Gv z{Bk2MhKgEJ%G0M0k^VZOku`yCD`U_wJ+eLvf(Npy@|F9{LKa}#Hf-At8yyhElV?wG zc6Pvic7V2>)Bm&|#G&7m2bHheZ%xHYNi>Zt2^)@0rML=AON_XrFy}#rR_tf*C%k#{ z8ejbNzmAu0UWVd@^jzWmwU_`B-rCBGUgVmhVYAS@F-(@62!%Gsd(Yp;qpK_I`xY{; zZKJdxWbW!gi3d?SUTWqXO}TNpzVS6A zU^0D|l90hlDapbAl9!DoL4)~d36t~$tkTblCh*RTcIKyNL0ZldjU*TW0hK)PvH-DF z44865S{*>Yva+%oAWVmzI9n1k>a|PjmCd-`^aN(1kEN6;)&mXSTJgJM4r@3e%mQ&5 zlcy(7I4Vy`re`XTPycvYIwMW?FT;rMe*NM2I$d1i#n1l&@bkZbr(gWvf&b)JaQ~P7 zEA(3dhE`^|x}j$bVyU{8sf23rOgz~%E`*L*LB@@V2}ZS5&%_CIDjt+uL7x56fYS*o zJ2$uj8HP@;zgyTB9S%DkgJF;=MxD{7P@6+XnL4FVXp?#43C8w-XzQpYKtghBgWf=3 z>pbgZQ7gvGA(a)$m5{pog41dRtE+)72j67{L?)21VaRiIZ7QV$Xz|)@il@(?;QZK#!ej<5@bN+#3e~$wfjl)ACV058P&7sZ!DMz3-WUUzxZ`{zbDMBcR}LFi zu7i|D0!)LcSY3_xE(bf#WERt8B~80fz>`z?O4Z&2oLVL%@YU;sXtot8%EcL`d?gnb zCTq-fZ03bZ7o(E3?43fM7wd&g+;U7XXsI`{JRwMPCap;=4V?A|)me9Ctr}LDWT51q zHyIuUA)jzME+C{}`cAG$EoyU#n2IbiNiGKP{>240OBGvVB8HF=CbUivT&q^hnla+D zp4K7H*ht!9Dw3GN07gYL)HuXTHV9R%8u!B(os5=v-n_*UcTho_HM|ROe?PEqO2!VG z|AC4`uh*XpGxMns=hJ|;#e>>uzimtvgvT9|%J3rq@s6FUh1WXp8+Whpniks(I>@>- z;=$Kcpg1$qUSAR2P3qYOsGcW;+^s=;RLVuxd&8$vS!ibz*s;^ z20CLxH8ZBID7!PqYH|;<*hqL38Ik~i58i(dPai)kkL2leg1Do%Jph-DO&#OM-rc>! z%a<={<=8NCt4*_@@_n2-J-%0i(OfFLaJ@ip%2bF6y9Cgaw2fj!A=V0k6bnpDJ}Kk zHAk`91nKm$7EDtC8x!sY=A5U!#C)CzO47P5=g2aYux0aXZV%D~6k6HUDi+EVON%>) zcH)GqPAH$|8hErA9JM3#0(kU3@c0-11it*I|191v=k5RbneYE@SN`3v7)brp&jA1S z&)~h^_){4F>7U2_fA_2KQ^Vct^zSFDYC8rhR$)fW(fB?QpsRHwiC@Y;ysThm@ zP^tx~KhFt6cG}6bHeG4up9_bIF1E#ySE(6nQiT%qRASm>Wi^%XY$t8K6)O$efb;!; z%Zo?2JipA;Xg2xN6Q7opneQc&r6V%|3<#LA!r_C+Z4cXKHDXjI2H2SQV8~X;Y8tq^ zzsI}VcQ~GUG~Tq1SxJ(mtau`^)M!+R8J{`yp$&pd-gZTgun( z=1hN}7__|IlWOBj!(I{Hxp=Z=M?`paM#u^W?Vdnm%t&%uA_nvGmD4Vl)Ilm?sN{a| z^Vnp@O@@jfKC_m0Dr2492f0YgRjctshZ=_biG=?(DcfeHzG&7~UU8i>icV|eo(s6- zh-4y5#?3z9+UWdqA{8m_NU?c0C$Mv}vM_Bk0+nRh|xavlrTuCiSk7{ z91fwX8VL3o6Ua6IKEig*e7}e@?emO_^ElsI3w8#%6=K2f5D!4@NkZ+hcP0hx8@_b& z4!*xy;Hq(FD?h5xN-55PXg7Od#(ZaKfkUF~bVLh4h5B&*YOz&QU(s#g_Ae zqH4;SYB*6tHG|4ugyEN5?7uO0|HDwne5wP9v%|q;&{xe1G9!&t=cXmaOJczA4u@lJ z8?G)d(e@47)^L4&i^Fz=TSxZ+?Q!+E#8#y#>J6XcF0ThAw>{F^M4$af<9j8xGqUj(0~u4e!7I0WK~c zlD9|+QrqBtM1d#a>Fq8-OLv))UC%*LT!4-KZn4e}#Nf7p1*RFs z@r18^{Tq0DbB(=iI5GZGm8zCDiQk#@QG5Z5siiOqE97=sR9?#{NikepT;eBw^6$g* zXV0p*jRjE1=v{zu3QBLcfm1hp;~O90&8t^Xp8%;;;P@&xbP^Ct?oXYBjT$oK`;}=O zQU(Yw_JpLmflL6YxF=a;XdHNHqx^FY#7d=7g8?=O8eT>vq{}GZwB#&g6etMRstc1v ziTYgV)bj zCQJGY%Hk0qvWANAq`5Un_VL;^LNh~|F}QJJL8B$LVHA~@Zlk=UV)fV550$}2xo)lE zDyQkrosNMrUWYLbOdGwUITYkGp&rmv^_+-<(9alpu3OoR$U|3B>6u1ze!}(`c=RX# zRowo=e*`an=BM%EyFK@Jzy3D9Ui{=w0e|L~@Z?Ya3&6krFGIilWgPDT(-AS1JQJmP z@nO`$P8q>SUPV6L7qMkyF`f#c9O`8ZTE<$#Xd`SOnVv8sHbYh|3DaC$Tu`m_g{<*g)(l2#S=1tlh`AWu)+kiH$ zWkh+8`F@z^)*o|IDgvOBWzfQ+=@znSGx#u^ddJP}9gfF)+}+%AJIPv#aji}+m{(VB zC5v`A0jF`U4T224$9sF&H(Xqu@X$XiHw-;?7r(q>^03kwP?Bfe*PQ}Puw*+)s?&;EH8;Zx}+UM9{;F)2kLL6royzajy;K7!HBW_$x2!9)7D~BX zC{+*nA?mC<#A2Bjqem9N?Cq3_oyEqK>H?&#gC86dpAc3KLNqtn1d(KNrrW})7*4cW zvR<)*>>*a+_CFSW8I(#D`Q+!zy%nCcTMV*_+b zWzz7nddEIS%VN?T_>44se!IZo2tYT4*|$N^(A1+2Ds~89(n3W(1`LKxHw=x%-SAX& zcv(vZMnzV}?3ebMMJ$5;40?-;6RJZtLNoo2;Q z-z*xPV(Q3RH?rm}TUQPa=gux+&Up`{9Yd(Dd+<5R8D=Ka`PxRN{Y>5p@Sg$rcYTb% ziRsSzL-F;g4kQleXSRkZq|{j~nGGq?;lZboHSCN`+727`vooBIN0^;>a77b~=4fVY zDYiPuW5DROUG&T=E?dAXR%DTM3*$(m5eiakMh*t*VvqtH&1RF3O^rMRQrS?xHw380 zP%s{$yl%B+r2oAT`%7Gux#84La2TGw_Z*-3@O@mIpG8840|!Y0cpRS1dK^X=kUbB_ zWKLA}}Ty7=ejc_8@TrF)ta*rhpcmv|~6W 
zzhz%3rZtvHJJ~8ORn%twd2ZwqK4nF9oFFelOrp~>?wY9#oQw-o7GP0)SimydGE9lX zuTjRkKrmhEf=@s0lqYiYz-Fd44?c%y$Ov6CZmG!D;8sB;0d!HUDE+L4L8)O{HCU`t z1|*tOd4hi^k4`|;+zbuAJ$X?y!ZkfzEU_i#XR|IKUMMk0^Z5~G#i}B6n=nv?ma_QG z&1IQU^71MQc0>>kq3rLyr#SoP|0P`fgZ~g-{sTXY+wb<*-~IYK{Ce|~KZPg1^cQgc zFaA;B3$K%0Zf?4O8hU_r<6074@4en9UgFCt4cwyfXk*0%##Q;i6sw0s19UEkN%p=80iU%-|7OvzUO~9YKRqB=$>tb3& zghR~H*_32!Dmpn5`Fd7j$Gi_g_-)g9W+&j*5F2w5;PUbk&!0cXc8Ivugg8r+*zsG) zyrUU!Q{89`u4KwbZs}s3$boBujwLKM8JXvB=!nQq>xSWm6B!cs_xHHDy^AfS5d3U2 zuH3WMNRN3c0%U^AQ>9r|yaM}S$Kh~}^Ycrbot78_TLaR^zc%$6TQOCs5J2~QuW7e!QXC4) zXFd4?{Ie3aN}&?(D=TY7BMG%)E_K?0$C4^j>2Dqq6XQ}vGleaarjn;(YYjZDy`iE_ z^>2J;68`Nq$LF)frd|l5v_-)RE)!y**rpxV$u?WymT76p$XrQ<^#a6Af!ETx6)V^?d1A>(@*$6?qA`B z5CL9;w_Hjol)t0;V#kM+MHuU{_+oN-SX)|En72b4d#8}a_}v0;U~)}Z{`;di^EM^B3~ZiqrX2L zaXQ_jj|d)D&D02#kKG7p$}H)Ob;=0c0I7^pV*6Cd=( zyIHu}%xfDYm>Jb5nQ44TvDn{k|O1idYo^DF`%nT+2r+&osyKCIt-B$q#nQ>YP zqDi%0^m2{UMCJA7mFJzFtr+;k!L-Yc#}}74TpX}%JGOl*=~L4L3QmIZ>HdgU@800{ z{ubN5$FL+fQOT_El!tl27-cbO<|Zqq<%7ilC#Uo{h7VVlTzmd|%GfDp!b`Kc>7n#7 z-V2StXN_6l3tTHx8rDcnN(~1K>VzcaYn`6(cz&`tr1gPx-p1!HetxTzmsF&T5t|D| zSOun^05#NO22{!1)}f6L#WW*w3qbWqPJ5yLfEALku7c2FZl9boaM20m0?F|%6R5Nf z1R~}x8zm}T$d+iLF&xA^XT(#bBWU=4*?YHG+w$zZ>l@=g*WPEJYu()!+wG1WrNNOR zafw7A5<=t!DJVp?k&%ccUh;rMLV|>N00r^HMFAoR4oKJvHwh3gARbV7f*?RaP9hP< zR@+J1?n`xbovJ$5z4w~`F?bl?H~#bVv*UD`y{uNXt9PB+Ypprwe_X!r_eB>WQZ==L z>R5R2dK$En)G#i3#U(a$Nuf2+S)k3%2G2uOWlc8=0GNr`Cbm<+|J1*Z_#;1s>v!J8 z^<6)G_xi0EO8ms{#-o4tzs19U?GNMir-{ur+}sMhg_bp!I@G%{D%DjKi=n~9t0@pC z#W+!2)<$Rio#NT7~%&xOeXgo1gfZc}1%dbY}Fgr&J?{hOC@%d4K{lmEe$)01cYoh=yb{HfAdL{v9+Iz}XhOF7C&`H{_=u#PsYJJh#jpVUTM z;Os%QLNhb4F}1GlTici{Lp5z%xoZXA2_26A7RqrHi!7q14QZ@r((Ki{i0Z zEqa~y<|Ln}ReI-%NXN^Ifv;bGjK7tyfRk5km~-t8QUA`W$2lC=@S9vfC$y$gMH0)2 zwbEwv>@(AF`JktW1Gn!OV{3IE1eaux`?o%i|J|njxrF@>8T#Ym#_hQwINjz&o6(fzYFNDJvpl0q ziNRsLL1S@s?BAP(1u@$;(p&MgTi zolPBgC$AwEJ6%P&Hi_tDEHVbNr@6IeuLEj|IBdB*i&FLYL0KUkF^dpB=Q@odTiUo7 z60nu>0Tvu89L7P#;ko!F^<6L#W$^d2K`w`s11qZ6j8tqEO-dS>APdCuja2ir*}N8z zGOg>Z{dn|YCkdS@P{K-?Q`7OX11VaL-X#9~q)NkT%W6&rh7JhwKR6uBr~;68CVEWR zlBL0G;P81KqwK7<7(wFuF=HJat=2562C}7wK?gY28p@pDS!8KDO>BSsUxfWH|0iz# zg+Gq-JMZ3g(RZ)k(%19f{}14!|Mi==|2E*46Waw~n>Ra0ennmHO0*FIudR1t(D1yg zo_vwsBkH^>SS%2;EP$h%C?+@Ait9@bek+pBdf;#E|JIXftHCd2@KTw1zNtirmcbea z>gc2;^92z$CV)*1hr@#57>ynOX~4UJ=^kaYS~4`NkOIwkb-_S;%UByc*N@IxPbfOp?{AAUM9 zIf-W$ru;ZchT9|eZ2q=n*t#bM}QNv+nL2IWy23Xq39 z4)A%-D&`CA6@y4NC-hCY92* zYB!`nhSnAzIaJe<3qsBcLf@_a`CLg0E-?O7IBVNblF51AGk|Qi%NT1oB)&+bVwflH zS>llmZ0E_I#*kw}WMWzk4l`ym7a{{8@Gu<1{7p+y8b;RMe@;e-8dEIDUb)@(QVKbh zZW<^dV79IAJp+@F(iCq-;hs`{nqwQ=8hGj{$P}Go(~bU)eVpK9Fu`M1Rc5C*@nr+gdK)mHAgN@FvMTmO(bjXzy7pDt&4Ev<--XJD zn&BTT7pNtm4>UWb)qApx_wWNqw;P!KMNIpBxyK)joc~?s-7w;}d?4}o@mmR(ha9~X z2Y{&vF3PW~fboXA?B^Z(4Lyumbn45CQ~fEyhCoAt129mN+u@_s4<+VqiPqZ3t|Sdj zgl8WbV0!CPR%+LF~3Ma5`P! 
z;&h2ek00UQy?cOpMPRZy6QjY}ADOE$4M}b*TCkdXHn;LrQVv)}nR5edl+FiP83`L3 z;EvCpeTJtWeUHUm8~}?!6^Y(W9@0N1oIZM`HDq`VWtu3kOzNbFIP(#VN#ee0X}wO{)>ZqGZ$7;8{O|EaGmvd#c5X>RhTSnh_kOsu?q z*$0je81(hzENXc;v%!~-3*q=r9-SP528qTStdYd_NB#&d{`>zKw}0wi#qF1V`0gaS zd;MK~efKYY3-{jvPMa5h$p<&G{-!*&ZAAwk_u#WU6x@lvXQVtTg#ZVy5sXT#RHPJw z-+~;7XtsQ zDt9&7c0j)I)F7RY{fwpjs{w~j7{VoaW>(LkGqTQOhvBqs@G zpJfA4Q!fV2K`zkZNsF}wHnESE)#LG7kMO}4zJNzh9^&5V3LCwfbxzgJKjy^k`5Nba zVxRQ~zj*!vuN!epd;kC-07*naRA0X6&Gy<}&c%6>%-*?3;;;+{wuX2ErnSMLN73&c zv=TPBGS(2PnHgdTxGhVc)P;is1p}s3Gd)_UYiDqYVFXw=>{O~g9~{I{sWBKs$RT{% zih^J9kpjDxqPH!>n9{LFmzURUI+G%75wM{6+&bu~6G66;Z zX<&4n;Yk_1W)ALLWF4TKGiF;vJ(fZQAp17SD>2zioZObdGdGu~m~{u3Tw)p+L>_EL zw7hz=Icq6F9)Xmt`ojFElw{ueAUM);sZbL2UO-zY$P`iP%1GcU9Uq+T<8=a9&;jT42g$L5)Ko19E59Hn@e{4SXsp>dFyBH4Ddvf#{^ zHK54g;LcjLB;dJJBQaN8X+>d94BHSnagH;e8_^2V{=Qs1Kd-@W8n&6ZjZ$@81aLV6 zw(i`JjRij43bJ0HIDikOg6!3(F7$@z_|6#37{_WXx2f?WNifc#!cR z9cdUI4Hndf+ITTGNqG}!Di51?(9g<7f7Hffsfd&cGFz^AF)ncb>OS6i=N(*LT>?3) zK$CR7#(ymZ>+LZmTZ7H(L?TBciI55GGh30N=G?gDsn3$q(`nfETOb0b(?vyl%^f>c zFH^bBOw_6>7U#+F)(pgM zczu0?U;VXT!#DoUH{Ou(+d&P`2M_rHF({R6mCNYE2f2P12!51@_hZomHn-*L%%dbv zmXH!O_8_LkVjNcBkTlD?puKIT$hKVau#$oWqiff;LQztli`rYTNb3oOIbd_LP^A?i zd7yNop8ar*OiJv6g*F;0r=eGUHUz#!H0%gNUbyH+#yH$sRBL(|r}CV2crZ{Lu`x8D${`pFEpx;G>>vDn*nZ)^#QER;vv~b`f8X5+bocsgdwuwqzKP4nJFr!v zKq?s~y2D@6>6VC>r+eiq&rdMC(iWsVi3r@z#Isk=@$A#jaC@GM(IqmQ2GhQV9ZAW#DQ8K|TKd5JR|1B`FzHHbH?rK>xz6g!_CbNZqMs?#la3y=>R0$ z8&z$=+Rv-`m{H$d%Ad~7=^OEzVi^z~T}&lg%FiXXZN^B@wxUM-sEQ2PfZV!nEz1-O zRRn)de}*k!XA(HrD`ql3loDqZL#rWFa0u7{YSwu=!%Ea7CY3WzQ%dE!AEx%ihxtYi)m6QyW_m>b#e{&u!XS0vJp`k;bC| zmZHG0Jgxg?^dQyU-hdKX9JUrxHl0M z?QNKe#}2%IbsrmDuc}XC9)?b`+Y$~woN01w>N6)*u!N@bNjQ8^agC0xXGdL@VXNW7 zparGswJ6($;-EQbyKt%?!izuCX)4i+I1Rjf6wkri#_G%RK>%%J^#?i!G)QyT!jPbF zBy0@qW*9SZu?Jo|@K;}bjBgom>jPr~&MUc*{$U4OeKmgdyylX!Bdir;yOw2{UP=qg zR!Yfo@dg*qC2vMQW5}NPe_;8?;Q6B&^WU8N{=K_ukeX(@GDP5L4MW5&Abi+ZTfJ_MDcYf*XQGI7hrZfZhXmem4%u7a%> zvPfWCv$c*dvId@T61Srd7SX}M7|{(C`$xSTA4E%u#>xoTYTEwz(Gz^}3tz<5#buL! zy>poBM4VCNYUL*f6$C)SN5K(u$|h02>*h{*9}6nNodj_-Qjq;r-_gfUKfz!A-~JE$ z#^3oy*|kF)%jc1D7&7 zK71o~Jb(5KAAkHYG7~4?Fe4E`=vl6&Ub*y|i0g#YUZImuT!$&wV1)5ViI8b+f9Oak zO+Q_bA`TEL{-LcGVxG8Sg?ic`E=3J?LJ~QsxXKkT%f+25dh;MtHZob#)ELi>yUGA^ z7?adx|ETvcf;Jt>AX|k%v2hhW^-y+qs*H7ia3gBgo6Dxki{HUUL6m-TZwXxSTY`RU z{oLMQIoYj5<#sAdtnE$cCZ#tNijqCJQoT1V0L}z@iRLbq1EUXBboMn)>2tE+1SP!n zyjq1)5x7SUP`E`2amvk+wv__qOJ9Kf+5ZUn3;zvn{=gqZj_vOBxqJP#zaIage->9) zB{3Zx-`fGUl&h|1WN%At*^(mghO}A{p~h6IWXy0q3WL)0D!ju4hNPuaO*W!!T}wYZ z`X22DJ#0k~x7H2I3r$&bDEcrNhX(`3Sn4fFd&j8Xp(opl(j*v90IIG{&>RA7*}c_I z2PwGM7I)N}?B}x?ErAY-x+}Hh;@$-wKY4Ou&lz1}5-{^gFJ-@P<@)byfXDOKqa#Ia zqL*dh031q)6F3p29Ip9aS#x!!%~*^x9|zt_Td#sJ&*)muF%*i?iy|jOBgGV%YY2ry z#Hbd?76Zh0a?vC1MI6*fD{(97r^jG&hNSbVqp{SWW-^oTKJ4^6+DQ-aODP>Hy}>$e zQCma0c32avRvtBg3J=RiG*YTMOV)ucQ51ht^jEOlXVg}~9{<)a^%q%fBVAy2x z7Y72oK*LrHJ*<@H7=vsD+GG`_Oz=8!BbYeyy3laiQ9fT747ZbgOCuiSi!7pH+7b=}D_9OkQcpe3w|>2;V)~ z4kV}b?S2>vYZ;wV&rOT`A7S{%X3T#mBmRq=dA_Saej5i8R}UW6HK@jbI;}iX{1x>l z-z5)9vkj-q69B_FoeEqsqc-#Vc)MCy$=sgZDqcW*deNY)Z|r^4yIvD8{I3 z=e+Nj=aTD-Nl~EmpmcTRVVjpNr32H_S0S~kDN0LOjN2ltV-2+Hcj6Pb2U%g1z*(~- z4C|Wesrmu^B&lW4JH57F4UT9JXU#0UA**7@()UaIO=}lhNkM_#2yER2j!m{ zxXG=p>%Q-UTo39@H7nU3>-xFTabf+r&V+Ayus+BKs86Q69nl4L5$L*TXh>8I5vC3p zN-(wBom0}$v9>JQLNEjT=xw%GgQQXj&2CsC1+Xh6MC5*mJ&T@z^z%Rx##{%b$%8Ih zdoo(^t6v3v@xMm=Q-2uq>gw*)xqJORd;R*q@$cioKm3p5;v!qcW8P5!Wx3KUHLRu9 z@?dMmY6Q6^R#a}xdM>2X0%?r;1hRXox_nG22! z%6_Fzbe%sACj_w|Xt+~NQtHf!LqKQ)H1S6CY;xnJR0nX`){C-+IIbq! 
z35>04{ix*2ZpbS^icR{)-;GNKSqzmMki1xj>SR-FJQ|Lgw7#hRQ~GJ!Q9V|qyp)^= zK83B@l~w=_1X$0ORfsl>P7X=#myRE~zjM{0}Kjl)hD9idt%5#YL7%qXw0VjxP=Y@Bk;=<99pN zE1AJ!!ZV8zmWgI@yDTu?YJ*yE$!2G5JsG14wHl&rRdWf#Zdpl!v#cDGy$CvK=3EJF zISMw;`#oX}Vslo4Te#N{*y?wi3EVQlFwM)NHM12oMyABn?sQi2-Z%~HLD%B8zD4ad&yew|lk$p{4YIpMZ3>NFQ*i%BJD()Gmkt80Ax^eJAxdWGAFL%umB zqLR5yO;W7~LMmGaVNfuzpyTFB><+7##t**yCA|0k`>({UF;dj1`+uY$}tD(*qO+1$*zpk;xED5C~By9o^Aw3&3WulG)tY)pJ-{6oc-vA%- zfjoF9TdrazvI{{lQcu!g6_e)5TVF%5h*|a&HNx}*C)Fo8lfES!AaLkG$|9Zf7N;Nf zn8`ae0AUkQDul&KkaDDEWoy9H+1`?@W>n@myoU{81+{Z-5|44SFu+G0c0)0HCPPg2 z;Vs#=%BdZ*)l*6+@ydInnw9=-ZY~4}l_;{DCfQ>lGi~$Mz_JS1*{Ylngs9N>RH#hpmydC`-!i@|H3aKe(bAvr_J5#2jcauKmT9h>UaMhY#R&OaNwvi5P+A2 zb2W&QWwpsi9ZHB=uwqFNrmPrDAaL6DT(NiD(6{_QN|hBE9pEUtf@>Yit#mA*^J@4! zdk~#$q>9qdCN1Wwc<4bNzU0@rv1!pzHWwHYR?tVw^$ghH9U-Veh|zxGxz25eVV8A} zF?>Vu8*HFA2Od3si1*%og0XFwoXX}-U%Icd+W1*SDAKMM;bp%#G&mu^v5zKwhi5nI zg6Wx{Z|OMC#P#)S%zel0?KRFfx72}9QNd^5!20Z#Ts>Qj*#lYg`ktK$5M(2qE-!Ez zTXC!)P-)TeXC(H0GJS30e7?nJpM8p_pFYJo_FDLacUoEfay+jZzKW%|lVU>6U3a0R z^|QVRs~AvVCot6dd`JVZGZrv(tX3*qMU1l6+>B~pgKbOEY6lN~=$z{%jV+jDM|xYs zG|dcq&K{T_Rvn$Tg733E$V!~HRD&AAR2oONTtD()MbvOZ9(j5;99Gw*8R3LSiva7O zA&B2P>l+_)Sk`npn7+0G&$YVP#X;$v-jZa5u5Es=>2Y9B(UKkd=ah7F0E~}p%STQr zTzJoT!9?dQ;CBQEQ$C;>SKGK~kFAd)Wb{OAQV$UF$>o)kmlqtgMOJ(!L&*V5Tmtyg z)dM`(PVfLGpKU&=9cQvy`ict8(4r@?N8K~iRuu0Ti=|^^#GN=SoKb^H#UV!q#wg33 zF-0XE*mJf8;XL<_TXm^#l-4nvEmI)oc_C8+m~&#^0~tF`W5emp5&j^eRT+E5HB|bg{{>IHGc;5Fwv_-EMRJ?{F6~!B^n$Y&zc+VMC`(}%^ zV@hFcI}`5#!imZ1`_(}H2{U{tBK~B|`Hj0uqL30j4`lTX92>kdtVv2HYDgnJw?^1PiMhJ8bZ6%OfXxe=#iPdB;8j@4fpjo;-TOL%lA1V*%ux zh`NoIkVn=-WIIU$A1xCUzR<%+_??J3DSHg$+-n0R72=#Qst|tjH@<;yfB2pHdj(f==r?k zlTSa!cfR{woVHr0%snvYY$;`Qez&ayk;kE?@QS39tQTsUHjFQmQ+x=Xa)xF=vDK^}RYz0D?Y4Gxkr++~|+%QkIs ziUwC<+sKJr_RQF&hjh}^3EA)kA!xd_M%HsSqaLs2L}zbJg5abv0wnFVaQ>) zy1a+`_a5Nhb~?Zf%K|8om!i3~&G5tUGo^GpsQ6@0vt5lG2*j){>KNV|Y6HRI`;P1L z8LwZx!t0xB-0nM$QP+CEdt{%7QjOK;OZmI<9Rx1s7d=iVJbCm8kFFkK*oiTDPG(qO z#*WydYoxgcZf|b!*{9EOzP`ozJlV(x8UZy{ttSiO=wa28t(eR9hh7vId@&KS{-Z&L z33zT^F_rYQn`eVEGo*@$-oURIRzmO&Gw+P#w$~aQ*%r@4M@e%&PRnTlSwV$^Vqaxb zbo1qD@3>v2R|#BgM@!KHw-(HD-TT2rEYA8{S#3IUEuy#vxXGON#bM*(m7}2I$X=LNX$8Fu?NSTRcp?DVtA!@jNv$K zTd5f+WN{l9yyr6}xpj$?*bf;Pj!FYWz`Z8Sn;^Tiqc{e(VKpJ*^0)w?vlO=(m1VIx zwmos0f!CJBZf?(b z`TQm3zE`ua{~O=|Y|R~L9)Zi_L%F6eAdhmgQ*lpN6R$U-_}ZU2T8R;SIAzno>2>Ag z*p)0nX;x2e53nyaNcH<$U)V{ykbxI9rB<-wCOmv_AMd{VF0L*wxQQw`Q8J22x$y=2 zs+(^3K+NK9bS$FsA-tOPscgcumcwbtVTeuu*~tWW_4*}#_3K~9vrj**!>ax;3d^!# zR<8FWCgE5Q|DNMlaTL}RU#}wmb1f@w4080Ig-ffrd z*cv(3b_w}b^_@BSAInEa5Uc#JM9z2HQW>Wll1m#u_wnbC(^nJD1WTR~k({8?4`V7n=LBqPS2PgqX?`3$Nd4dxJOP=#fVQ zLwV0p2WnsY8ZQ5{zlhtrVZ`0*2k`aTPy8O_zxSVD+q~%gW|)d!R8h}@-UXHAF!snd z>^0_4bKJW-HaL}2pEjFGfsPlY(jg>OC#^ge`5cp;;#MxKn9-c_-`B%q@|;Sg-bD7F zr^X`n;HNq&_y>-T?2F#US|>$N`kVw7$0*P>4(iDTFF)lyJ#CsHPC2Hydi0?EC#~EhYzt`p0JIAa2G2o z$Q~-CNk(GZ%C~-dzQxN|FK~Tx0}&4dZd?*-4jI_}x0T4PYpq;YAk~^y3Lw=b)>)N2 zk7H&3>CBc=)|%}Md5%=)nugH;EhlT@? 
z+Q_-FABTKW+ILJqcAAPa>a^axKmY0z3sO5YkRv2iHQ=DqK^7-XTZEW0#;_V2yc#&j zSrz5$cduGeXMbB>qZB>1;DGv^Q}B8m)VZ$jvM#zb*=}`~iv_3ezZAgDd|?CXTwn@P zy;2ptQ(2aUvurJ8BMv?*W+lW~6QnxP#SWJB$wf)-N<1rmUfDGa?`>DO3Ww(qyT0mC zDW%Eu+Lc1Vh*#Br4f*$~Yqv3|^odRfGIMO^*fy)U-??MkMlt5XIgncQ=qNA{;(QCs zNp6}3HPNbHcdJ;^S#^+Qy^l2ztovi%12?y4j4?VMIp!I+H)rhZ*CNQBk$u)sias-( zB5<0CYX|{GmDb|0#QWclY{%8Ax1SUdF}6B}UdoHaZ`urpMZ(cLbnh>=C$m{Tk0-e2Q1E zUf|~T7Blyj1=(2J03CXgj8(c$P+Arp#RO4K`PV~ubUkcXY+1tw(RPm}=EusW?n$v3 zW*)Y{I>0KfqXrhVM1&^jd=yR#bf_Tjm^^@s?p@r&qepMy-S^(Z{rmSZ#s)u)MLR!c z+wS!+N))N4r0#3;wAGHF@|m4L7R`_~Nwt=Qz=l`6Rsj19eCONW!pGnH7`_c!GS>Fp zScqXSxmtEUHsh~Et<8WPymKZciUE_8+$T?-;7ecnGA=K#aB+D7bH^SyM6gcIH4^*z zjO*)H_~^rr3gQGMS46`!?J#1}A)gIbir&&Ugwv(cIlxMFSESKPykghDxK0Sv7HY83 z*O0ZKfWs0@xKBD&)fDn4>K) zmGc140D8J#8!O}HF_j|@4U}YkFiIxZJ=5x>%nJ9mH8AKbdKLodHTfSAy?rQKMZLdZ ze65EdQBLW#A_KG1aaQET8WbSgCL{JpZ)39v>lg>Pr6lH=$XpQwMk(!-M_7Qfsf{2< zR*XT5G}u?aiqoI_3wZS-U%5MB?p{BzuUEhSAHn#kpY3N=ZwFbx>RgI+f^A)jd~TQ6 ze1$KVtr7-8tQ>8woSyh~hZ?-0z^yXSAfO5Ld{!Vk#~vy$MXKl4O3Ojj8}gsG&0lOB z`5Xuak{nv{`jT7YFbqyMb5wSZSh>cwo_TJy6cHOw0U&CKB3i@8->shW7pH2{AKrh6 zZCtE`1*#R3NhNa^$Ejt1y|H&+ew6I+9sX^7{4`6Q!U$@8?=( z%#c%-%7H5Ct!<$P>r6=HcZkmmHMWKa_aEY&_us|U)jim@HFL|;vB%!3Hs?ZOyQpXJ z&FxM7@7xT#4>tFLQ|Ee*hJ)fDnjI#^O529n*dx3Fc#`g$hSJD`6o9e?Mw(kt$|<8-1PAR8hE-e~{j3C)ErW>n+GtQM99?Lqqn?uvU@I zJP;|u6rlkn! zRIgjt)Ib!2;(SKzJ2G|z3^Qg~;S9yj@gCci%{&6R8GPGnDYn<3VwhuWj*rjR_?7EV z@oa3^8Aq>+yzhC7Wq0#DEUc&y@lL*7jFf{_Z*k~3xUAmsU!IYFJR*Miu8R0Uwro6k zE4S0eMy(#1jzhB_i-F(qQL`R?x3{;retm=2*EhJ?uaO4qdjT!yIm*UCS>VvtffrRc z^MP&9??<3sWFMl*xIp0sopDEM8gtIhV_!)izLGN};g1#bmj^1iQ<|QXZn>9i)epXV z7f_`m2PII8ju3qN-M8?;yYFFKYz>^L0&Dp+lQ&&KjtG`yhH3VY0T|)}*dR!Af&U_{ z!GW0pBj692*w454jc@!WK7INL=J{OjYw}PX-tA~dCU%}PtvTH61Q`MXk)|Ycv3$!{ z4TQ^ zvMP8)6&CyjbI$r46F}#bQ)OVF@750Ln&xr0>OX+qMh*9+kP)K+UWeGorDN8=<5l?A#@%mv~NhJ5j!>3G$qbVP5@{KtO`7eD`txcSPRGUD#_gZ)Z(#Lxco z*tUvm_EZ4cMT;RE$6l2*WtEr$RD>TTQoorN_GPEpS=Z>PRl0IQ;E1ST=-u61h zcm@s%3a<^fl!VRD=>Z`b0a8QKr>vYE$5O=wrO=t3BT;{DDlB}lB@|gKps@bhGHwpS z2cLlm*$KF<^FeT6H5d2qUE$q#-ok^2S2Y+KQWw&PJ}2=`Qb-=va@BBX&>o7hNcyj( zPSpUz5<8PDv?&yjI&vc742y~L`35)FxAm-M1C)-EP9K*xQr&S$;|9b??6Eewwo!p- zhR07H;Jvrs!PUKcIBll_+F`-CO@pd!U^|tS@8$I?yn68pxiiMvNDw7ACuf;lci{M#$}u8I3&~1tk1}x%3|;=vBPJsB?Oe_APx%Sr7bHeSDWq7rid&vJLZNm z1FN|WM&=2J6Hc{3Bo2)hv6Q(CRvE1KG~?(xk;xh!RdYx-=&MOnO=&}0_cbH{QwCa@ zEUPOU;2c`W6raK7ftw*)29D9fLHlZ~GkuwDd0koe5U^4w%ID=uM{qQk%6bdrYoH~@ zNdTC2&kxh{$eK+DLN3*wXipJZNb%=9$VLz=Ytjn9?}0aab5t_Q0xVS*cnx`qJ(oRL zk_P+%E^z6i6U+cq-!ms*JL^8Q5^dnbL0+L|b@ZMGBvJOJ+n}tF%!CBUIf7#_1A9TYgc7Ce`L5iW6LF8_YH? 
zUbC&y(}2w@mFPSMe&gl^{&wDA=e3|_wuj^uPtvT5QWMd(BLQBQ?%6CJ)0rl^Tk3J+ z>X-hnyXxU{Fpzlk_)!km1ba!!JSbHY;V~9%RtK8X{!rUq!+xg6JSJw4^qh-un}_59 z%*mYdeKkM=@AY<;xrw>_`rc82EAHnYdZMSw zo;-eph{U#?Fl-YavL2@9$eo{YBtH81d-&#WezW{)8HhOvsQ|%1KEFATvRRyFkPM0uVawP1_AQ|pcxQ0 zxs>4bn2GRcz-GgkIh-pclm-FXE@Uq!T7D@FYBN}P46^7M91>9{)#}*Mq<5@*ZvsnO zkHd|C*;1i&0UEUp0EZS-POfW06LJn5L$rGi*cz19F~g+GuIsuu#L}W zdCHB4jS@FgoSES7NWXA-wmDR+%)MG!$Y+jh1)FSD@EUSYDj^zbMy8B=F`Y7eJyRXs zh-FK6E27yB177*n`|m`(pTr@inNoQ{KJQ(g@XlLr<6^rgt_{zY^iv)CR$FF86EF2& z5JorGb{+u6c33{-V)vvtS=sTEQqWCqcyV?9JE#Fpm$p-TA?j7mykBawvARt`e3XkQ>`)#BNLmA#j8Qd{7X%| z+%VHbrSX-hGnW+`oqB?mq$2ev|I_JSgd0j3VMBGg27N~L)_?$?YBTa+wMl_uxnvO< zB7=%>=O8jyDiJm&`ex06+XIfs zNkAu>^)^1QC_!Rj1>!-NRxfXtc8KaX~0SjV7#a;<2 zY7WCaTk5364GJ#Ol*nSNhwZt;@01arhk?Z7hmZ5>@*azJG{7Y&VH=`UJF!p#7n&h` zz{g?JKAAKS>SDMtjv$C~Ur}I`8~LaMt|VpAe^&dyB%xZ|-SzWEsx8nLH_Q<7F?Hw2 zTv}h7qLTFt9H6Z=P0c0W?%yR$TS>gU(oX7x8MuD+8sGW$hj{+-C8pE{%9TE(eWb;Lm)e4V z)wXIucS4ON(^VFHbl|N=Zvg=Ixi9vB5Aqm%Wz)|ZK79K8Ic~0R>hqH-r62Iig;IdQ z`d}=UyuJ=~YYsz2utgViAsLs%y0)&D24Eox7}CW3K$0gY(q)>Y^jHMxh)GwGEmsg$ zGaa19`(T;pjBf3z{jvxzoc0%)>A-Sgi6MH7swP z1!=|Z^mzBWromn?!%Xko?1O@o&;6DEu8pHoJ`m9!%0wXOag}968Q-#)q@wg%vl1+z zRvx;F?^e-pFd2?6NG0c!LaI`D5-znBD-6p*+|`D}5kLOp7=PgxadXFvxO;t`US`Oj z`B{v?A@z_&%?UYHhr-=s7%%?}c)#U=l+KTz< zdlZ-)Y}a#Myr^_}e{;#1jn57ZWmD>jm0Dyhj#r_Y>)8~gyiIGh1Uz3wmLz{qAoaRouqLv`m(#K_s z(Esa8sXAy2-7X#X@883t`wtrwsA-K$Jx7ZIIgZ%pj_cR2a6X?!MPJrSsu%~G=C+5x zsqR2?x6DRyBNYFL)%?r;A(?sf^<0fqaGBz)BcpYCu@py3yuf9W;zxa@MzQ9SEd;to z)ClSP>@%8i><1fiZ-H}tw{^Xgomdilai==j<~XzpUrA}5+?H4&;A)r|HGWt)^-{$Q zk)!@S-mXbrFXS~Ku~IR!!*bS;fc08$>sO+Nw68|cW*`xJ)OX05Gcp%ImS?r}_nu1b z@MeP4;0t}tY9YavctyJBWY0j0w#goYuJxe7sRmKnwyV}R0aRscMn}9?@i#aKsXz1n zxWE-Qc+`^1t$?>~qX%y@0~i~JL>04{WJaXzh@sd`z#}k2D)3-@?2cl=56+X5*07Z* zv#fUsJF}j3FPjv~s-(YXFxckx%t0|L%tk@I34h#`NV3=UG-G1Uy`Sgii8*&F5Tb{; z=A|f^89k`IIs^04@we?3znstUTIbL)$u4AZM-Q1)w_>(gjraZN`=FM6*~2_j!kW_B zYOe8DW^h_^_xiyeNc_}K{WJN&2OoSm#^6IAuo>&TIsp38B&p++G53B>?E7qq=;Soh z^CaK@eqhQvu)Cu&r)p_nt%BslW`{vxQ-4)7~m zi3zExGYchlbM+w1+8X&7xVSjsz4zb2JMX-M)9IocOLJdJ6RJW&GE2qfFk-0g%>wQD zR-Rv*gk(9EMMGOJSpp8;MKj=|k3YdL{k31h>sPNB%R^0CCTs*EszKCtJK6MiBw{Ro zUCOGUT;JhkJha3Zh9CXvSMcc3LyX}KUy3<-Anhxn)dp_oEv`8v*-n>yz~l;U=-h7= zEKvn1=oLkjb%!a=u2`m_Cf6wIV_Qoqw@t>D8l`Tr7!@%YFQ+108}w<#*g$!9aonD1 zJJE2&wRK2Y7;=dZSGYZQF`RG+wwVVjloCVQ2g>Ls93Yp(a<)HinYfkRo$G$J=*T#a zY}qetH2}$2(Vj9xaJz$KIM-%R)b72lpgIf>4+<@3bU5*vZ$*O+3c|hO%^RB@UnZs; zCD$rgV29$0I}v%aBNbJRCd?aCfHja2WYZ1y<6nXO{C|Fzrg8WBT)tj@?I&^Z(?7S^ z0vft0S~q7$vMq)~ozEfl5vl32CUgqZVc03@aO4`)ww(%K9Okl8*0X8gwcn@&|=P{FXE!(#Io&+Jb2;r}K;vUj)i=3rTghbEh7T1Uo#&(=;85w%?+ za(#A|%}y{gy#4kQJbd^Vn-5H@!gp{Z?Ow1>L3cqLCaC60*itQI^isic#cyg66B)Fs zQTY{BhlEXJPV95ycHi;oC!gWdr=Jy}zlThVH>Bhf@A>j<+5KX5p}~P>bp}7Uy22N~ z@MXO7*1I@eTokk#dVVMNx%cK?XY91V`T7R8H#Zf>s51wOOs~2q8IRi6oqj@=Kb@ah z^RF>zlbDBOG@Vtb2+MLv2A34I9P1RWSTWr`nhe$#jV_z|TxK=VQ|Vu_fLhm^7}@HF zCD@HOIAtaak7P>v0?6~2SymMNgcK|@mYTuI1@Krg?h4w@_KMDCR)JO_9PC`ZY{as4 z=%iHqZ`okEU8cg(ISdF6vAWSys+B%8K2My@wrkN4Tm~XJ^t8Tzz0)#mr8gLxTWhpo z*G*=O-^HPJ;_qU9c%K4;$Khvq&BwG=YplscTQ@SkZ)pS#T(ww=2I^fH@b0*WM}ES{ z#BPCe?!|Zk!M-!gDw{i5c2&o3W-!|ni_*_#vyE&FLi5LHFz29EJJR4A2fq;wFt5it zhj9qTT-LyFPaxWA1z?{mZkX1+7S_#ZE0O!aK4+)K(a2uUTrT*M(D#vnE!WwRR`&O+ zn7A1me(Cx-e$5iQjaJ-GTdKHX;b1hObDgeKsM4=vNY zceC70)^YkO%fiStT2$)w2T$fRw+ItcJ{W`EbVDUuUAMzaYt1r|BES2WKaI{`V%_g-?(Nv~je zvJxQSI}vl@e6yFYv*#%M9wj|Y=V6oUJql<^HCxLG`EYCZE(^2MIN|LlZ`J!{wt5XZ zK*mOW{(+=o26*xE6|Qe?DPQHpUP&ie8ODm(3RIInvJUc=E!v>`tYnHhnCcL*P7vvg zbyo_Va}ZYZ2D7s8SjVQ8Tv7^y8ZKxMQ2@v6BBA%m^=)!kW8@e?PA(kzbO9w_xj418(hFG=LB9&oH=0vU(7;#L*|IlX9yd8&hoCXyS 
z{9eV)HN%XudzC720LK_&0QMjJ$B4UO#NF$2`bsm*pZQr_Y~|0lSsvVBgN>Xvl3DUs zlG^n|r-->>#yQ1&0Wby!#@4olj?JdJDxzfY)j;IBD>*UU)2dPL0RdCD$haxAJBOI0 z&x@umX})+lK_*nBQsVOXz+*~7pto@XXthB-v%`Sx z!tvTuQv0h&>u`S#zU(!Mtrj9cvL$Nm)6w6H{U` z(tA$azP`q@=bz%$%a{BaZmYFnRNBzQvc%fxW>oS*<5{ZV1rT`n@F9NWOJBy@Z@pd7 z)@}{-+f*CQiP+DKxJ=xR7YISkm@>#Y8UvUa9hG z(+}|>)SYnf5mGh~G+@KoB>9TfJVbfMP;+GsR;+W6vCxEEVr0jTQmWcRuEnX~=9j~} z2-U$-Qh;`x>{OL`uoc0`E&v(|9as^+J(!0qEZ*#@QVdNhE<4NQV9P}lS?d2CWLBUz z(=|(w;xP(D-3v4hK10_`H2*^kj3A$(IA}rpB$**(WJMknnjI;eJrr7T^oD^2lut$i zH$>M6#(&pA`Z3|EIV$`akb!?D;XvP&Cb9*BtE#jhex-D5jA8? zTZ_W<8j@kFCESkU)CB~iEcQq>|7D?&;=CAp^~-Fhithw!*yvk5$7f8$AS-Lz$Q~hx zGz@#}IPcNdiis`bDCIVpG?B5M5w;Q!Z4@=a4wfziVd0UmS@yuqeGOKuRA}HepH{H* z=RWbv&!6I{7mwDmIB?<(`B+Q{Evy8As}vNT6}rat{8lWh0ut8cOE9t_E${ddpQnMu z{YUrznkcab$59^9!zq)*ogI#s5@OCBF#3mLV&7*;*cs!OYGEYRiXu!;G95b91Bxv~ zOBIc@!N7{@YzdVaS(BA0;h}6}O-;!#n2Hn&LS3R|uQdUbOKruL9;2qRF|LW&a>Zsj zm;vnAE;d|VUMzGmz(~)%AT#ue7;iikl)=c-3iwcwR5y6_$zGmZ1LHIYH#M!{!;C1B zbp*y~bY{Da(TyLxd~LNnVB;quvRh}7OH!XXU^WZ3qoGjL@Myc-@IycJg`Sd@+GDFB z4du3Tm2$Yfy}|Pr&lzvZ+~dRno2mz%HvP4A53T}&+cX_&^}r;9LPZoi(RtuIBx_zn zbc%@wS@A3YG)Jio-vV?g9mU{nb7)WR$dVj~EodAce3~*GbCic2uGct#HC+U4fnS}m zuq=XX9C$5k24dErr=$9~Jrp>NWad_3=$;yATZ3YJOIVO=3(AJSY%8@TbXyDvNtR@J z8N(O+)xpG;@_8pWJZe+I6FGg6l9dsjQBubskERDLi)QZ~3Ad!r+F?nSZXLC14Y72S zLS+D(C>^Itp!t{nB*P?+RDJFrlP75!H^Uo_lt8$9o;R!}1a#+}}t&vi50%pDnXsSVI9(N$N&q8OX@n9hYhzUpV!CIE?J&q2sYOnKe1i!88uL_Eag?yggJIzmpM6%qvtrL)TqVmG2@b%%4Q#f*gq-Y4KWS34*cfp=lDjx#I5;Z zKXg#+P}O8q#*|WzDmjck7n&KtvqE6smBL5fAX#`d+uQCG5}&7m#23Ht<-c~icMr&& zen5Gyz$Ts>;nw?QQE=%onrB8$(QjY1gLShd3bqjzkby(Np=rGJR9}AkYyo_%s7p%Z z*QO_=M5liX2fB4O%qzbyaaK)BG%zVB`%YM_ZITOkD9fvabaP_vXS{m-qEj)HteKe| zf9g2U$<1)6@oGS$4S&0&R98f7SD<%?*Qw1X=b5>*h$+J)K{Q0w5Dke*|NZi^05{iG>U^ji_Xo>4LFqh-%SOZ)_-?g%^^x{mF-Dc`?_f za2yJ2w~|moR)I~hN$UK9wgTs)l-3F7kVmGZWC}=ffRI#=i2trBNvD|PtiwIM$?$?Y za{Nq5TQv%SYQ&=^b|sfhrH~v%sN{dhKk4oJrkV*8bMp2gbT0Bh+4kkR&9;6xqoceN zLbOh(IJjuGBefA3wsyb`OYBbxcL5HXmy> zwx;ul8$p+1Yz%%UzJ>$Y8<`e?)U!S+nTw#h85Q%n-=6XOvrqBKcfW^MuU;Qw3i*uJ z5I5KAHxKFIm1UYed@w$j6To&FcyLwG-tL3Lh^3UN4fCw(A`<8G8Ie1#Z+Cq5*)v?f zzHR{N0x9uXZ?ZFRlYX!`bb*wzD`anXyJq5fs7OqZKQh=@w|}pqLJ0Vtt;__^NpslwRitzR| z?a@%@5P^E25oe+m(DXXWH*M}jE_c~l9S~4xd*T>+?3SQO( z>w$%sW4bh|wn=ynfFSm|mqvq2fxvxtJVe>Y_n0^%5rppoP1=I(RK0ouK)q`~r{mQ+ zT`w})T5d3G0@=^Kz_G3c*_7s+07#TKO;&{j#;LCF6bp#Y>DVXcJlFRZT1r(vv3bR> zrq#cT8gHD&sgi{FxlaO=(@N0|cLWS)1olj9F2FhP*}cTyeDM^YV%Feksy7orR)GgV zexDsjmKgyobjs_C%z4zW<-NvawFlv?)Kny|k z$jea@1G^rhEIvqz9r8AL(Ver$4bf9#>qBVy|Kuf%?*soOZ`|_TW5sn!-W6Up4sL9g z1TdG+{s@FRc<_VZ$C)dxtUUM0ZAEH$HyZdE$HDdy1Re8(eH#KRiRwNzaB2 zH6*hoWt73WrpM9pGURH&kO5D@v`YRMv>12-bC!?uyWjaPo;`ckgAE@n5W^TFE8Bqhq?3pK{i`e7oUd{J>I&Pqpdz4R%p#(un!$7vH#~p&65spy z6TE(PjoaIswVmO*!2D6k*!wA@^Jx%%SgG zGC(ZKI;BTgLiIEJ&-`164}R$G6uEo-j(ENJ+D~Hpqdx~fWvdZ&e@Sv^d$vz_shng7 z%!d3%OBkXzb;qoCBcJJbP6;lVNY zeAxP339Yc@1Qi*vo?%#Oyg2mVtpLmcBP3c(sbE5Y8`m5&u+JTPOgwx39N+owhq$@E zsmMW3#K!ZH7KPD(A8;sr(8_c3nFrR$Fu-lYdmp@yFMi>Na8XpfPPwX$but;C1|<93 zaeI5l%NNh_(MR9KtJkmV3kyMym-aN6lx3-ggp3YC?uIKq^Wf!|l5@!+El5bo>>Y3GgVWQgY9VhPloI!(KWFYvJJCbJko}0T8@Ubryqt8 zP{N%cf>JD0B&!Bz!+S-C21=kd=z=Cff*T~+z2-3Qg5?u+3ampeRkeqdnsvu?ZWZP@d;i~ zxoa665sgq3ZZ0*Mt>t1W{YcCo*6doe6*eH0274fe1k>;A&aUx!8%R8ObpKcOJilIN zF&`9Twofc#*(@YgZ=YjKZdWCui&ezPdU1#jJtB+*#De^D?d@BSuEUX0Hf$l zcmy)aQC<%#K{Iou9q7>YL!!vwq?B9uJV-(#1hQIM4dc|c4$d9dH`g`U%y{D@OKPyF|=TkCBd9I@K&DA{XFrlZ+{!lK6~B`c20W9>>x`W z-dpbQ;G~&eNd)tLOQu>XkI@fKpC!rq(Xaj(-hclKaNnu{pV1z)px`lL$Be-F_Kc?= ze~eE*d5ZIX#*BRd4s9+#Q!Dt^z{pY@1`Q!7@tWBab<;%N6N3kk24GFPKBRFR4^Y?1 
zWOYb%4da5Tc^~XXpJ6xgxgd*4>TzmnGP9jKHTYQaQy}3RIT99(v6OUC87;~~cV>Do zh?e`H^tV2PInjf($Q5~8a@`uDOonAhLad=q%VHVJ8?kB|Owo)N-Mm>FO}JlQl#0R{ z>IAUYpdgR&Y_5EDDMP%!ZKTqMsL~8n|G$? zQY3(Iz66sda>awYYmbK|w3h;F;=p&H@*=*EA7yJcD;U_$}{{U}2c?%b(OBft`9%0#4pR`yc7f{oh zog}qLaDXI6$11|netg<0xkaCQouP9N%$qY_zj}?A&z|A>)hq1uc4KnLkU`5tGFdD7 z`|M_npJ8M)^C+qCJ8h1)-+6*ZPaa}BZOdk;^n{!RF{f1*k$C;;H9q{_hj{k<8P4OIixtc}*tI5bnsMYNI|EQ=Rlsu#6UZlzMR z)kXC|nD=(v?WiE~HxgB}^a!aHGMD<0H_mm8wOi4w&au?SP2{8!?dQ)v2uU8jTZE_#~zzpWc4>jy|Nv zyxm4^-^|12cNIWJYeX;yyg)?WoAxJ4{Dmuw>-FEZb%`Bmp;=_jAzwE1B(up&H@`uajrvbTz_W;8ATzbPcVUudb)#ykipu5>!5WZ^STAlBuj;rUr)PIb* z^XxPqjCJNlX4B>?F3@_DTVgV9a_yjv=#;EFTSF3iS?~zzEQRD~+hm>jYyxtrHpL0j z6MSYi1lw(cZRG`j`1pN1dFySQE;m{Z7h|)yJawLo@E){hKQ0LxM#4u|5=F!3X`7@^ z8Vvi4j${RZF*sBQFcEn5>Ls3j_8DH?T;qImi`j(#6$_b4NTKw<*j~HDqdhE;Pv6a8 zV}r@2F*cr8I~N+pZRA+Bi2=9g9XID2T)((R&cw`}n{vgT=1NnLi{qeh$OBAUhvQ0_ zS_3G>wL3SwVhDiXZX@5$J;e`ic9`2z?-|dXXaKb~wc5bbwuDMo(1OIE#`*i;)wWW; z>UtG_%bHhZY~tHWaHu;4yiUpX#TCIye3`kH#oC^k@j8l?G**i*EesYL#H3I=X& z_?tDHR5cLgz5Kn~E$zmE+~G4q4Oa&^Y@)Jk#hP0W(<*2rr~s*d-7w&Z8y>@P65B!Q z3|~nhQodLeS1$QHsX8o>HcS2Cj`Scz96+9Hpl41ibmB}4t@{RKJsWZcHrsm8JN1mA z)m+Mf*~6E5@8_uD$l#J!PxkYS|Bt;l`?Yo3uDe?A|EQ4k?kmmU=k8U z1QHTLC=m%HF;NUsBISh#9+2Q6FNjwZ{tTr210W$DViZCogs3ba$cn?gzOHU@?m1_d ztC-*Dorm69AK$*oKXA`oQ?l%woU`{@bAEG-UbOaW*w2$oM%LjWz0!MflT9x=;f-9Rl56w%%VmkT#M}=6 zR|0ZHI{O)GGtcl42@?>Z>tznlkn<&uJz zUkwBd7E#+;A&??)GXf`jGefokw*iVhE3hJc_Vi=gYz$>J&XFs*AXjrb=n(|t%Ph%6 z$SnDwz7)fHYdideuYdXpp1pWp6G59RI+aaP6(_y&XMsTVLINjkPAK(S@kPqLuHsIS zq{=2vXYu90kACzS{_yvHpYK%x$J}K%sBKVi(io8)A7x24fmv~f4~38C;84ZaAU_h< zSC8R7a4}9bbjaBx=YWc@WHEKdXPwgD^n*8fy+>w?f}I`G#tJWk{#wJqxzOB`r7d2unW>}~ z76zRptHc^x10Ni`l#-dcS@|YK8duXV@22ALAO%RzJc;U7#_3&{CKIGOBXy$7R)-kV&_HA{>X!x92aj+Hbx!lD}IAMd) zU?(dYeYQ%XhHG0o6N1jz{=>hCn_vCY54Xp|>#O%N!~C_sj?1gMtB?gJbCS`Uj<3ep zSG=$MaCN3sOkG`!vNl9Zh(n4xI5rph(pQ|IB8Qvz66>!0VBoK10445WrEX9Rsd=9r zDe0{<@yDz<0W1W%j-YL9`Q*8*Kr+OrqP26%4{jYTXgbGm$TXFt+E%KOd08P}JiWm6 z)is<+72}}TqYl3c>n*z}#ta}B-O?q$x@4RbA8itaEY^f>-o4pyj0}|)^d)(dL z;`1N<2=Cs$E1Sy9h2}OY?fQK|+jF24ilgPGug+y%Ip5#6>^V`bjx#GPt<;u;Lt>LiLNqBP- z`tLXl3+m^KgKEQ>fiV#|da3q7gs3VK-RYBm25qyTt=i}x8ETZpbMj@^&0PEf5v&@z zEn9=O(z1If&fBE9pH-iBgeceH^n)mxd*(_{Fkj0~S*ML}aONRQ-GK8v)HbNaX$=d4 z$|y1@Y(WyeW&f;Yl>DQn*6MSMUMMoQob`KZnMjDp6El4Mbd9TN829xF?@BDfK#Tzq zgZr!*rS=L-Ob$2NT7u$GgcAgd4SpJ!X*j!KM*w`5OQBO|aI%N;2)<|NnA%`U&uQDR z4Tnz-gvLO#K=?LnV_;yk-TX`lYVv!gBvNX0-($jqsY^0#%W!&d(CY)BHH+PUD0+7P>>TcPH-{@jRcUu)eFbr3&$ z{uDp|$Nng;E+2CX_c~enzNgSsg0*Z!ObDlc*lr(~|E?-W^A&;dO^~VRjw_Y3Z zcQ-e9_x^^FmP*VUvBE(};PNTuhwnk@vlL96Dk1Qm)qUWKL7d<@H9 zya3VGkYx=a(@J^a797+nQhJ3Y8j5(`e&bw#LtEV)Z6LkRZp((7zsKu0Z}IN! 
zJM44v?-PKS^$VeYQY4s`8)SL(VE=IU19eYrW5d|4ae2DL#pwcW13r`}5}jnW&wI@C zJ?7kTKHua0?R(tZ-8b;D;dN>tcQo5*4J)MOEKv0zgt%n(VkRs`P&$F$D5#x#C8T6B z=?TZk?E+=1HnU7@T}x8sqcj-Hu6x=roMJ%C`k6GeVuC~8Ibta4?(swHtv+il7x?>h z`hy(M^iS-hF*r!+n-9?77q?`0_lo<-mr*N^ZBh8=nzy}!3fnNDs}jFsxP z)Z`U0ntNbuG#f`^Y#We{`<*e(xah?tSX1phBW4DsCB_&S1h0=#sTyM5l?rIVhPBRzexKYGz`thfA@R@Z6^qx@%)hw>Y zoD-ITH*enJi&w9(=U%?bnAM~z|5%7XUTAv&!ZDTN%=`ChWuMAj{sM zS65vHn&|5N6*7r$oH9(=BK)6LKx7SijN2!IJXl@LP3{60SPJF(!`0D$qz2e zjJBTqdjf&+U;LMN_pP6QxH%qPUul&@{MxVMv?Xkt*r;T{3>?zh+H$s)y3;<*Jf-vrCx=QY_SXm3;X~P*=P6iG-ofAnb%%cA;Gs8q=;9c9tayS2SDnaZNuf1NkRC{dHzrW4e3 zMp?sdt-aaaUdI@CLCCDj(`#++jIknK;>~7*BmKwi=MkeNyo;sImymbF_slY2%od7+!3L z5mk$WXO=PTCrs}X@yNF7%y52hvg~VfN&!;|LuXLcYK9XRW#6A?{9pMBw;3EhTU+x> z{8H*xJ1Xs33XGLPV}chSK%p`dJJTPHc5qE^YRd7zkN8O)Nc_wq@=)d)As{Ju@eo#w<3zW;r^d;b>09s4{pqNxrMhkCm# ziK(qY4JbuBZnhU#bQPwmnV!9Pj_b#lHEjj3ZOmAuhc2v|`AFb=-f?qtgUrM>wwk6B zVira~A|YAHIJJf~Wl@j?PlY9>1ajvZpL&@ME&3nJhr#7#)=V&D2R;sl@Dx0u#( zPYb=p7+5DuV@egVcnnE3LsHSy1*x*OSzH;}7us0%V1cVrsUPH{bVhnAZp+LWugdph zP|X4y1hOmpHuN3^RZ<5cepBx;=zq>u3OTnBb%yw|n(^nDlLOFS{uRWZ|BDZ|#>49? z|9bVu|9PDL^51}46`)czs?lr9C4J#akchTHQj5l+dBr7rgQ}5pv2_}tw_u6d<<>Bm zG%5`kdv&nVLb5IPj0%k+ED`2OV2RPI(_0Ow7wqx8M zvK9wYWl#|}cxM`io{yhC!ONE~adCN3gIT8%O^xYcc;Rf05`#nH*+UhHM3frhmCw$KiORx_Wg`K&p4lVym|W;_qS(?OJ`4=?2vbUv>J431)~eobZ0V&iv|Bk z!{yZlp1*jG>+1__<3tI&Y;v@dqPDcbhv@$99$&tGh5P%-hLo|4ii1sWi8@DC&21>) zv@`pjr{#fb1@onp)-tl{ zF(zVUcCEaC_}TSXac)-19yK)_V&zhL9aOBNYhBuUB02%D2dh-ZTGV+tr2^8TMan0B zMo(?vStL9HJD0e!5S;_q$v!CPBojF$BbB1X2UCk?o!PNtG6`?yjeH}|u^MoJjJ>RHb7n09BPjw8+|P+K)mpRw*iA=Ln&qgfgHvGH!Z}m zHyAgf0GTs@X@-FscwpP``|~~iZ`@-&ksqe})$o+Ou!FKe(Y~^kNg3oKTDI7m)9=s# zcEK@GYUGE3#82u#;_v*O|LXO{)#Z0`&UFYSXOZ2+{Fq;d)5Zv@95ns1b=i&MoLOoI zL08Ndn*oR6y?6v|m583M1uECL(lKnoG@}ZE(HkE|faU0Vq5pzIKDdb);gJ7?Fe3Dp zRmU^be469(>H_=S8P}KBxV$_;N;NkTY^xz&fN&OncF10wQ8Gv$2kD^HB|M}wx#q@P z=h_$p=D_*xj4!`@jr;St=Ibit`1fd2Ubo*Ym{3N?Ws62@l=f;1(a;$JoXj1+^h>{l zmoGoTb{cH1%kNg6$$F3Xy?muNH+T5m-~U~_d-IkXALSnx_%!7g%;oL#@P5Lpfz~8} zL$b*NEtoA1Nl_L!Ei!@_}<}=gTBbG4bxN;5V7Q-$qJ^EZY-;iQvoO-lL*yO#Zn{b zhcd61dchjRdWhV`f@pQIK+|+trulPl&^To$Vakm~9?pjB;({ViT6S~6lLRlym8l+w zjYtnFhIKTQA`LSGKBE1@ITQZ3|8v|ud;V~1JiLA?Ui+{8I>xAtnZC!%XRbyyq*-WY zlLo89N2|#MfHbqVy&1BZGsHC20C8rkt6Ci>&@kf@8;;9XFSsrhQ=+zp#ivmd>$)ev zc)FvtqPvbN$X%+8UTWZAN#Qn@BF@kv;=sFVynX;d%Vxo)>(AYWW$_A61>J00ld`da8e!iyjGhezC8;;xqH#fJqyT8YI-Zwu& z2~%~gj&2O4-V3Y2i&DPu!5Lro+i5tq?S#{Ify+miH82?iV;f~V6u)RjEt$>}!z=o4 zKkvA|y}^0EYlz{@#ibA+T|=sciAF2uLTVPiCuJc9A?B-@bsG&b6qmp`_VQe{myz-* zK)XPqvH_`f$;hP$jDsOa3PTh)(NYO1^;y;;$2*0q7k5-HWKyw7%Wf~)f=e(e{WjlP zHPLa}svZNiacBjTVr9Js_Wk$gvLQ3t&9iJkx{HrduOU@x$0r&?I&t7t5E!b3jU65u z)fl;~CaQ&N$&~%R9~X?FrGukkwl(C?;HbVtQwts}7LFaGUNaaWTnjhZtg1HAvXkF` z6Py$uEHIQR=o~zupA%VZX}PBDY{2n&Y`F4M55`W}_JjZcAOJ~3K~#dvGchcxj|c+$ z38){d)}K_1)c}>>Ybj05XuTQmv0>W89<#V7v(E0F($e7@B5SaaUI`aw2HOB1TQ4?) 
zX=G*VnhkkHPVDmxZ1tSjteX0qf>v@^HVb5F{2uwB1wIR`X@kjh`;Od8$@1;{xA<+$ zu9v0oX>}3%6iWd)c}5IhwjnCk0S)=;Gide9A%Gy1AI(!F9wI+~at9Is@c7Bo-%LS$ zOtC`BR`#As9;1fYB*cfNzq zzxW)v2ljn0rHd~MB<7^Nt>kAIJ2zo0LU$~Q7nT=s`A@xMhj47R7zgsB!J;5N2Njne z#^)|Yh|~|=?*r`2hZD%12og3B$S8b8AX&@ z9(G7`Nf4`1C`F0)X<)q_iVu`;l)JiDK@xy@RI_fPFf;NGQPM+2h6yqWZ5N%M%k?~Q*rjbGzq?XbInNxqi6*|;{YC8m z%Ab3IxSEm)$HkvcU=@z-QktPMKPvY8FM4cAa&GIc;nLBXW1Y-U1qHsazYna*_q zCaKN{Tjvs4pnV@Xn+VX;JjTGdIN|xT=Xn17IZoR}a~R6G5OqJ#{^`1?j<(FH2?`9o z%ZH^%;1KCYo{m$YB9QTn+H&5V@A2;48@ze_1@7+7ILG8bM|Veox$F0E4jY8FE+wPV zp#&nXTBx?|+v$X-kDuVt^>y9#Y^aj&Np?Dk*F8JWxWBu{>$h+4gD*bE{oOs}8L)%u z*4aq3=#ElrZIzOdk~Qq|UJNK!FvMKQ=rk7VVT~~{!#X7_XjQ1DTeaOW`r2BdDQ1GR zq0_RY)3n+&ES7*HW5?0o>8<=g5Bi%BVYV9FRAcQh&r%91@>bT*x$K$FQduzA^=ZoT z%cL#c-fU2t`xM6z4KH{i8_K?o6R?q8TPbOyo*%uhYoRkEV7`^KM-lUShw{jSi*`OE z83XE08*dGkg5QtG?B!>0u%zZWdBAjy#yU{Y%gmj_i9?E=>Prqhl8VfR_a+w9eAePI z&U;6~n?4U)@9PB;PjSLElY6F7nUGXHnD>yv>+;byZM0V zITdS3rdKB7JP~>3(C>_W&Wi6;#4Bj2hhc=3n%JWjrcwQgvq#Ap-Kz`^#lY`H25P_o zz+zJQl^t<9oUyC4!VNdP!ae@+`_J*ES2Vj)gPNHp7SKT})#S`X@s6AccKq(?^fEty zU71z~4skN2FY-4w=l}aq5&Q%XB%VCJ{@**7nNc)7X%?`F{xls3HlsG7YGPVK&K>)- z757LvRL3rc8ZA5I?pj+ILfU=T1iVh<;m#PU5|^a~Je>X_Z-7t6@|>1!VkiQe&`JV! zw52seG4|N-#F`E3GN>uDavKZ< zlhU+SQZ{QSBsWW08}ZM-_#EH)_IJx35f#_toNi`h4;A7d{j-t1;R;UFDTr#{T#CP8 z4$HucCr|O@@ioS%B4KcfJlsoh5qrI7qb9I#UcbS2zx#W*xqA=yiYTgq%TD^QFH1;f z6}9P<=uwh4WStqbE?RRYk{6fNgrzu%^tBEvRY=ZqeP$0R_#C_SCdO=3VS%!c3<=DX zC?^l0#4>L1u`=Q(R*Yt9fMtgSiddqd9yI8~^yutqhqdI)d&^LEm=awq=1R$mcS_cb z+16OHNJ2L2GaB_A$BNC8wMx%}dnp@&Kbw+hN zSu;GB;=otRPNJyhcW|hDb#Z~0FP`G+>KYhh4c4VbU}GXUb|P2w%GTU~JgL-LCydS#I1`oO#F5_b94>(MpsuhFH3bHGqbin=fT% z4KV7vvY_Ab!N6bjV0zHdL&U6CyGy%G4^N~s=skSsH++K~8pj`}g?0d%BxfZDflesApszI+UY|@jgu>Z z<-d1tWk7NwKg>UfC9Q{-pcOS)l@!Cl0S7W#(0gT<&KiQ^*|Qh;m4EJ+@X<#f@lXju zcVrW*(`VzLWqCw7=~EOV6%#4W1Ev(i^-G4S_(RqV%LPHI(ZrGg30lsX3P|y1M+Jz44;mfIRY!yzPioWi97%*p(e&B%-q&gM)o{#>^!<4{b23c35|G|FevOsObdqQlfLs1@sn^+R;DbZlL-b3v?r-_ zDlR><>#GbZ87NJcMP`^IW768PfJc#ux$k)Y_6D!Nc!gJQ-q3T69ym);;0>xqs}GQi z`DK_bFP92NR#`X4v&TmOeW=9+eLuqWt`LjUs_a1;xvKkHvA{xGM7}3LU*%39j&Y;zlNhnNx($d3eskJC$ zVGTakpurN@nf|bR@!+;uU`uw^%M;Ykt!yFqfUklc6aY(2()L4HG29ziXsqKj#8?_& z)dWDhCvI0l#*zY_y1sUvdLC-`xGml8d= zm_h%}>-(Bkc7#q<$cm7xuhd{o%_k^p&}w1WsM0AGgJ9$-M-ZFqW!Weu!)(>1V6Wa$ z62K!Qe&*35JjzlX+*x3Ez<1i=wn6^H4tObw?^AGDZ%AfE*p^!LyiZz`7bC0=xQUUo zEkdxsK1*fpgFVU^IE}5|yI7$0;iU}E8RW#&(r1s^DRAZlwm0Zr>R*RVrzP4jXJX?J zPk%-YaI4?g60o@=0K7jre*5j0_+d`uV2$s`m|A|P$eU9VVVbWV5Gzi$;gyF7c(bVI zt8KhX{l@9b|ME}?{DcoAzWFoX{GYLncS#v1D1mF=XL?JG2BWU1S|S|B`+%v5owW=S zoPryt{ww;Bk&Gzc=I}$zW30bZ!59e9xy8|4@k|RA9JECodM~ALZ~;`$E>j<-?0tr;&$|;xu&dp1TL>GaC>`)o7?v=3!KmA@}CXaM3O@y ztcX*r$-Nunp?pm~SCkuNmE5*KGF@&CreLcq1cMMEU6``MM5qDBx>fniDF-|WOKti( zoPR&#za(8&+#L^W%P1M2{Ge1FAgj4wuuSw84k?w2nt`#lMgR`%BF33o`%@$L`W<7v zmr63hAyqs8ghxwcHB^bY;{DR*Qb0)t#lc*~y6R02HYHL4)$ino?Pm=XW?7oJC6xN9 zL1n4;iGn=Fo*6aN$)Sy#^E>Zd8zncX;LsyfkbL|C_V54ohnwQz^;7xUudXougTGZg z6<=g>Z)xKCY9CUNu~POZx$oe5)_F!AhO=ubH99o;fA{H^dPK2^g(Dv9f zlJv3{y2EoWs2moEv1s+#O6)i}Bj=Rx*Slc^O0~usYkTtaS*Li5J`~$|pKn)}w zKYfJ9PoLtnUDEG8c!JK}@M%+vEZAca0w*DYTLZC?Wm_Gaq{q?2sh_VARfg6g0MarJYv<|^H-O=S|pt)hjVRLBE zQi2%{f5+uclhd z5)Cm_OmKTH`$lb0Y=QT+`J~-d(EDCA@%aLkmwl9wIWZ%9sr4L&7Z|waqQr)lR+Hz~ z;&lawD%)@}aDZ(rb+LJpGs8gw7jS!US;IPZxG_<0V$Y~Z%n(F)V>)6WeKHkkGA(Uj zYz`ZaJ)_j(044`}HH#~Y>t+SD4aT|dIvq2Sb0T99kj&*zW`o>wPMo3!8DApt-F%PN z0{sid6V@!YU}SLhG)I6cfyh$bDjA>^-&ztLmZm6`u^_0`kH!N(;wOC|@pu30zk7dq zy8MS3HI1ysB37D!1+%o*K3!5oHCf)o)MRs+t3jGU=9>jVPt1NlJhc7Q9=jv<9$gVHg313}Z<9xoy9y@F>9- zh8VQr40OCw`LGM>Wn=L3J_KlV%2Kur)3oh8EnC%g*nF(G-2;)F->1}eFN#T_ay%qY 
z+T}&H_OCjXKgMavk~!Ow#Pa6pm_T0?=K8FLR(mYI(a(*dXSFH|3*>+Hw=uu=^@p3{ z;q_DdvOoK;VjDr%Kt|bq=-ed1-YL&T zCo(#FlMv5$EfKg576m^UJ)u}WZMvO9N{2WGgc!4BQ#+GXN+^y|Hyj9xwnH4b)D@L5 zvHqlH-1r7J!}a9_o<4bu)5VD~g=*9WHXjE?9oD8la~02I<3M(Vw7wUU=R9^CQ9-2` ziIs{q&lCGh+}xe<#hWj2f8P5H&E9;EmCR&Rr4{OtDNyUPmTFWChGJ+8T#Z46Tdeb! zo9h{S`Rx}>!7Hr7El7x)WNX%rkNxgB&9 zyB(6xypcuVBC8&zL1h3`!IiQ`_F1UGYsMZn*;uWZ3$g}BD+Xv;TnX76T4cjgEk+Hf zI)c*HcL~9zm#=GBuuCdFjLQ^D|4@Y>(V!Lr6CIBmt|lLhDd+EClXZ_3Xx5gc#M}{9 zkkOg5<387@MNfY=qore0-TDh*S_-(YMO!*YciC*3)MLXWtX6C)I#{u~II1tA;jr3@ zYjb39c&92DCe^=MrTOvCF(m=6LErkh#kBE`5nT!~G=C*vjD^kqzFQ`qZx{IJ`f=GM zA`rs};4kIac3oAYqChAnRVQOB2*iE?_6$r8RZk}Y^SR840QMb-z=`kI z%n2W(EHPf{iy4VAcy2{to+n4cl~0KoO5$G=g%xVo8YkOdl)w5{p1}?S}o;-Pi zPe1+y+i9c4mRZc4?%RRFY%8U#OPwUiIN2!9eR9y-Kf%g-w>(6zmh3JPokM z@hg7;Qg$W56r~fyZ5lWA7a0|??p$ZA~Ulh-w8`EODk1&F1CTMlqoRfiW~jpwp(fr zZ)yJcfEloJG2}Q5p#(}lK*g;`AYy#fd*NJIIiERr!9|1hMA~S3PU~{F0DNMaREvx> zCulUHIA@`>hAfqA56T?9`CqigHcTyL;Xp@R$4+h*s(G%aQmP)pUys1)L%grE`y9F0 zFm6Gx^->?TI#ZcYj!Wk<2!=f80mAE4 zURNzzgCqgYrNCkAajQ^$@kwYUa+J!}6;a9wm!e$L#$s-8AtM#Z#dKVnVg{cmGHPtV*c4%#ZR6hDux+P$eiPVpZ-v#!1opYk$)OV+xIF>ip6~Fx zIAf=RdYafFXiKoG&ekJezK&LwI7Tg?^Pqy)c^0%HkrATA#l90Fe=BDE{zDb;6&Of- z^z!B3vkx}UioS@>gmqg30aXP_rbF8W!Wdgs|2_zq9NORuqv&zaUUooc7fqRg*w+Ol z;HDFRB7TE%u{jNBS=ef};==@l-lPP9nuBTwTQd=PKj|sMObmBiUS8t))2Dd;;#pmY zUjF<+6+&Sj>OHB3)^UF~@%oEb1&krkvk&Et;NkxcZ@3Q#d09nZ7DOi!H}7un>dRNy z8v4knDU0?knhM5PF-HyPw8o#;y!H?~V!bH{JbLyBf8^(X4%b(YN@iyDI~g}vW~0d8 z_qW*h!29>Nc>U&8QG9{*yF`IFC6f!r&UFN-txZsS$1K-nu1CKq)f^DSB0qcQF0TcU zcZlhM`OxVR8l*_-%L#hqVj@UBtE}-Rw6B4~+SJqH;~PD3Xeg9oJT*WIG`+E|oo*IZ z5$i0zLLS7NRXpfT?YL0N4#jlTGi6y)Ky7I> zd}HC^A4GmhvW=EVX3(Bt6>q!9%B#q=!*B;VNa&aM-TCveQDM?dk@soO`BQ%e@$B60SFSNJyQ3`Awuww<=r4QdAn0F(B>_dHPc3$s>$Zz_~9J)PId|e zFtQgKj>OEpSP^NhS_@g^l-#X4B;@<&z}!|8V`BEk4jPDXXwkCfW0c=972&Y#7|96p z9^~@pKN+^uz@x`caP{a3E>0KyzHIIo7#wOCDenalwt3h$w~`XQG=UgLeFn1>;$0~P ziSxcs0Q>ztZf|e#>dPFl0DJj#XJshaskI}LFGb8W*lsvwhLTd zRJ5sYgB#Zx{(yH)^7DwjSXN)WdWHAz-_(`X3XwLUcHIMVi8>~}f4=;naW)r_+S*kf+ke7y?*+Ccok*k~0GT78GMfqL0?dTDh?_KMjJYH)Fqld^Fz5?gK+ z0TpdfiKMt&-i=vuolL7lPsk!72p}6_D|>Iff3xmsZol=_m4x0Pb%^tex4`JAKV?Oc z^Es5lDK)u(RVtmDarB~}11ew2V^}MdK!rwRwwxa^Th+*!N>zST?SMR}B#f{YjM=OK zrMe6rBzaH*mrk2J6U-HfE{$mhRl#wHJ{F5?rTlrWWnpu`EPEkkSV~pPz=Z>+-0FO4 zxs~@rKdlnjs(u@7X^*IZ*^JrA9s7Pu^+UqPiN)mT+Inh;6^02~KmhEsmZk<^`ou)w z?tI3Sg4nYaQ*xs0Q9h(v=2UvmypLv1>`@7S^_|fob!!lEeeb$4DGwi_IdW-+w`1V9 zZr3 zn$7K4i4h&wSpU61!L?E6597=#c5aw#`3nsyw7lbdLcEQvLdVT;adpD0*RSw{AAAop ztTU300IA-aI~ezK01)LNaf4Bx5x%J7SdcEH;iDIy;E(;nAH((4C46{g)gO7*8Q3r| z`~)|{&Fu}|zI|7h>u`iNSTzQ&LzR*dWnk+F()Rxdkp!GXX1+tV$GAq92hJ&Il;; zK~J6c@Jq2s$GBPRhMi4CMAPz*&w3YdS%U`2Q_azQ6wCE2W-PUZ%W65CH2#tzce(~s zOggs79g`}b&_-}yACGvjn%%etB#at`SQ}K4gsq9VjA44?9EtFiPhZ*Jv-u+t3IA)q zjvU*=E%EUBsekQ%`p;qvp8dAs{{~f3spK0DDwJT<-gIr&Nk=!PM#MoiII{e^gYnTd z6m>Re1-mWTK2Jf~6nYEX>bEK^?Vyd%F;;}1>*uU{d^EtaC0xiE#S`dj!{J*k8lVE% z=Hgi#y#F#+I)|-oFLTH60$N@_y~g9mPoTDnK2`d)WhX2}>G8f_Qhg1{SEiNNRDyAL z{-`2IB?#OW0Q!7?hc|EE;_chFxZCd(mZ>=a03ZNKL_t*7J!XO@kG{K&e-_EXy#x^K z(`AeEYM{mXxwyQ*lgE#7d3B8o->8W5hVm&QcJ4c7OuT=8gU^2W89x8)bIiD}2xsm= z9D_q5T5EfIwdDJH2CO44*>w0oMzky+XLOV*+7g=$5l=NuI9U1!HT96Smv>jNQDP%0 zFePB{NHPn`W>a4IY#?_MmKYq^3l^Cvo*24;E0E&W5w}LJi?|vcmm5LYcQt@hmSGMP zla}95Vh!0$kq7P)e5F!#uWa312cyRuCQ0v9KQD$(+UgZfTR~J~%u;1waV~18MFj_b zOm>N21Z}oG8)gd#`eRF#X1GTEH#Jz~h8)(GP5U59cJuxL5X@jEu8{cJ#TA~=QWR8*WP-JRscVup?6gYIsNbb*t{dxqMOm*1eg`@Yd8_5BcI!B)xPt3>OzCJb(Edj~_ka1(f||b2hIvF%*cWHt&xg zU$0?2s=a#pOD$zG6f3)n`FFBKRLiW~$fi^TW8_dR^}>I=a4lE1nmjb273LCB&h z`gmusOVS5@cIe12Rjx_-!PBRYad~wK8;;Yql?sIZP^7Pf2usY^@!9u&h%dkV0;h{h 
z_;x}@T|&XwI7rnX3b-w3NMIGh2CLN9gMiT(@zzNnh57E7P3w5jwPA=wd*lGknu47j zwYiZqK@Q15-N8XRVp@Y7EsInfQS;j22XD1xroP}*;fr;V{5&NQ&pfp=Xsi7N=kSP`p$?u(IwzS#UsPsUR3IwBfGY;{k7Kbs5 zrt44*K!K<^rHG?txY2ak=wuX0vV&B3Cv`r$JVF;-l|aIQW*Ei>_^=sR|{k}gjz~uplq8( zHe7>a-XTVX>G^!d`FV+V zj~+k5<<%v|X{!OXGsZH+sH>$*MquA3-oAf_H}7B9dps8unuPAdwts;DliTI&VKiFO zZ&sF~X!8DH@3Ye7nXH7C)guo!U&d=^u1IsGTkN(h31C88a{;K(Ofs-c(U}z}OD2U< zVl)I<%9BXJv2Cer$r~{)8BF41S|rm@z}M)tSPlB+yGUSC1v&e{u7ySqKn~W+aQguI zt^t>Z4sM1VHL!||P74E+JW~Zi(i-C}rlLw$lLsnJh8>2Bwrnv-@g}rzlA?wDD_g@W zj75bpZ8BWn1DZax5IrbKrPMOk7mKN?q_$jaF8(1tw+$;jcS6A}PBqnH>9^m@{ZcD~3$cy1WN)j(u5r z2**#`aCdcq-@AE-@8!988a#vYNKmFdPGE^(|0Kn-^SfE|RjRPfu~KmOowE1iKHUCak%0sNjLmh)*+Rx0;tek%RFqm zu_mY*n;K(8*>3HUgmr8tMLdck^K1q~_6D}n4+JTy={qm--r#0$?AMFRj-CW>c=YHI zuCA|fy5Q8C{y1aAlEE=pC}A4ujp!-c|a@N*Eq+&n% zO|Pid#E`K?&$Z z&qZUlgSv*F^Y*X(>o|Yw=N@i|hu6akK#YO$m;V|zrQVQ;-dzct3DH6B&1xbtiZ`(_ zDIgH=fwo{S=$^#t;>wy5ljXTkj`AU4G+IHCNaW_-1XZ(QT?2RzQdX>DSwXWKTEX2S zN|Ax~u@j0+yP4%^JBPRGk~UHVIap#+)$(D$;luFg@ngJv@f?>|R~7lG2x{r@ycCp> zrG+qK*CBSyJJkEbhO6Q~=Qpn3c=U$2y z0+?MYjBXMM{kF2HxMwTs9zDLsC!c-|FJHdI#l;0SZfzB_of^LEOiGD~{r(mjcsI9GZ@M{vtRlszXm`v5;u z_Y|!*T7YSLeh8TAnoFOFrI;*26$U))gBVcXh-Z|_wm}Tpet8W|3KU9(r51LKu0KYN zQo7hl13mN-)KAuxsGrRRdskXje)!#(Sqdj{vIH^Bj*(r;ON*6aVQs(BP%mNOywjtV zIvKRlGUC3557kIFl-9U)SFxI<%+%tlT;i-llvqbqYj$)jXB(Ieku|r&!p#mzBIvmE z%wz0%I|J5x&Ys7^$(@*Z0^k!r;c71+rQuZRHhW;9cxw2$!7!Md=N#V5*b!&O)e?Am zvJ)8^lRTY-`9zXgcON^<6LSXU{X|3!ABHSA>gYP~z@gr%fo z%r6FTarp?}{N~s3==w3Xv9Xv4UN1f(BZiN`aFD;#lpAtTqlcs{fFu08Jr$%Jz1^4w-u)Smgb0L&Uws^*%i|zr?1N#l=E@J zfmQF7gj4@-4iv2=vEJZ1QwGc` z=iYqXjm4gr2VcKziP7gvY8fDrBqIbfvMqBB((6XcS|S~A%&fS&)X#HhM8g7}ju%g# z;^~v;xY)EU+a&?W#o-?QSF$;YVu*Yq}v$Q@EU|@GU1J*?8|~Vn?j3_hh#w4d z0`K0w!_E5}f*Ywo(QtdNB~MVb=SN_MK8sw@>H=??o+CdbK%s1df}`71idPLdq+GPD zrd7}QDx}anv`*D@KUe^mav%jlM{IJ?f3QyEk1dQM>+^_xrJMDTE)L*p7hsjt2e(!* ztfhg1EoCEG?61XNvZF7#YnhxXrU$K%Ye`V)7FBEPS$2wU5M?z;Yh9DnPi1*NETs9F z3}Zbq(6zM&Iw7kIGr6*+cJ_%gHdQWlQPmAI9A@ zA4iH;aDWmhYt#CkWUIf#xX?o9nGIY{K8wRUqIFq}*S8(c7}P0%$JPr|0dNh0InM}0 zDW!9%1#q6-s1G-6UXAz0!O;EOyU(a+nrd(de4G{s#FlC-HHdPrA%v@53(P%A{hE7y zu5euXh5>Ke{YVtEZU!(6m*WJVf!Gq?8$14qzrn8AQLbTGc3PT34zv)p=EhlDhYs64 z_94#yKhJ@g1BpQ{P$^}DN*;4O@FTuz1Bs_kp8g|)e-Au_9cxk>v!++0kIDraT+>{U zcGiYK^8CSu-7^}ZC_9AI7zu|>9&)IICUj7U_bd@TON0=8o;Cz2ZM0CEhr!@$wRIikRajGuq9EQL~+J4uiR(qLQn-vZZo;-bw zk3asDQJk~>&YUnWQnU@RE+!%ZZ{NMa4}SPP%&d*idG3gPM*uBaT)ExW+lP|mEx1}! 
ztZmYZ>88puO#Vv$LJ` zmuY%$ET*O)TN<8_z+BMN+6Htg$eQ7I(HC)eCJB;qaZNZh%RIz3WnKPtuhoS%3AeOi zEscfM0HUL>xn=8<1LqnTNEx6sMr*k=n{zh1NjbmwkVIjQ z{8f}xK!v~zqor_hgX$Ds8`qj|RN4|I;DTc6u8(Rg-KuE!ixVy`Pk8+78r!z@(7ZtR zBEWO-ozd}s8ioia9G!Y4N_-9L5mC}^)c@W64-AOW9HH~wx!}<^ceuYjmwF-(s?J>L zVn?vhVr8w=0JDR7P~LF@7_Tlb@$AJ5JbrwQZ5!~-yRp_&GUtvo;C{cy&CLy7zk7@K z?{9FvyXV5=K)t6YJk^X)1!_ugW3+pzU~SP8#qw}1q|n@m6*p;Y>POpX79X7rt5RTs zga1^j1JyEvcbYQk1xK8M!5-AIvOl6E{5>47INab&J+N91sWx=yoo_2$U|PFudg%c= zdEn8^i$Sqjas zN<|feOO|Pi1wn^L=c3bL`WeM_wl{d+l;4=!t6BwTOh6^e*?lUSe;}CZq88}lZln5@ZtSniC zj(hh9Rg(>fEmsf2L}F}ZDc^>m^#ho@~((C-?p9iYmEuG0EQ9KC)Ja$~{hBF-BowxWMJL8sr(CG6*@w3^m*9wBk zR%vjd8;IFifK(Obj>!N1;nx3(4J7{X5C0%foBt1@5l?}xhIRTv z6M;pybsV;!CP4r_rlcF&0V0?t_E0rR@}yDbRnMc)hTdQ76p4719Q%Xb4^dob9UCUz>J|ra~bQ_mt7Lbtf(F3 zY%_Eor_&`~Jb#AktE(D5P%0Y-s_pz(aAf0$g*LRhcv2yS_`&pS>c=wDoJ^H|7$cBTI1XRU#2fW1$)VL1u(N7 zg47)r8Ku~01)S_^WFSp&CF*@!6t9YR!3@qZjOX^jenEJj8*uW0ae0A9*N-ty8?Bcd zzz8zuf(sVp&=1bM?$Sb=bT)IjYN_&j1k49A2iZM|w0)(b#KimeZ}9fb8{FRAw8=81 zDAZyo_g*1cr==tTlLVEEL1fi*DdlRoe4;o;-R2-^TLbXHq$t7~9qg(%ZXt z`26$F@aEl{R@SB4VpSaAp<;;m@`WA-+Ei>Fls0D@v_9*ZYwK?#_`4E9C?K?> zeez*JzdTxz2@VZ9U2FkB2zy?(Nu|w11`r(Lh3?BiOLMlRDGxx#VDz>r8rGcZpj8w0 z@ZS|5s^@u>OrMrjsVG`e(U&NfT8xI*Voppf@wGe;k*I7mlc{N`p_q3lYS)Big{Fd) zwTidZ%X&~W32@ZY9sCSSrAce|2T5vmCZ*J&qsB`em1$)O5l6)dFr2;ItB1_V8kho< z^K1z+t4sqfW-_!L3VE#QY)l^4Fj8PX!n6q^8(ms05P$R6IIT%Ff*!tpF5Hah};8{marpN zZJ$BKOJJT^dkv{$c&-me=EO`YWVj$37zN0zEh92^+>VKFzyA_n&ROq$xWWRPOP7eY z>>w9&4N}7@A{9#&Bb#-flx9qDL=TwvB8>#se2ST#%a62@JvB(9vRwY$j)HGOeTJesa2Sx;WwG^JjSZ(F%;8`ztF3ekznM=P< zRaiE}x@Mb4d)TsU1QiZF6*z3kb$>bIxO?wSHkO2RMx~ayaRzvdWt#7K%>!{4m)uk;-8?=Pk z*0x>m#I|U4t+iB=DhzIqYw~WK@zM7f6+<~ zM{K2DY-J0)BG0pyEuk8ka8An}m673r#nK2Z2M6PSl@>;jIEM>Cu%{nli4~V_RpE+xw#SQR`EVVKM?%P3SQ_Ewc zy?W@gb;FFklsZ}v+Db4nlr34yuR#SI3bn<=7d2SgL%POSbs({g@%PLKYMhNZSW=lWDQ(B{ zPw@c`Hj*9P%)?dSks?pq+FQtTD*J|jKT>7*nwGkviBy3EIfl_ z932@u=*;2);0?``JWW@`qwB}`=;M!Yd2!v%b!WFrax?~uvr#`&N15{Ar$S*uj4eBi z2CyoZ6%F5Nj7f6%_kZvM{LXiO2T;TBv#cF#o^=RmbXawY;!GJkWdl=}B|VH|jEem+ zhmV18ed}BJYVIR2^`zqPKe#kdffFA^57u;zzG&3uX?(9mTGwa8W_5 z|BhRU0jXZH;jTapqJmZiU0eyrP?5s|%_KJtRi(?f-7(fK#)hxK+fl|AYz^!Xjpa)M zT&fyxnWqwRTJh288bv=Y|8lgSSpe8<*{Oz+wyZMrqB=c`i9WL^IEX2fjEh4LqI*!>Nmq#j8?iTI zN~+SvPxp%MR;SvaY5{zpGJ|#a;Ywzp7rh4Ujv(xMm}6`M7pD!+A3w+Cc2P~i%!X=O zE@&(e z=QWs}IqPT8V3{y!fyEmmDyn|aE{U;}XJg>$HhNsRNV{8m6(DV`J^I44G;Kbe&b@ z>SUc01>bZk3+7V6rj`~2f-{K3>e5l(C>@|r4-0?0+?b&m=w5+ABvbvP9b)-?{H%rFP~v81(`Aey81lvWomw&S-nZrY9FXOvh;jFqT3_Hsm;{uxi#+?<;f23HQuZyAy=ZnGfSYec62L zOVwOEEj&yXTa-E>#j)eF=FMzliVsy9+J^~v3jmiHH~~0K$416ou(l1W$lD3Il`+Kxi`b-agQI&cR15B-s-tr zDT`V zl`nLFvv4pTp>{kl?C|C5ZlbG zwAG$NsV_|S06?(|4e3%Ydo34MrZI-&>iP;VUVMa$i_@yg(y)2

    LehtMM8(`hAl% z0&2>2K}c;93FMBLWwi*&C0STlCgy&R?|<)mc=zsI6*kH3J?eLs9Cjfw>ARiW9&ARg z0;7@+p2UoRO=Ik5;_IJ&9T(dP<{K`rE^u{u3Ex<5jkRfutjNzVKK~Kkzj=!}ckKJ# z&1T0**(yTY8q&?S_xj$NvBAqe>adRuy~!UiN_^2ErPMdX7Aadq=p}9Cc7;B?+Uj^W z-0-Cikc>0KR?@~;X&G+5wkOTN2u70H0qmp|*@qC+r3zsezfnF&(3OiJ;%jvgk@PBN zPxCWp4RNAmZ~C%Yn^j|7bdXwhbdkJ#tt+KO!$EmDYxA&%Bbo`;_9j-WX)Z3hG!xzi z@q(O5H55`r_*es1O>vC?03ZNKL_t)M^j3Trhejo5F{xb5OmVei>GJ3q(I@afIshkEk z`IO9Oah1R`y)ZP47j&um9O1qo!eU3XWt0)%QJ^=n)(P9BG+XB@ITRV#G;Iyf_c5sx*kUSg*Uo7Q<{npBK!_ zjQiVLyt{di)9Hj|IYpD6`%LqpzH617#^_iJMn-`xdEh}Mf_U+;VlQj)TSXb$z7(8Q zOzUthF2ON!iTF`N#nP_s5cTc;S(06YN}MK6p-vsSwox11URFMr)dkL^fsfK&X-s+N|71#Cd+s zF+*uJ)T@RY(-FjCPj79apeYZKF}<(I^JD-}wKW#mPzxAu=g$)uwgQ~{4wDl6%NIIZ za6J|oO6GMmFnTPy%_1693B8Qr7n3U_)glVlhK)>Un+*V9i;0W`szz;z7}A9P69C*7 zivb`TD4mHK(NqwwBXjT_1ZcX6*Fpu^lFBl~dfjL-35#8$8rrCK1C*Sf$9=>1?{4tM z=6IH^(RR&xjkfdk-VonwrUQ8R>w!j;wE^bPX~hl$@RSkZ@TF={b!F`FwUgh4Q;{pQ_;M#TBk!Tou7r2;5Ox zH@q*f#Y(1^GFfGuC34Ch-~vLe`iNK3MsBhWBBHtR@@XgxMW@sZK(MVllLwa^lj_^lc~!u!A~ zL24C2l-^aUSNK;)@x#}ymBx|3Zp5Vx`$Sq33h1`-ZAJrQFgpq*KkfoZvcqjj6q6BmMIw3@X)q}ZQGN$5Luug|zn6`b^5c%` zz?mM%uTDMI5g<^WOpZ9J1zOorT0Xe65DvU;5{tcZKKc`TF}@ z{b&CabeoM)HfTjKy7sn;NF09uR5dA|e<@d;R4P*EVaCep=!2I-n5H-i_6P6LNH>I% z%t+zSBn%E0)K@lyF0~dx${G6YO^q3|Jfsz?hTJsq)3*$HKd%UZ%DYOhMSG};a&W3&OZ3ERyUXS*F#CxQ-910(Sm zP}$PMA%KS4yL;T--{SG{u`I7JVQhKqExm!v2d{LJxQ(pTaFTWoXz?3_QJTu8dqTl2 zIaEp;ilz!u3fwvlGdhrBGHgJ;nvA=8*qSgbzd4M^KnO|ONqv9Gk??fE!jU$bF<3Cl z%%#PBk(lT1HijuH^=34aGJ(U(ZmXRWW@D4BuvS(ObzMb^yD;irM^mP3q6BzWs!iHv z5zH9$@%&qaH;~yzWh&JOW;RsPnGnMRZ>lEMl?v;15*_RCl}$Gzy(8|imp#>i!N!uo zL`jV)d}OLgM#i_OrDYB5B|9h3UQ7>C6w9jOfW@hy8YvNQ$6kPoDTZLe_0F)cN6WK{ z3SsimWa!<}fCQCbFLVy%8Y7K69CMF9ztvz>I2_(P8U#z9Vaq&fOl^ZlqXJtH)@_73 zOc?zsXqd%fP^{UE3rkx!Dl>A(m1!7DkAYe@%(einV>!*RWkE|7FdHB{5qLA)ZUlet z^Z|F-P^EfUskD)7P}wS8HT2cR4NRdg$dRZB)~wBGv2!68uE`j7$JhIuuJMPxA+gMmsbx_kor(VA&m$o2fOG!{s9dj=!k-FpJ;v82OR{@h`hcrE$ zNUv-gR+NNqCET+3VU2NHBOsxgA0i4g#mFsDE*OsG)G;;1H@^N2Y`5Ey48cRPGs3B@ zsMAum5G6!s9VhDL*EPytGvE8-{2Z@ezeE$kJkNMMoG_necu$MQP%_L*$K8iJ93LMs z=>~U)Tg(rS1+GenRmM-%#0_*Y&@x~cK+D>2BzPj#El;>qvN2SL9S4*ZT1?O)mTBY{ zr?tX;*fx-P)QZ89P|7pARbDny*_$skW2;3BU&nkPKTp{7!z9K2+@RKZtlyakB$1&sR z0Y>ARX^bIkQ^R(eaCv@)tIKO_rcI>_i380g8l@I?P4h?^Fuv}XQ1TF&dn>#Sh3Rk4 z=8sV|THs6Wl1?c0Vjai(BOZ@OSWj6u8%{o0Ova;OdB{yi!b83nTiX%EvDq|i zHakokHlcGcV2)+!u-=2|#rJ%7f5f}DH(2_NDM5{DMbj!zza@{9ETPr@>moHiQ8Bcp zRc}I_$~akh+lj`YMRCcb%-n;mmv_Ak#TFk_GMHqgAVR4yP|?|*uZsyWPDQUPp@8Vo zL}{(86^>L9P}#>?Pqi`SV8nnk(1CSLUpWVLxHCeoF!Xp7R!e^xp@L0uBjs&z+31_oLio?O- zpi-S%9DyOq&s2!K!j_VZ>%OsD`3MENkXa)0CCFtq;wqicvImyfonB z%?`~K-2--Mh{ju5Wz%;g$*5^+O&p1E@Le z)Fx2f;0t&B=6r*PY_c+qshf}^LhCJS=Q?qvVlUHm%vaBKeA$gkcF9G~de~~JvBjaT z+p}Hc4}C*oyZ!G&oS5n)FW6#Fx<62M2s2;h7cpno$Ao2oc1HfJyTe(8NM+CR*Bgrv zjY)hYPNbS4x^Jn90K2xyvvAc_ibpJVy}-^=Ui8?sVkfdjZ((Itb8%+JecEpD;^ixR zcD9}VY}Jku|MK8 z&!m>G;bWYVw-C!WQl6txL11A*Lph_5NmN5dqY(Uym#<#o<4-<~D4}VBZh{=lQ3|`6 zTG5n;<2{bY1Mcsl8IkEK@QiX~QDX7kTY1D#M{_L&a!mSju(1HbGfM zXJ%_-4K@`vKFX58!dT2W#UI`^NlQ^l5Tz*BL!28^rkr?Og_^3d#{Dl25nahUGOjJ8=l5KDGHzE!53bmS09asWCd;`rVTExE^&T!fu@Y`&J?Q+;{CR;73N^ZZK>KK89>?bj){8=IxHJKhy$*y4o2Fv z{PX4xTRP_X5r@N(N`zi&J`^m_k*x=jn4D&fVKspsYK(G6NEH69DYk9G^@|tSoo&(D z2Aj5t{o3eFR{}9@=4%EXACFA^@bbHl!vzOZK1K{ea9t#E=-?zs)(2Ws@B(7hklV`= zi$3ZH)hr!l@S-f$ubf;X1`UyDmMT4Cl)8+ekYwVRK_f0|+#7senzXP+s8sq@q>x}0 z+e$03Ijr}uKo)a=#Cx9UYTYxbL&|55RH=+N#(xaE5`3x;Afw7rX&wgERvakvp-8o! 
za)Nk4n(7<}TziJth3eTy;AXHv9B#Ve4A4@I!2&Sp{BA-SEqrg@R)0q>)k zm?~PB7BoE05L2;HBElC{$0Grsjwb^wFV%Hgp@Fd?StLW4Gl?xNIAJRiQ=F;bg$mA) zLq%#N99WF3EGoWC5UK|?%rcYZG1_2GI(XJ<+Tz)=9_oZ@1g-9sW(He&adlLt_;(Mu z=3<70K>#qvVo_6XrVX&f8sDhw1-WODz!CR#b18<~)FyOyv?frUFv&#rdqmJSwsaq- zh94{s_yJCFzJg(*B`bmalH8TF;N>8gi8pC67G1+VsW4BDS%OZQ);bzPWCINEAo!cl zJN!SIYx;-v7rpu7&6m6V?r)*VpJ)qv-6BDMr9^d@*>-gm3uhG|Tafn@0!rAG|mD$xQ_Decy+SCa=N+4F8>7p3)jLPh>!CG@* z>0BV9vGITW@Bb&h{Nl@)gxjzzND!g`UxqYJ3@Q;p6jbM+WGl(&od^6V2onATeD9Bb z58wUncbLr3u%9-NsYO$wikv7mmSx6gpM8$^?{9E8+~eVK#?lx1MXfs1HL()M<=F+c z+ig&M0URG5aJai0?qn7c$|-up#>eDcbE2cPL*)0F77k%UDgkMdg;B`VboK8ff|Z;$ zy6lZC2q{>U6Vx2?OOEO(OExJhY$+&Y)zonB%*T%V&lsQPt7yE~n;Riqz5<$}m>Bu* z8GK{KZm@{X?T+2q9vA25Se6AJZg1)IkhN&1h|TU{uPISaCuSak{n-v@yM5Sm*s*^5 z=1onQl6UFrC{zTsxoIo8aPcS*vemS22_o)SXbnkEth=Cht33I*FEt7rTczcQ;p~_G zGVY%Dgy-w|`v1J_PyCZ;TStG0wPnGACKI30m|is*P)W;{HP^3HEV+>pmE(xy0A*xm zs^K!Jicn&qMI)_|+2}M~#d&7zFpKq$P$GzVab+M=SrXM-c3wG)w^-{$IZcyQ`f!5g z$SIOkY2@(4*r*XSjZ~xk`4*R#mpDH^i^FpcR_NJ|bPVH5kI$~j6oqv1C2Flq03g@D z0(vM~vH)^Ip2HdIfOX8vjJx{--oE(~4-bz~LzO*-dw*IyH8?#@q}V`G>0~1fU6~S= ziBOX^ymU@}ym^RmRSVKE#Z9`W}5dmIi&*y43?RuS3;CH6`n zl&}~UTdXY$zB+L8kX!)9N|snDDOul0C&S5UyTB@Wh4oFst%vh?Fuig(g>KN+Xk(2! zD&+OHI<&3^08I9ROEK6%wSd3~QzT3@n_v)_^*T^V!lK1s+9DxLJs^*3`vwoM#?L>L zA{Gr9Rk-zP#5}QolxjvsoMNex0Bc$nETdGnHAHjH1>O=A-$D&C-YA(L=|QGaqH1B# zwxN|Ml8ITs5|UT~=~I$^QUt1-*uITA#a12dstS%4H;s$be%brIJa| zxJPF((X;Va8)Wruw^Z|J)MGE8c)dGETN=b0q8AYNpU70vr+`AcRRpSa#k5gc+M=$V zv_+hz#WPXxa9F7_Hcj!kEXaV6Nw;V%B0L+DCzgbhGj&5KQ*9h*32j%s&ycpM2B)f0 zTu#{M3K(yTL0d%p_5_m#!M>Ovjj;ZQCiwo{4esFRx}urq6gyx+aj*1H=f+*LVuZP` z6yj`TGjSsk9O~=lkq+0O8&un`E|rh^eEp$SNB}^aw*O;v@LFXVr;jpI9gGTvln+95 z$m(koq#)0O5-Ztk=oT?fK00Hd08DPcBv z3TfrYi9|{HGA>S}Hb2E6{&w~9kS42POy)=}PEWOYsbDfb79PcL#AS{o*C&}=A_Ydv z?y_>@+HJQG5o|UaG)*}nnH!Yyg|Y`w0_mJGTad~P5j-9raC38m+xy#E4_sPho>${2 ztrf}T$YPA~yzmvlzlyMt)Z%O&_SGRl39ys&p%+{>U~C3pItRY^`9Jr(BRpTv*FV5@ zdi@dfulzM=i^ECF>9h?#jj_VW36y!pAjjU*;810ZB`5zv7;xDmGssY7G-;E8Lj`l*_mEg6f1!pPOow0r?=oy?xtYj_v&V3qgE5+34eX2%&ge^!On-K^ z$N9wtuCK1J+wYkIuzG*FCsdQ7t=LH6{YVZ~atBHsSzc+WsdH61sg$$@W`?B$-Df-= zkN9wZkJJ1}_@0mCg=}Ilw^~&rxgwJy5~9uhK9ra-h=}u+V55qUUcSQR)de=&4VpIE zFB+AVIS`1nt)gKx$)*8EEy>_4$pC)8Lfm= zLJ%OH3ejxJM}tHQl%BPLBGev8HB#0n5Z1z8K=X|0H4rpvC=ONFF9Tp*tSSKL%TP~{ zPf@m%Yyk4@q*5r9jH!&K9Yu}&SE?ALvAzp1Y90FtuU(&59IL3t^csoPJuFPeb1mFw z!CfKBx@^UgvysH0E-)Rj#coCw2bZ-5NF&F>*9r<*scOfWKxhxKj7(9(28IjW;z}CS z6&;E$jSWnuR;2yZIn-n9>}JFJq4T3`@m?AmR!&uQHQ5MlJ)9n~EC!K|Nn(iDmry1> zE+;IENo@`+YnyWoV;v`3Fq_ljO3*nJeW9An7i5Gg5ZnkMBSJM4(VDQ%iS`aQhz*Jx zas1Zf4GxkvI;*H#Xh7o$1gylGx-%&KC>SsqcWa0nZm|*WK=w&Y?x+j)M~aP^lxN&8KW{UTD5MRMFV=t7F z<>g|KWMxnaugN@U@eysdJ6v2|p!bg6J7({g=NZf6%y_K$mJSa`+}$5=I2`cq-5Z=9 zAJO|lNo{mS^Kd@$<+GR^(6sd$6$fUrN?J6UYBSC*zr|t~mWF6-=_>0OB4`%VmP`dV zY9*xs+Ed*}B0CA?B37R(5c0j$C_BH~^8Kb?l8qkYJ=ZdALLxCj(pM@ypiQy)j=%RRh*y~m=nXZ4hdKkBW`0_(&Jw45(I2raQQF& zYtOsE^YwiFF0Rcl{9;T3Yf25Ec`0H+>A9(4BKs#L*sb{Ox=QNF!;-=?5CDy=fSBB= z2s#GfS~ZdQWAY91W-1bndG)~+(mL2V{e+^%dIV*042F{wjIJZ0ZD|(Jk^DuOKrHXU zrAoU&6JS!set(9`vkP2ZzQks;3pirJ`f`?OQu}Gfi05u$nI>eJ^%d&U)u=W*>t$_# zb#kZ(7<56mj??3U`^N+B@9xoOTY>#L?T#A$kyM$PMZxZwo+4vfF3~88dT1yb_PZUf zuC8%*c7{!xLXD|q*bSQrC%`@PN?it)))I))|N^TS# zypgOWO;ZYwRa)DMXC#PewPQ8C*Hv0pem_#hbJ6|@&#u%j0?jkUBBI+d4!Gix;!uhU z#;9fr6~XROz~;K;qRwnmpiWSs5e~^X7&c@Ija8Fj4b;xTNm?gUK?pGpY{&)iR&nEX z&o?2f#hC^nE0&b5>O$7ajB1pexLMR0@iEIvT_IsvFJr)sI+S_Rff#8tO-lmoeD5n#m1*as#MACb9eme^UnTL z(vaBfxBsiC_ds1uB}qaS4hbNU)qO=7TT-#5N%bUh@hA@+<=1)BCpDWZSOt7Nf#K1> znhyAay3sa<0MdpEF)4(gT#q8a|}27L%NA9l!pYzmC82cm6J#Cd6tfhC%>2 
zZ0Bk%=GQ-RmylB0k+};Rz|(StLg&X8Z4O))`2{7(mv$z}{8>ZV8@`s4Pjb{6+X?xz z16tqdut+}2T#Z>&S?Sqw!tKJhJ6}`Q2gqotmQ1JaxkkR%<&|N_)}ohpUZ*AV-ISD9 zc`@F+^Fhe`Pc!@%*kk$)WxRBw=b);s!zSI|f2lRd0rMqhwqUH5(P?70WZL-6zMH?iUi!QTYbmbDK9& z*uIxo3aerQu;IRZQf_j(;vP|pek)>W=mtu5-1Q!;nrihL;hb%la_ND_0$fd$1_Fz) zLSYI0pMn8T17C?bFjK=@s~g(5RMv#!bL-!xExnQ(#e@uJzfGkVH{6gbkM?zEmMgh) zv3xF5wNd9p%U3mviZ+oMEJ(-H{!JFwNC7KtW&Ni3u73nloDlvFf>Tqp?PJ?_XNs|Y zP+R`V3=}8Hl_I0Zs8Bl1sS7QpEd&2DJw_*I6C|B*)NH@4sP6sj*nFPW)kYBJ{O%csg30q+6lc=S?2&G}*oT>|CYBW;y&ubSE2G_|NSP#!s>`Qi&w+MVOXE3xECKus;oP8HI(=@WLf@`;Ihp5Upwm6uE_C(SoeJ z)$N2P^~KRC8}7d(Qq)h6&#AFfK&6ri0Pdd;H0f`LL-ve(=l{6*e-Tq~HD%fM zzUcXR6SruEhHi2XUwUiqU7im|E$JNQ+_>L70jmr75|gwo=QaK08PT*p*6wl{@>TV6 zby3KVj?CAFH$^b}Yj;E4(iq29FFJS8qCxL*kj0>AxMI8niZ&IteY>gxT`*;uW=sh` zOrT(erIkOPQoiajt9@g4#7SL_%cTGKa8=&G2@tmuOBx%5ayVjTUBZQ;YF`?o(TRMQ za#2vyiHC^JuFE?|)9_5N<}^y@8fklPCLYd(sB-Xxdnvh@ps8%vA^Eu_n=AKYmpW+- z5k0TIZUdK!&b${#lbF7c+=-u&EFKZV1H|e!sb}%S45y_?kx_LrPq~IO+EQfjPDWtX;C(ve+ z-YRCLLPluoOKJ#VV$@c5ys~x_DFXVy+?57WL%_73rjx5RSoZ&APoC(SOhfA%TcPcf zTDk8HyM3A=<_|3oyJELN1?+7yurQQHGj@Kct_z9R1-(C69PaOWmk-`}He_`-kawW~ z6^A+jb}&)|#aeTNC8S^?Bc|q9^e7Jd*o?Z!A26HwYGX89=xjJ*CtRs$xmL$P{<%sa zz-~!98uf;1PeQBN7Lob(y~wm?l90!3bDUu>S?7qL-@=kX&4C`v034L1@=JX^VFDq> zsl~2fG9?Bb?3N*Wu!K;-iiD20w}tV8p!4VR@|rlD1o%*oy16HG)e}eU5tHN3U55?0 zYtwUsg~`PFU;QwMRC%Gd@JBy>%d_?e%6N2EIq9kCnqe#Cind2@6WN#HXQ)U_sEG!P zDj%x0ik^)9|A~X`oGG=+d7rA*v-2}HZ{w9NnrgncMMC!x(mDed#?HF8ioQps`Ps2% z!w15Jot4CCosoc(OzGOLPsga@EH*r>P?x;!C!Ha)BUlJVbcoKcKb7|_ukw?i?{V^V z2AlhC&Y}HA#n39$K-mE%_A%-PuDWkVy{1NrfuAf8wcV4iZ%P$6%k;ZBKLD@BDtyV_`)RV@5l%{}*lYL^C$1O(&|KD=+kK;Ht9cb(Xc&LXCjDHi`qp>AIiZ z(?-e>*tYR^INkmh2A_L(shP)<2FIJ1k~HXp} zf5c8V{zyqP#s6O7H~3~?Bi&5AD4If7jx(*jX%3~4LINB+KP|4~EvkjBk@OXii_UApEFOwR!SeG3l>87uMX~ zqQ%04`itQz#*W**>xN!UGbb1&d!@on=rjS=Aw5WwrW2&v{6yz{eb9`G-HVb|E}eaZ&o!9A1e6+E!flQ zk&X){*E*R}yO4%n>x`u$B9E(1L4a;Ch9)D~zTH2Ju|)Bbb?Es2b^)w~40`=qxVSkM zr3DsMz*Bad(VEW=zq@WGOT(1J`>D!arOpB3*af|j$K*p?4Qlj|M)w1ndMI^r&Qj2} z0Y3XEF}akk$#%K@f1a1Gth-)52fCtfh$LCp*ratD=NPEroeF z5a04`Ie8_JF(&iH#3iECE^U<*MZPeSc{!73r{YauP^Ig#+Zynq<3&dNryg%g9gmfK zJZqjikG>4`{F4hslxDWTUq0U3iYMI38#4_%QrW%@%XonBs9XdgMkT!{7kk5MOL~^qu`t(X2XhF-uxRJo6#&-_@ z34fEo6EXG+5^6QBod<+-A#;GdbntKkteRgQY8C#*^FJI$o}**$+OTcAyJtT?E5}uU zCBrG^W#yo^QidE|)gR)$Q#unQnQA!wf$(h@&Yjd1@;fS&C1)=wm7CZ1e`(?@pw@CE zdawJ6cY1ni_hOP11;PB;?u6yuZ`^NCs`18t-tqokh6+Tn0N0;WMp|cUZm|KJ-23@g zd*YtoEJp1%Zul~oo_l_H?s|{?nQXo~4{G28hdRoMMOg(%fV%cK$toX;373+RWu_!flxk%u%{oLHPV^{HSAX?I-@m>%XYP z%sqVAGNjzQ-1Gfv?djNuqNksp%2N)F59c3&3n&Wx(pQcgvVvYXOonk6KpaeyAy0ym zzuJD7?)IP;iD>w*@1w70HadW>GJR;7Fuv%ww|QIq1LEI~=F$G4o~;2bzKvz`oBba-}_pOEa$~d+D(Ii~3S>8aP5jryi2RYV8(^E&WzT%YxIiuS73L0zhhUTnf&RMyy--il4@z#+3rvFkf8C3YINbA$Ywhze z!S9dTw|Frg|CP=ysGaOjf#ul0D&x~cop}>Dwy+M?Ng!0O-8E0msV^n`Gmt$u4=~j!+Et;wwR;_*)?@fxM#oG7OC;8%w2$%NElF@5_iab@fa;> z!C$q{g`BNH5e>MqiRG;MH_&IfTm+1+ob?{&=JZfGx$sNc$a2O?zx#w&Bn!d-1kM&n zdRs~`#B-WSg~U(=pn{5V4~Oc6QXXXZWq4?m_fPB+MGP`_MDk^YvarI#!mK^)KO_#T zVmkp*EP2TC2|KaS^)Mmi3&1N}#HYCYx-MwnoV&8-%~KQs-BLg0Fy63?&61 zq}GJKUA4n{8($y%9j;0PJAiqzj;1t2UNf%oyCa3VL+tlfOyE3r73U->egsC{<_rK% zY_N_P{D$>rX>C!XLRTp&h<`h?*kB3sPGD|oY6=x$-`OWF_n}xK5jl4ODV9-c`TN?< z-T<+~5R{|9T%F@dvpJFr9kI_U;MIs2i^>X(lsW!?^_o|=tPkSe_j#yh;)V8=&bHvR zyQJz_)TZSab*SfGkf5|2&S_`rvLxR0I%A02YKUypTodYHpj9ou5Cnc<3Xmf7TuR}z z$4Y-I|KYgC(Ej4@rkC_Ou)@(yf=y1Xug;2#E!;`=`Hx|t9X>X90h?u4TzaE{w^1hY zu$-+eAZY3|ncJ}f5O^Lv{wIJOR@TM4ty#aVeJW~cvkDGiT zI*z9WV5qu`_<+&9wOZ8BLAtR0(CP|! 
zBAosi?j=QM?vEXU_Qfs7XnR0`i1??1yurrSFOPI@w6vWIk+?!~O4Jdwdl;%K#yXW% zQ??U$U@y>RUKt@}X;ytk?FYg@%l#p(sWwZ5GCwOT@~|KDl+&`b{4DFQgezkUs`SHY zM3D}FcCL|vG8MI%y>f4+qVkN+wn~Ot*9-3jUip7$T{Gvo;d<6*+i`YJvx;YViUawh zPjl7u%8H(Y9D7X?QtCEb%85NjSp3>rq(Sy#* zNZ)D=b<9YNytk_ewX>`3L&Hu#CVYs(4}*-(opHI=t7n`z8=PaPr7ueXk3`(5_&VY& zLj4%}CXOK?rkReqJQxM)!Xd3t@q;7?&!spsJT-k;ix414X1hFovuJCD^m2>L11guc+N*o}7~w-B93rPDhOE`H?L zZ7=rETGYeG{0n{agZrME7xf8Cq!sDuL`6nyqd6UNT1db#1U$gM@jYzlC2O^1`k{HF zUM_{EP_iOOIU4pDRkD{_C1oTY{ABY;d$!KuLzP>~FoOfFj$?~0eP&$ey<8-#4} zHILXNGzWN&PWu33D3K5?uMm(tfT3RCekvBLbpRJbjLIc`$Va%kr7Pz(CgyuvHnjBV zlFp;=^*GS$yn(UxT>;1oXg&D(4WO~|n$eBVa4YrJ8W{8nwu`iyf-Xfn1_l(!^40u8 z6=4=5)dwL!J9Yo%R9oHo&GW`yZIKUd)BEZ~EhlYrfs;!r=R0!LULk|$Jcsya6V+&9A&ULH>BG%P$sS}D1lQwyN#bDtc$BaB_X@w53ztVNKGhHyyuC3L zF2g3QRU-x)#5zI!=r1&hJ0qnbj>B^B2k!s10M#gspErFlHhJx{(EojSCEB<~U$y)8 z7xx$%S)eq~f}eF3GS>}4L-IS~9J-PKvxQQrT}k)vBB{k9KV`Q6ho)-`sQinzlWk+N zZDX=0+qP?}sV3iS+jX-w*{-R{wr%(B{NH=O@3#-Teb!!U?|s(kD%SW@Ta(0Ar!vR@ zi0(=+q}{@rfuos=!o#97i1t!($cpvsu3?>TG;1spLdp!~go$z!5?8Ir8y%m9Rw*)< zcC{)xr(hFq2X7w?mPO>#|LW|u3G#p5bUr=zh)LYf5l|c^1@(2yTHcm*FW(o0eNRJk z1$nG2g~Gpx*6)|goVy!JxBcnR#~xTO+T0;Blth_?q%3kiN%voy$2f*U6H{MWhKsng zlKk&LK3B{B;>UrA0YT2|7Zs+OvKW|m1H;f5|ig>du^xx&fQ!iVTT_0na- z$FtQX{9ZU$fxfRO z9~U?-7ufw4aL}3G6s*jn-prQoyruCg57=~rZLsUGY_i0?_AV&1Zs02NBb?yo-fS}B zRFsmmqZG1lTX|J5#b;EM0wygi&XXr!P;;ZB;TK=P99#Ic#X_}zDxM%Xb#k5A{@5lK z+V;KlWuB1aM95RfxhWWLs$M6&Ix?^UrxR4=kuQv7EKj2?X!d8)2SR5=Aeh2K0q7;@ z$!eE-++bfdc>k~orTwRg5l;8`H>XZ0nGf!WJ5qk+-DjHATNPP z|3#h7x*Pe&8gpLPZ8ptoD=!rQSV*PSV2NoCSn44C7D1C zgAF9*fTWLrn9a={Bgt(hXIu$ffn5n>qbmTPHKIp`9s9#UOmbHm1~g))S?5?n#S8ut zZj2~)ka2MbvdMY*&;tFr@+kPU^0Pw}pzQkVLY3==M{Z~CJ)(T~8!T5WufwUQ`&uC~ z^O+~B#O+|hyYL63koz$IBf{d$%=NzT1flTI$}aKXf~}!K9p?b|4bMphi5M6SS~+q& zc>wFazq~lvZPe0F4R;@4W$q4Q_pOwk`-kB7Fxxt>r7L@yg0%SqmmGD7l+gEuf*C!K zd0gUEqfF)Qnj23tyZhHWq^Q*{a`RfcXx!()k#hrk5jnh&Nq7%gN??pL>!0~E1R%F>jmG0h>eKjfm$x-lW3!#^AdU@_A-NL5FZk3@*4nQ(i$kQ+SwD%RJeCC5%oAaaM~oVyHw8 z4L1eAp@#k}ciP(KE!DrsxhA1H4;%(oIT+%JcMs2bbSZwZl#_er&Bo#%5^Hj#BRH(1 zS3bk@7J+>VD=1KEdRgt+ejk+YiF0t_F&LVkYBe@;)&gTZRbBQa_~fxFKFtoBCAH@p&+F$Jc*dOXv+Nx`aW-vM5+iq z{}TN1^Am11crTsH<}8v#j%$-T_KEo`4@FEhMoYeLnLDyj_WBBS_AiRxCU~A+x{b~= z9r~{=g|JpvzMYUU^deV-BgW&1NCU?(_zNekKoSmR;h6{2o^nInYr(s2{G^_29_#V{M!5!<{Qq=iUQt8@IKc!OzWKuEC zN7T_u0@JH48}gtQ(Vqlf$ZyOcPwipkRoa5V4=*5GZnmZLTi{vb+EXozI2cAVv4_>K zyOL#)iAt}VE`^g9SBf%HT2V#b&CVMoAwG%6Z!Iq4a=it5Z`@wKQlA(Suh-99L+##Q zuK7G&xZo)FQk#B1nt$Rvz(g_^Q~RPQtzIcHk)-8m5;6)UMp4{DNofx748Jp=)n5OL zi3O-oURMH8i1YpXKH8RAFYp&~KfUGso?zrx@sIbYzA22a3)X=blh z;e#S?2;B;LQez|Fec*kWk8Exqe-2ziwT550V3(18hrIBcZU{wQ60Buo1a`6X+yVgx zqSKg2EzTQCwb0=j#>wp3gYa5JtRb8{ghQb1S(!wL4W~U4>tFWk5qp|=W@8?FKYzq2 zk>FK(!aaH2JgzBir;MtMoMq<_*y>6ls8QvwNFMn|Jv1?|J+1zV@~e6_!@MzD zMCr)R-DH?#VTmMR9on6LRZL2(8S7HGQpH20B=w>&7&|u?BL|jpxFmcu6&^8LkFOB*4F%y<{-jV?fpS^wvKarl)7gb`3(^+h| z)d3N-kP1A3mFw@K)h}J4cL+-$WTYJ48KYnFTcB7I?sSHHzn~3fYagn)%q9t#@2%jRDUSiT!TUtMq zY_mORXwk=7HU)*FcU3OO++OL|S`BlQ5*@<~`{>u>F*?&P)jbuUW1oE4V?*jZeU*n; z7SA?3wnf&VnrR-Kofo4~RNvei{U*j?4y)onXT}n!77{|ZK zRO)%`Pzd2Y{L-lD5DL&a1?~1D;^Q^DFA_uY>$0 z->KcFSLXia}G9j}x(vB(_}s4d&4eZb^m7*^dfbm~U9a1l_T%(NZft?^SS5N7A|ZvK>1f^Sdu{YfncH zI}8J3m?4QhKVdG^I&)^QQ3Y&*#dQ)-k4qv)#!1-3i_?)<;4DW2F&LpveiE*tDj?{#B zQE@O}BnI=DKf z)vF+vYN2FHm{j&CHzOd<2U=0cNE6Ju#M}E=p)<%xw^|ZN0YqEj6B7e6@iwflZ+{b< zu>PsUa`2e?T5@~wqow;axAD8QTb6(8#cx~WbdhWZT&+>rV#H$7PJ`BBEU^HsRsnjS zzA-d9cnlHBCGBy8*Qo8$OIi{ZSTz*gP|QA$gY8!g&6AnJ_X9W3=ToilF@>?bTgq& 
zz?;BnsIUvPqaiAkZ~e1hCzd3q#8?;cwc*%cQUYN6O+<6lU}(~K35sSWd238Q;FL2s&A`SdrLN|VjU>4BTo{&H^LquR1_sKtbcc)D@l%)Lr;Y6lj<0GbwG3pW&vp*TJa0)7Fx84-)t$^S2$*jXRF-IlxB-D7tT-6r00xT$Cc$1459aw>iJnLP*R(GP`z5e-Y0h_OHQ zy3_uZBE8mzOMM(;;UB`yF$gL+Mj-Bet&O#fjszc}Dt<_cP2&JaN@&VqdHR+O2QL*G zKIr9P`piFFnB#l^=Qrx`(9G?8mpJ7npbpI?5gB~C#swCUgs;)@qe_L6RQ!Vgb}%|~ zBybg_)cFN-Au@uEMhPpEN*t>Kn>4*j2vedWl&(OhgcEa(8XK0a#~oq+Ju}p6)su3y z%pX;M-qYgHCJsUSk`}*>*v5N~h-goRRwRR7`OsznPh7`l|Aex?xd0HB&j57a*$RMI zyYIL)m=|`-jyvKJA#F0SA}SB-&HwGM!9mYYNsJ6V8HzSR%_t@6>^?xB^Q%TW2dz>E zay2gDw+5SC&if3Jt)}vt6k(}_UA?}ydpGYtj+6_=?R%a}Htj=^DHXMT9vDp;f+V!m zXPVu4t#E)S*>seNLx_#hkCEFuv$~#d_~+kQ-!R_8NLBrH^@~CHmpNj)JSr{guwwWx z#8O0ZfPs*B1;GwbB1cOr)VKLgFtTH$5jI>F}DChfB96f<{}Dz&73 zBetS&te$*;Mw04|Uhe8h;ni?}m?b#u_v5sp%Fmp-`f?mG67 zU=B2XxVCnRH1wq4|Gb?)BV-@?YVYlt_PQ>{wAwLvASsiaY3A2{Ix^JRom}|&2=M}S z4g%aALeUsPi>HuW)92rsySw;HFY+Xw%?z+JJ^px+BD;jT4AKZD-M9-;Jf0;>#8{zZ zdE}9yZ;*c#->HY9qZ1ES*6%v~#%C0H#~G!}ddif?v&OjO1D#T^ad}-aXB+}d(UL&EdWqlj+FFB110%pfCXRxPh6Lg%S?ywrk84$pPTVf6W+h`UblOUzaT8 z>TCZBK0KW-Ck{ZL+}@6|$FUKq>^%;M^js(F@%fdV+j6>+??moe&eghx;hEE{T~|!` zUUqhU{GooKuEjwAN=@>k#e*r!ZouZpmq!jzNZ|-gM6`-ZPK>T)}^mUx5&Rl^>!-c?hb*Nev@gg794wU#%;lR zWYayiK@zS$DvuC0uJ{L{p_v<{pyqK$R%~~Hn$(Nf@Zp=UKHHw zB>exo0M*l8U>2%-^NnRbcbvgG1%Wm|upJV@*aY{lQOeHy7!V9rn{a; zJ^WvrU|inl;#f1;DZ~yWuEMnz!@=^>_xN(F@T=(9TcN*0Tr~2GOltS1lBN9F61d;*GV9W2I|PJyXWWwtx+O;F3IG%RA#`#W&k` zCEYQ}Ih`tD(7QgOve4Xh$g8i|oai`$L58hQscWZ4WU77DD|XN8-F4}oSVE5>cDW@1 z3lsX8dZnS(hjLKmv4kX%ST*TMYOP|VAv0`oYsQqp8Cdv@y>eC%J460K9jXjlF0Q$j zpr7gZe}xiKi_Z%N(9wr>IF^?`v#-&u1AGTFlrr2SLthE%YtiL|dWP6__*K&YYffClvcxWg6r*px^d0ei1x%o^UntZQzN%Q1?Tq~leV(C@QG(`R zO+3jJ@Sepr*y}Xb>5DdOF4`!5?~3#jXcdRr#3vEll%P!nx+%mGJ!frVYua1Pzvv>d zZh;SPWBg2Yj#H&jXzu@woSZ@?Y$EMSJ-RfKEtd6G$=IPR&Z!800~3wo(4%!xlEW(-Vs!dDbrdi2aaW0%4m2tmyZBE7ed3XPE<_9S)FDJ|Mua1_^kGi!! 
zXVXftNGw%@tVQC#a0__QbgXjd!cr7T>Kv0!wC~~6zv+{6e+wLlQ_&W<`dHf3o4CpB zzAvIApZW;6UH*+>_kaU49q;+v$q-u2(@IXCz(Ul zM#3G<`puP}b-M>&2`|cC+wk5lv&sXVE?7Wc$GIb75?;RfDD>zn|M|_}^A8sK*Ib;3 z>>n#1`wtiTMP4nlVI}9iq>OF=M7*&W z5l|ZSRMO(_OQ@%U0AyYSreo6)6KPBt^4qadcJ9J;E9YltYL)gq(TbdfwB7+@XXR*#?wm5GSMmntyL%=$}Rl#vhi^ zTr0H$?b&e&43N;N=?@{-UyOV{{`}$KcOl~|h~jfbPWZk=jmO<(fp3v}BmPK8Z}$x!}wGd>;bR<(kb%uK!a@H{Hs3nMPM z)QZDj9;2mWzsm4|Uh#Rp8O|KkjFGXXdqDyz4?VVEpiR+c5vZUOP$28Q2|3&+eg-I`VBKdDtl%B}3~N#zei*b&KLq%ou#~ zmBQzXWtVR>yE5M;zt{;}Y&N*+C!i^it*ubs(BTJqE3mrK;0dr3HlEK6E*%cF)#o<2 z6SMV7Bk{uo+h}L)o8^V*NZC}0r~s!FZ~&^#c3_3Di3(~S%{XJ5ic)P{L7Nl}#3LAi z1(<~HD8-TfvI*!!>7cjlcvLw_4Z_I9SGaEox66Kq^-L|GV|hszT-yfigVwgDsUH*% z?lJxgY_C0^nMoVXz)guiW;iqwIi*(o`xhdjZ}s#`x?fc*IcI-{-QFu8UMCP*9>^H6 zctILXk#eRiAoU9J#p`y7|5K+(CtzJog8`Zy8Sgc|NanZhc8)mBvHdP`X$&juh|_CE za!oo5RmHdkW5G}Uikzb31EweYfM16mCXesUzw$v~Amlg#s$iGGna9hDmL6q)*vN{8 zjK&IQRU2j-kjB<+EySf4WK#%@n^vF{d4;ZZASyTC!G|mbL$iZQH%Y3`Kj?*e>rf2) z8k2y%a~(On^lB(jdaXDK1J?QR2QuQeX6~)m$*_l&n_+*c`Y~iu;@UA2NaIs zCu*w6E>%nd<@1|SaitLY`sho>X85j0e~+pNBx6}Geq&KUXh~XD$4E!_su@I+rK@5b z1*Xu}!B2%+tdDfS(lUw)+cC&k$#c0BLcsXF7Bj#0kH1hi{^JE~q}#vpEq*eSZL3yv zjRnOm-N~ME115sPKCEnsS|}JZo&y(H8MP0NDKdE}_h-~BBNp*1szPH#ix~T$vBn;_ z+&8;^HT*+b*hn@Qg9qxYhu@#QwYNXHxRa|upb%&csSZ^Y-?A%$8X35lGMV_mO}i{n z!|Nz3>a{8KRtf@!dyQyN_D@4Jv30y8U$siHs6!F-!0!O$@EvaBD(PQ^OX4vAff*&A zB*|!H9qQ2s#E{}h`mSSZ2O>+~1XdRluu6saIc_*|Qce)Vs4KE4365bI zWfjzWg+f=8-q#8kaDRwmDMymgB&uiRiGWR{C9tq$)*~t&lus5DFo~iK7PDdRlk-dQ zo?*#`uV~l@BpWxSBq?~RDwkUEg<-~?XlmlukW^{+H^}t=o!^GfFq8aPlWnKM7E?LA z;yui|O={71Q~u2Q@vlWS_|oOu&++|Hjdrve-$8qjg1%;DLPfTmW@RUV3sHTTp^K{w zpd_iqR#?|T+6>Q~p&*e%A^o92$l+(`PZt<74pP5vZfhh?2fCYMy^BH!o6kJ_nF!6X ze5=D>OSBZrh}7*5S^>=7<;i4}9<>tzDbjGXZuI^zCpCf??fX?PAr~NKaONbndHuul z>b;d3h)(PgFtmfDSIV)zjRn&sJzY5PaoWZPJC1&O#Qj6)5C& zobO1*9LtIGQB3CAP-8>q62ESYP2`mkk!hWirgPq%^L@>^X^v$2UE-%z(|v>oDqk4$ zZ?@~cpOyA;4@xt<^O{f*)Bz;D8nb79WZL5xm#vcY6TbLlbzyst;x~?R`C;Aax|x7s z+otkA&DC)5yGxE5AV`pjeb0S?Ed9K|s9PV`zF=4qpgMC1n4=oB&m2tZ%GfoaC2Dd? 
zkvXt`vAPh~@?E(kx?6wpy;x;-1O3=iSlnk%jG^n==j^o$VU^K{HOl2t{Z*SvDI%0^ zT%L8D&_4z54?a5C`xo~(L=#MRH)U<%&+4m^NS+6Bl+NCGv@frIyQ$hlRUB~N8aGDA zkk@snNnFVawEMp3(ZV9YKYc56lEZ85HxWKVX7QK%*EN{qUxlS z(htCUd29i$yl6=cPX+@<5{yvn*KeKqzyUAGpM7Tg1V0i zO3Hn)euHk^zWmquTFfU8ji-Ji?D${?lB0=F*=Wrb!0$Zv%D3vq<(%ZNnOt@$TWEKw zmK6Zuxzxt`;x|7W?TB6giOoZ|uEx7ZXffR%SW6dYdO69XOc6!!WG_m+^S8gGiSCe8 z6}rgg&|2`2UC4FCI_oik?xKxk3>Lr0zenINz|u>@y*4KGk>m%l;!H@9VB!v|KW{Ct zR_CiU;3=-NYA-tzBB}IYrE+*4)}mQEIH43r$B~Ygb>#HvX>s(?uX@L5*jG&?=Q<3% zm|QP?-lw#HS}*7KJ_^SmLhE{iCiK-skvAE7lck}WBQs^XvyY7Jb-?645^Cn~B6^Rb z-jGjvT}ysW0D_sFgnik?a-aEcQbZd<^@<70^?vbQNT{pIOE?{z5e zN!wcU-YTg1l0SF+PfPiTgo!pG(1f0mBM@ZQI4I#=Lna+ zL6qcQ9)vHok}s}rsj`-FPOU+WX^v_>8=&|!0%M;TUu%PdnMi^$0*yjB z5)6E$P@nPM!0}Gv%CBV-vD-7Op=(W|*fMX9&y(rNY-Tz1SbrL_>@4zgsrfvD509A+ z*Lhb0W1#fN3E*uI{!<7pz19%XFx8aPFgck4CPv<=uXz^?n^tUDt35t&2vCk!zpN#Z zH(*B$6;${d;dptM30hfnp53{7>`J}=e6TuESy~T1SE!}sJ|>lydm-)HGSN4exRG7m z6G4Mhtd=-(V(srfY`=DVOZkr=e2{{6?RMO2%%9nXBch>f#tX_w>OO-k>L;W1Z-f%dr{e>@NqfaA< z@5$Qk%{m2bdpBwlv=l9GxN2&3d|nH)jAA3f z4DJU{wIe>DldMc?w1ri-VWV7rQTAW4d0a6N>X&b5SH%Y_j#-!padMhRtU=@t{W*I< zROXU%Nm?d~^}HOb|Euj@(dl2O&?#wf=ZC{DRdO5e4nWN~u&!1~=~^pEcBC)~xZfz> zTg=FY&nZHQLo%N2tSZ~uazQ+epKq}|vM34EGSh72%B_S|poS;IO80^Z7JU4c0s|L}k!y;n&`_uy4?v zg$Jm83*D-^@s4~3(@m~d$9c;47k)^9K}AhY?fXsx{11&jE~pUCv+WVFdWD(1fMJ7r zwXufW=?m`lL+GlorAP>7Zz8D}bk4M3iRA=VCuQJF0%p2{cO{DqT`(BmaL`3o+1UBw z_uVA(wSLO{Ru=9Ex7U3Mg$Mz$!hBt(|AS8jW(DgN<{$XFK)r5|9F zUm3TMk(!_N{170>=u6aWXWJ1B=^XE)X=dtq#IA-ptr5MjlOPq=_;GcZ)?M8l^c~S{ zTypr(6MHlSdHXZNC;5FV1583`oh)t^UkstwVdZSMK;4asUPu#GF094ugJ>Fg+h{?( z-~tXq+ypFPqR{F^SZ+DZk?4}U5;U4F3R2r0Vt&ytiR})pDja(-&X0j=$(YrQM#`nX z$XRINqn|WkMA6M|>iR*xc&#P9Z#`YKacA*v0*dKChSNUQh=(u1y>}PHCLXG8W-`*2 z>%A$)wq{LZhn^l9EoMCLmcR|YDab$Ui_Ex|1YqpS|CJm^|DU#PN<=%4Yi=K(ip-T< zM{5L}J5-N?JTB>sU7e6Vv$dA8iY>xA7N58L;D_u(|MFDWDV4C!jpHLxU3ZcgT-jJ5 z&FjaD)`?!D2hZBQyJ8pDiuA7YvhiGQbfqY=8e0JkP;C%u?aD26pe-R;KxPuDJP;&- zHX0E+zx1;(3#d}mmbA)gA|xjINwn zyVCJ?XK3YecOPWOoDEoLGaTMT#wk^NZDo|f_2slSHsO0jY6WC-RbGgz^;uw$U0Gs?s#m30R`Sdx?C-L@#us4)c?3ctXn|y|pKsjK1KO35Ydt)URTtVBsZFA?3 zvMB{xpZgUsWKRyWB^^5Cj`}~~uo4I?VA!1bDl8=wxklPjfC3<&4kJDfc3-Gjs)yoY z{akYkO5_hUXD({ViV1e{S%aB+GI6c+PT%>Sg0e>I8r#Vgv_tr+<%!h%r-62>3C6M=RTjF^3Cg_0|^ zQEpUqpU&;tM&8q8HQZ$p;?pRf4aDNjZA2tnMU&qg$zK{vRX4qPO{vL0j=}5+m*3BG zEmz=dA z{$BErZ*jy~@66%6A!jby)w_1EFp(>)XJV1VNE{d?WOB*vXXwh$b^xjvx$C9BSL(2R zB<6~CUTgnDfi)##z{fp7ZKxomjV;HQqM}yW>zh*lnOo+S;y(OtdCO&GR&Tx<+Du6( zD^#f~%0C*m#TcQ(3!vzLPitX|!LvJzn8Jaq>-G)^a22hC>BiIDZL~C)*)mbJR3-WX zY75hqS9KrAC&A*wG7#OUddj1%(3yX|$+?#gWps; zDQHEmJ5ZAgNZuo@MFOr|E77Q2T}5nDCZiQYgORe5O66G%8hl7ad35S_KjlIde_vwv z1796==MVKV9tEcN#%sX9dPkXMCU7DIPss*{@EUTih3P&)6PTLMT8-x&ICSGWY>!rB zKNGj_28ZRD_rN1!d$faF83UE+e-`z4|ES)#0ljnVOvh!3A1@{cazZ9am*2#kzBVnH z)Ggo~U}x_m2m*6;jchrHYpjfu?IHU)fAo<13QAP57CTU)a)-W>TGv-JgBmeqR1n{5 z@>*IF8?DS`c=39FC5(kG|2>FD)FVvI*r+MucgsVxo*$v3A*b^tGwriDX2shH;@$u3 zDBf)R$6Uc*YdXL`n}%z>8(A`p3U@U$n?@R&YRzA*EN+WFxr`f_82^sjJ}hqTSS-Z~ z6EYVYo`D);_-M}iSXcg^2yN4y*R07E4$fQS6V=-45|9TOA#4>oA_9SsNTXS>R|)r* zxYeBXHcH|l1z1G4xI>S6V{@Zn)8Pc2=aY8&^iM^e7LHm(eflf!5fANJ>!)g4khfpenA<>YDmOPnlAm zu~+HRL}hkFR2b2~4@uBawdcJ6h$hWnNR^MY z^wdmxT^6{p&z{h830Wl~O*xB(>YkAwxaTmoYi?ux>i;jWw#=SfnW=^%*<{y*1x(DM zz70Po5i#=PeD98Lo3YKAQfLn*B@V+7MN|ZyHS21g#{RKZ2kE8d081exzwO)n5(O>- zgp6ikXyuIFaPJYgs{DMI?hvquTp~ZH7)I&AAQ0_IrCMocdIO~M8z`*5*1Cu&`7 zYY9a~i1GnAcznm2Kxn?_2q&acbZClIIk^t|jFBComTCFeg&7aR+OU~?VpVale!r}i?J9|ul!Gc1;=BF(1<0%ajKCt}l)epb zB4axA1NTRb*t_UIRAq^|3S|zU7AB&Sazo?N_ONLt7?|UIaAE|`Qn&uT4)No1crbc2Nu9sqS zT`ZcHrQd|ZAIN+n;jO>m_=LU^wo7qtDtS!UloKZ0-@&p(?EkT9jsZU+^3(j&+!ki` 
zrUmk<|Yl`uj*cZ3@{dyCm(fSeJ zPr1oFI*@LPsLB5NOUuj}yCa4j{|pyaMe6QOMZ}728eVyvkdrI6k;7Y5lJqP_9NNhj z2E3maN8xO=C!!f1eY{?8_Fi1;t*V}H;fFf0PoEMmuB!xARxKBlBoxC!g!uN@k6W)T zJ=O_zL0ejRE1yW!Al;sX4=76_Dpd<9|>gFVLLDn@Dk+ncMbiK|UpOMvW_nt|w}sC38j!R!?T)vs!&BgLs(o7W1h zSXZPdew+YdhgN=#9n?r3O6t{|m?#bMGcTK@wJY9ulr22XLNYvlgXPv|kZ|(C8gFpi z06BX=lp4CQt}cF2J#!sIRLzATHdSb8CB~y&sYDWm{?>ej2*%DL(&0F-0$8Z$ zmpAFglgV_3e_0fvhFj16H-QJbK5wy`b6#~@QE(+W$0bR>*}(UT>JZEbYS*ROgcXIk zlF*7+nOUXrj>LXKuI&c?1KsZ?BUqX}5Zy>!@4E5UBlR2*;Tn*(*nW-K&Ze!Z8Rq0u zT_zde^w|VLhf(7KS3`ZKNt~wd+_{q%1RGOJqhWqr3q+4bLySPxIZptYL_mg#Q7$Lr z1@^P8iFEiE&-t?&4CD0>{u3|;HEFTsP;gcm|=WD*Pw3>O`YS znSfNw&OQyBJs|FY;Yj{-kL4kW+Bnf7euHjZ2>VY&N+7m`Y<}1lr{$-^~lzY(!^br8y!t+#*WjS_gve+uyk?QZ9puLv&hb-A3cY_6{RW+nz`BWZ@` zvI9RspPgIt8O*7zqNP&;Rtg@q$8~!Nw{QOX?CN?$sQol0Hor~G?fH?ULY4^;x`T$w zN7LZmR;Do#cBLe@Qy?qE9Yt^)$bZH^5?x9{ARhu}nH4okO;w?Wvo?(TLOMrlhYB%P zfQe04X-$)ZBUGmmT?`Lu)a*jkk1{Ri1?-}8A(5UGcXHmP?>3iT*-?Yb2G1=L+Wa10 z8=D1Wij;VjL#bM4TLPYhiPi6&-`8Dt3U#^oK90o{_P!t7M=41{w*9dY4uS>tR!%P? zF}f)nM6=+j91rSxLS_i&DXV{dMxSinwvcvySbx4C6nZWBVz;>)fLP z1Ifi0T5dFzWKlnI^J(av>IXEwg57+(FHC=;h^r&l|V1=V=M~L_4R9N)ZZZsXAe~#0t^t zxX+3)yJt28^$L@=<^dft-aF@5AFxEq-xfz86+8t}!Q9)C{;Ii8M%5b=zHSXhR=8+$ z`8vV4p&&aqW%5cwvY2)gFF!@r?X3Ivtg=&ip?qKlGF!!uDCqb7UR`f>r9%u_;i7EywY7L=jJYNBcVa7ulsv(*4X z^i|K}k|<%^X6DfK?8gN!+*Hmxy4#y++H*b!lVzWyDLv!tnycwrV8j5HvRsV|Q(-Tv z5$6bmdwLZSBgtsl^*byZY?sr6?<&M$?(Xl;!(Ww-ML3iG=wwwS?~HHJAWQE8UY{F` zcj$lNRpQwPEf~C2m4+^^2Ie)XMQ>v;+|>S|6BI1bl7_o3C&-ZxhM&<%>O5D0^x9Ko zUw2EI+=j5vf)96;xzk&0D5}`<7zu8 zx(m*5*dC~P`1?OXTa%p4}oNax|mE~V{d3v z&;sFsTFpWda-k--(?{uIGd4OMXtd|=qjaD*Tpx5Y`=&L7^6|B0_*z{2`|1i_D&E)s zf|h3zQ{#D(pqaur9lWLmLjbZ6QV=}-#*RhVuxcY2UDL34=yv; zybxe^AO^)EAr~w^RK?`lQ?`|)LW4O+-@fBB=i%ii*q0ItJ{%BXl;OxVnZ+AVA)^sn zxAdB7XBI9}(8wN=JWoFMfXLkt)5z)4YTXt46)lG=W*6)Kf{O)#lh9?>qNS$Oly&eK z*0|MJxlt!5!Y)~|+03!7B{>;F)RA%UYx%$_ zZz?u2CKf{=&EB)b*H4#d$+X|^+ z6=&uzO`FO`86g$Bf8}0&QVUP-WOiZT<>k>(f_ti#+vULBxhgjIzA}ZMOoNJC%qj18 z-|6-x-=V}Fc$Ge~QeN!{**yw3IlD1|h9Ui)3GxB9{bgy3Q>A_g18C0y^0LG1P0&bk z7!J|d)N{K155lzRqp@%lGbgTeuVV85+7Eb!;A>~8#G?!D-@g`4EjJ7f+R~!S zf320=*RU;h##lFE1uqHHOcRYjolY77(@p}z0BYc|rJB@Vg@)OTTlFxy(8i=YFMDQ@ zqd&VOL?EIe%}0+2q}eSHE5=8M4V+qMk#aQb*YnZk+Tm$j;`>qj=412JQ4RB#OTHPf zCTw|WKcwLEdyxB6hr>0Gz}as~mOHDl-qVm3(RMKGGpIAgDu6!${B4Ad5LZ6E*Gne_ z&LU#&&*pN^EU=sVCNSJ}LUe>LKaQrAiN|ptmr};V*&Zv#q*3tV>RY%u@h{Yu{T{ZF z*XfF4LlSmtD)&8@nKQj1qhHmdxsdE>pE)TOypwEaGBliN!%)I{?l?UF#4p2c%XjEVuT`bJlI2t|kR3u7mfwtFSX50hWIOTDCkR zHMD@!ft}{VEoL9yaf00W%fVFCRmWdY?YivLNqaMNtG%^$9YH1uub1k7vx>2FCR)Rh z*PPEe9%7Y>8o{ab=oZwlGiumzqeN7W@{*2P!yJ_)%2f%2`wmEatOWYhh1il|fk~Ac zZWdlK*2{{iqT znCFO2nECg0ifnY9>^3kuVh1{aDouIX@BlI*L%$OR-cPwMlGiYLS=PdGvMw{N#HiM{ zP@Wg}N3&hbwB|r%cjY;Hbx15=+g2Mc*|Es#8mJQo!r^=T*N?sKhWCTd@Fl)vK94B` z(5CxT%c#=XXnn~v=7e&{3Nl&_flS8mQarT)9KTbo;Sq@Gp=tWSf845;)%ZZ|K+&*b z)>8#J(gzQHqe02dKbp>%e$bf(hG7f9>QLZw5k{p+w+AG_>2m`%ec|{)`VJ$*&;(@r zVR(>n+LKxaOA`~#b=WLHQZ)6WbA@e12iuHjC%>K*&Li$UYxgvZ4{+S?Z-SKwT2zt6 zrj6p!+4T`-Dat76G@!|e6k{ER-Y?{HzV-~>rH>%TLcBH*V~1r&L5wMER{7RWFS_8Y zbn~7$cAP=ESYKWjqatMZ?g2N;@KN^Ry6aL2Vree<|5zF;xwG+YP?Jp?tO`Q8hB({4 zAlD$8o~osFp4{9N9-hf@MUM;?Dx=n~SnnTtrVI{V?b993!tYT>oFybOc5!~GjZk#d zQ4SD|A%B0=fwiOJo?=g~UqCbp|0 zw@x~&j<;v{vf+r|-8jcy5>)-}jIWwe>TmBiLGptcBE|2)?u5z+??&i(&8`&wBw8|} zzthG--V@XuYqBHp2|-@_wMzoyfyvqbS0kTY|EosSC{jyArs@23nMqWg-($ulx8-)< zN}k=x0OgP1*m^U}K4`KCZ8f<IB z1||xkbm3uw$#>58Y}cE7dI~;^N_)Gr+@YVTS{nt;e=D{n4lE5S4y`%G4$x}Z(IORW zkCiFHV9XrO!9?ugX-PA*F|RWsVSF|%-`dVHy9v7Qcx57_Ju6AEaC0c&!qmi}-y-O@ z7Fy1^8$sgXB{Z9I+)@yNbCS_wZ!<~2!c&7W^JgPYTacz&=#_sg*Pi0zT! 
z#nh9p@KTK{h<=ERpIwmo-N8Q%4i6j>QM%Bog6y(QfjkFL#iFI*me`j_-dwKLf(8t?hXToq^nG%A>}tts;4eOVR}>51Ce(=( z7A3Qi{*Bi0{;FM4o&?EW9me11rG-ouFml%USB1gI$Y7h5KxRVk8>zq{uELP^jA`LLw=gEI$|kql!oVi+A@E)0MTAOX6KjOuE1-gnnC-pGHOoR ztuzyvgL96ipvVC1wNeWjK*o0J3V7ESEcvA4I>nyH+=YK7F8rG~gU8I`8{Q3POMlLj zX9xB-lQ27IJesAG|Dov{9OM3Y(!Cb`(QZQHhO+qsyHZQDs3qp_ar=lOmA zg}wLfYj$>KmQd*+w#u;a@p}F36{3c#Yi^S!!sMLjCt}EEe4jc`P2*?p8~wyecTSG1 zsJBLg_gY%xF8rt=H|~?{k1wr%3uZ z7CHr)C(qppH-vyLyyY)Rb%@NgGy};)ugxXh<;L4jfen*MY{W+Q_%2cCPVHEj5z=JF zifhQfa>|aC(*fkGK4LEu1KrypNIyi6V=w zfYAO$f<8UCSQ2m$@TX^`2}`N7m~*IztcXldswWm10e>aOZ)WC6NylyH>Sw4oVwTwJ ziL~>Rb`(V-Hin(IPHzfmZ?VFY{g8m>v+VKU*CnFMRn2x)gII^&Kh7n+58>uz* zQ$IqcqRuj{A&~!}VKwJ!{$G9X@l33ehVnkCJo{GmKW``;7?^(d72-4uEu|6(A!Jdr zJxcT2H5=O!CRJ-as&g7LP_&0lm^LY*ITu?iKL}h4UY9;Poo^8OTL~a~b2&T^cBdbx zc~NfBri?7(Qc{XJhKq&T zS9%TJwy&;M+Cd$q>(BO@!Bf_!jL&IOh2JNUcA&j>GqgC*#vVam5ZwZ!*Zc-137K`H zajrj^1(6q|;DtAhjlU9g8QvG&2lCkM<7{8ykCghm^AY*hpVjiP241RyiXOXM{z*8? zuu_E;yKSQAWtb&xPOfVpao+1%E1$^kPI}7cb4DvVEQtF^31npA{zG|alh95Ug`lVO zR9bmgH2N*>;j+j*-C3OD^VGU%kIQB?p!m!&-(UmI44vIG@}a(x>NqsB6}^=3>@c9| zE==PV>o%+N;3?r_nk!=_A0Pjh1r!ou=+<|b`L8$a;}k2fUB=C`ZxpF~gL|L1=hzqt z`Wq(bwn1L?9W*2xmQ38AvnOxQr>)+Yu?ktxB(E7K<%Ex`X{|%{nJ+RRQ$ySK?yg=> zfD~l)CtF)=LG%%grb7@pvy+M#6O^6VNgTA zL=XiB7gyStbcgOoT&|2+e#{h#76_NJ7!l&2U^N;Tt? z@l5p?g(N&0szwgwCR}gnr?2b9ygx9XQqp9jl2L9=NPMiw=#5%eXW_n`S-ngB`(?d$ z7dRRoKHt$Mi*ymjrt*gSNZ9QYi!03(mLHFOSxG|=MY7EBN4(#0RsbjqZ|Trkvr?3Artyu z*1Q>{dzzE&7~8eq;Slq~VloEsfE=@`<#=*JVNHUm*b{?{ms&O-v<7I+#yi;>V`LL) z$Yuyl)We}%I0oapyDt9jPq3eO_uq7xs6#XuyIeXSC^2rojl}kSfW1fkvjz1V@;832 zFdVFsmy1BBcN&MMnfjPnGeU*TLGYsKiVWn@RBn8)vnay&+WRqDH{4r8$jr@sY0GK< z-uz^z*SB7!3wal~;U5{&-n4{^npO6PzqR@&JPRg$;J*5Dc91mnLOxrgoiH6zYaj*$ zu7N47jw8&6R`0|dfx1WQFN>>Zr&#*9Rq}9{kpCGe2JHrJ}^Jx)H%bgenjfsdVL?7HM!b+or{8AK2K# z`C3L@v!V`J;5GBfHn1HrUcD0Fcm66%qY2_#lxjF!WQdt1kfH3Q8K;8J0myu7@_W~! 
z*g(d@2B}@sy)1Jg)@=;z+dk}wwsmFLKL^N8lV~UtmU-~4$}&=7hm!IKGqZxFnmoT~hj+n#W~c^_Z?39S3~CtJHCgzdZ$G#>%0 zX&U0XDoK+vRasJiD}5{o^Z^-1ovBb_%i`js1yb5}`PFvL)NJ$*v$=nPVnh zezRyx@MHsy*X?FKiyK&78_g0;a(^O?B0hS#ru|)cuBwSfG|h=v8%mL7L5a=rcZzjA zX1x9TA=A*t`XthpYM$Mk@Y6lMM`8#F8J2c#+?(i@BGleD^kA|0S+`O^~|L zx8`#q;f;W?&{?(dNMJq^0`xWy1zG^~P279xk0V%ygakt4Y;sV$2K$|_=k#7Z^l~o* zAb8k_-sNRVE_;x&^_CJ}k|$KOk899NfImoUsDG3kauzByO56PlobCy>JQ4MG zx~mN7g_r5g93T9xRdlx@+ObA<+qb5SVVUbQe9-hmRH1xWcqzNlenZ!23kXH<&epVV zBc|Xo7_DfCE6_o)ws*~Ob){zwf-{(CW<4KA)|*`zXQflZ!|TRXJl_HvY*aKSlH?u~{w-E)1Dv+sAZ1@m>kfC72xi9VgtoAdL1)Z;;^8R=0D#6K ziJB022I`+CD9Vj(fyxP5I`)-)Iw+=DKLdt zat;Cm6El76*DNQv1AtmMM8<)C8sn;N0uN%44yQnwYEeI6Z`!pCW-uEWG{IMZ1K;jBK?z%w6#LIa*aR9_r2XVN)Fr=m zS+tXnu)FNeRBZ?CQ|tv7ggA;`SakD)iPyD`g&}L^-6QHPie!y-htE)=fPrh%R(@H| zPIr^pUjVv#Cl4*YN9P32u+2dZcns(<+R$S`J!M$4jXh1haw4seZ`YCS`w|GEjLoF{)$8)Svr-N)YrV z>ma!yzI{k>Ho^;|NDo8*xxGfrugs0jWMFM1{>x<4o=TeBOKzcS%0G$i87*Kn4b!4dzB1Z0K+JLh zed_Jk(5v67>PDL3a;`2{{ob3acb+uL4Kzzp6f&B2pBCwfKxAOrMUeNZd;e=a=F!KI z@iZ`?V0z9#|8KmivRfpe&#WrCmQ9&!D5TJ$s--+>NttF6qLv01#Gg(w;T_{3$bnMG zQdSqVsGoRYAgu<$H&$wbGl_418*53}0Q5`Tm18Td)v{sbi(*-K?%`TmhLIj=%0Mo< zPUy$aLa1EVt>(NJ!#jN`Hp0Z3b*Py`JtPa^G5N9h3=u}TV(8(?UUslGjui>)JX>-3W`a7=*D2M2B|+sL9p_^JLX|a=9omH%+2U z`RmD1StA7Y4>>wvhM^^iYpt9+V}}2bvb6nue7%zk*jo6P`{YLUu>GbHD&UT;#o&z* zrV3w{&n5&F6{iei(QSNPO#{uOK#!C0l|CO$+Ik%FXJ0qqivc8P zvNr;J7!c@d;PZM(b42Rf|L*61k3JuGa--wrc`P-)b8CO49^ zf43OHEMAE08+F*GTh{b1YSUq9bh>f#f?1X*|8d3OjFfuFvC-RHY&BcFV@-$XulLrz z4u-vS1QqvmyShMhYM_j_>J4Mv|Fi&>CsH9UQjL@3{0tTxx0#Xoo8x8#A+6ONPhV6c z#~Czwt^dKg&iam)4@wS%3t{@i`EzV?K~8b;{=u9~=OEUHMfjy^(!{^qk}-@q)rW&@ z&~Q~7eB)mrLY=Ohvh=EQJaPCT1VxTTo+_k{PoWDNb~9+0W8dLGxUNmgVSGu=;m}P_ z$bjfU4dd9(hRz(Ln7)S0e0)7_3zxbD8!B9y>$3-)B4z_hse4;Vluy z&aK&G-VIB&9kedn)t_w5B6ZnVtquPoi`Mva4r_MxvD2BZR-=HQ>xw_-EvM|`)wnXFZ?!hw!^xfe4p916^nMABozZoKJmYMuco~BG%v3rW{xzJ{ImHCn_Q@2iEd__v z$+jzNf!EgT5RYSN^N3&LrG-DjgZ49F@yn9GHl#}J|1=SVlw@Pa2$35nCrm+SXEs&= zILZ-LcK;v`w3f#MLgyzdbFs2+NErXnP#04cQjsa=#2E{K&TK*FJrmnPPB5Did$sO$ z{8#9SdPnP{`4*04W0NV;%HzNZgbGy>jCDUN&G5fn&4@?kh)d<4(PsCqCgs$+R>dP$ zw3?tu6OQfU6>b50STsr_WKdqcLdmlF6R$VfZcq0wkOjecQ>A=(`Kk*m$IL!M8NSRt zu~dT9IMm%55l-Dk40Jqf3{0}*__&$*b=Ys@3B*1N9K?cl|4v;U{pT|~vDzO|)qaVO9lfvFCoR=(J(-qLI<@QU%2DbMz))z6V2eQ})pQ&%k6YW5pk5@~MF7$z}{((tm42hFqx;y8EG z4>i9eK$yO*cu)N<|GH@JzvRN&Aqlmfyk?Fb$OgVPoJ+P8M7Cuzq(4^1XNP;GEy*|W z-d;SLZ7k+6OD(qj2f06W)X)F5>YR3}?RDclh8>6*5AwS>U+C7-aK>yLVo2s**eL3m zaEff+>(nIM71AGfyQ))-2-5a-<qL3}SShH_aZ1vh zaE$iFydz$owF;{gY$+$;i48XjEHAyzzQ%^pva3h%m*jbYiP%E*N)w7?%wDmvO>^-# zA+~fEW;-EbI0-tTi>&@Y8*U@wqaj$~ua7Tq2cr!j+#&myHCrcIs`q7ns3BJGl2dWs zl?kFg#F#NP?k|5Q$&!6T_La#&Xe<3*`RycLX$8S3%3`1(dIPk9;_MzLK_ikEfY!FX zzT({cbu>~TNxJ+>&*2~D2yrkz7*g9kKxWImlsFA_ZZXx*DAQi*CRc$=4Y^Ex|66Fm zxLj|2?y+pw=&?8S&T{#?E$N3T?Z-t5{OA41+DpQf(OG4je$l?xT5xsF0&7NrwyjCS?MMz;w{i-T+H4tZiJ z8~#|0tcqwX72#$hmts^6cMX_CNdTgDTIz$ao4ikGpdV9l##-1_K z&+x&A^&zXIb|s3VZ0MlzB3KJ(=WZ$<|5<|qC97g!UuKG2lF`uiV{Y_Bk?f?Uut z9YEMP0$#`^HRzKV401a-Z=$ch_VgNGO!);a^0jfFvYovKcF|RA&SShRRely9Nr|g( z4N^!j*wVhe+8Mo{^%VI10=YI=2U60t`6Ck1$(IQ!JvkfFxAJYlH!uZ3*o3uTVaH)@u?d-7f%{k$hSM!&KNgdIv^ELzh>uCb#PRjY z1Z1hEMb6bPdfaUVZ4u5z@pSlIErM5?!i;$hLvY}2sLV@6 zs$f)z-&cSv8r}C$Jf>@)6Bj|2iMS;D!nDrw1cwmmEWEcn+aFT{TqpXIbJPy^vh65L zHM7wd5v%3@qzIi~vb3x}hMnZKMqLX#bVkhD$%hG;KlVb-eJ6a~4Ob!o)3Px0F0Qxm zKIaGZYt??kAj^c~U1x(}`)SZBjb3c`pL=~UTb51gN(pvpv2ta8R=+ON?)3YZ!UUmJ z_@YXWUx0?{CzY5McnXzrYWUO12h|6@-9Ehc8Z`$er0T8}{m-b}f!jtB6(M{zR`gNk z5Q=bWUEv#fI+!m}RwWg&W~SLREcDAFsq8T#VU;&;v!uhkG$tkL-^mX&e}S_}9XYPo 
z>R?YqpPk{T+2Wh(B+;3>3RZk~Ky{5Fo!u(D2pP3^$Qszejkl{BIr{dO2P%aUboTWTsokU%F-}wk(uwZz*s!UMAW(WF?oj1qS zJsS)^EfUxyM(|LQ!N=|A)Cql0u^-c8sg<=&N{v8Qpo5B$JET0&g>2;&nim0g(A+}@ z2JODBmiJ-%70^~r>(DWyp1hjWDM)laUMb{a@|OuFpbf_cFXPB1JgmFD&8Wow?wfYA zYp@BPQix@er7-OxPYQ;vMTINKQVmy~Y}tOLwrQPJpM(c>45SN>Z!6>7;KF7f#p5Q0@ClEZl|oJaFSxxpbnDsCiW0B`&g6fI|H z?AVRf+VM*yhcOW8f?S^gt$qap_Wt+Z_%Vx(S7Wh{^HEcg?{6O~!~ewqU9wi&)IXoi zh^mjmSQ?+Ed$F2{hpLoPVBtwZ2BY9gJ#vnQV{GExjPD!uBo%#@wpxP3dC;_5ZyJB` z?MD3G)mA&6ofhH&gQeU;1#FQbr+!WDEeTGt{&ay|_!0r8(t_Hwmh)v}jvXg%Z@wxf z?Edszz|Ut?M1dD8^8TWO?Vu#TD-k@M7G?0)zvyOrcvQ+ic{kK=U#$AhA;YPGmf3bJ z^cgwhA2`8*q+>diN}bu2pd`F|E=d^X$v(>_w{X}JNEIeMyr<%7r*6A?2*%LcuZ>3} zuMC0SS?XI_%1m+3mK;E z&-Mu;Wo)?PkZH2mo1~v(Y@ap+Rhko+&C8j_Xd| zZAV$QpQf*)B9BH``L+-mhTt^Sw&BU@(oTeHMW92(3r?1$^i!y(r^yFdR0*My)Dzo zqWjBmvh7~JzNvp#lEz}oWoR%ndcdzV5)6>994f`0%2cnCS7t67!rb~{A zEKqK4qo+$oBSutpaDvPb8PaJv@PL8+WmO^`k5+}HR!6XJ8m%A!;e0-!rR%+lsP!?B z{DPbsr|P}YxK9oW5Lgb^PaU`VDNPBBPV%|xz6Gi14L7IvTTMocg4C6T$l+wz~BGlStKfDS8GiGqfb-)(mU`7|`ue6Jee2y+Prd#!ZD%Key=$-J`5 zemE&#Q;rwnt6|{(ob@L*JT;j$dSCjyK!K=G-p3>FX$dvGknUTP$O_XxLcz8BRQP%!z1drU6nKRiCS zBqhw35($|eXuc0~VX87` zvGcHC5lQDeER)No)L*)^@=-V{XQTj7ly{pGlh;HZqwoIraJnCwOf}_J$S9TNUZrtj zZ5f`mb?V~7N!6pI1j;cXSI>SLA18Vn0-Le12cW%&H?9_g&sQtIdqi)r$`CPK5eJ#i zj*|cylhefSR%teC-lRX(@k$Nh(C9cdzOM5=Zp&s?qRo(`r>2WDP)d2F+=SE}U#IbR z`Eflu{`QXLyE-Y)D!@Y&aWJisV7URbb+(P%r(=_6F2|aWJ5T?7E?o0{SPIN+C_opE z*HoaI+nx_(R-p{qU19{*!*V~xzuLOx*K^Tm1AXJsshNK}ziUoax0-IP7BHr|<^g^U z{;j?(EhgWNvfR|Ws8b@F{<@SnIU0&cWGy$^h&5CG-uPdEDM(f0_#{)Hoo=CrnIwh) z%LpoQ(6}~8Hb~5UAkIF>u|d3YY&gcI@mtqLC)Fw`9N6sqrtwGsg9oSM(FoI0d{D#z zS-+c5m%??=I0(gUW^j5Z*G%NW^8di$*Hnw?xo`Q9*jPLwmm_|Y&2*&gZp&=8@nYyf zq7iro$9gFOt(YEn3j+c^^zXRyL37qZEg^5`&$txNb-!o_N( z648@>`=TZd8?QTF?J+O0BeDphn|g)pHOL)~y7~FpjjfS!UM~xnx(r=Ag!>zwF3Qg- zgA+HzA%@c21u5>uYgs((YX6YZR!&vNqH&sYmVNqrj^(hZL}tKPW+)Mh4mKCiF1FPa z-k=gJw}eoe;L-xPZW!X{oW`<*xwoE|x(@=KSyz(Csq0I?k~Y1_MT_X=0g?tcd5f<$ zPmx$mlRf+i{2`L7FydLS$A~ zHP!of{-30j2jz9=FVrd>)BuZZYR_uCRa9vARHGT6wYA3AU?chh&MfDy;r$*Izdf4r z*M|qB)}jOUeSU^dI@wRP6X45X-u`bkj(U$priF55y9)@eJr%Vi^a@Q8H>9D80lC8GYY=z! 
z2p+bRRcfNRR9Hg;{65C^{LBRV7&9$#<tKTfD8k; z4F&~R<6eNISt|rq19=6?BghioI#9)V=0+Ghk;T%|2%CG_Q5*r9a^zGMkwXrKMq%;V za|CnLC5N%RbtX;l~}+q{_{4y-fS0FWw7@I@w} zz)~B(|K34=Ekxjhm0Z*!W84ccXHRc=Skco5e3+O27vtEEZO)YS)OsRTCB9GAbIJmb7$Yywl=hUb6KRq6hJ|T~MxKQFj;O1Y{KIlfWTb!Hpxh#Kf&Z z-c|gaX6gJE-zHZn=7n!83ee0d_K;xJybzLYdWUBL!Y@R>&XlRfOg6;3pEjEnb_iJL zwl2S_EX=_~=)?wvA?kFrv`8{GV=!kliH2@V#={p_bZ_P+1+#OA&$E%C;Y6i_p`r^I zTY8I6T9(R(eG^9cyLlJ)dVj@p;LHvsHk?2;-rDkO@)_-E0hH$IV76P#(Uq7oaE6BM zsAP`4xMW&YOrY9FXHxHP`qFlUT!}_Gyba}t()u>?xhb$38|(KC4)^T*bM`estt-Cf zu{UI(s(;y&^gSwIIX*rtuup;Rj~UXvX(PM=elzqkJ+z8&1L+uEhsmd5)(y`7Hh*-U1l{KCVc%JoJ9MW;FClp#gY<@NphmZawX2g zfhO!1BAgTl+R6|1`m#?sN*vMh;3A%?UN7ODipsx8u+~_i6Z%q6%rt2HfG)wp z>gw=hp$RqkMMs?pD#hEEA;Z%t@IVeRMVZ4XJ>tkpG4jNwQpz#?Nh~kw#(`-Awan40 z`>N7^tP!3j{~M)Li%OEX{hh?{l6yoBfw^@5^jtceg+Q8du!wtBd{m9dR8Cw1S3p3T zdh`@N-(B^A?|ZB63)?xLhPpsHHV@e-3R4z_LfWP1DM;K53N6Zz13qMz1(ndM(0MKu zfibpZaH%lYp)o9OTJICN*y!q@9~39;E$jJk!I%8Qa-UGy_o*Day?4Cq0xoP>RVf`r zOiMMXVa#Kz7x76W4m0N1o=hULG*B3>9{Nq!t6A8(tl`4wkwfxi>vsJ?SAUjPzCyZ$ z8t5Aseiph?$$IQ`stdsKLlJ6CXkW&;l|vB!UbP1{T{n>YR8(hd zYdha73?U1>=N*?SFn)4y!QyKq;kEgWvZP8?OUd@QgMncgFcpGLn8R@A)*`!P-abJ4 zcvn#QldbN?`Ym3RU7S_SBuct7OLHLSpkNXD(*PR6igzcolKY#H78_;w?T)qX>tOxA z*#zJ21t`GtCoT6TOKzCdlv-5}WySNUd5Ve-d-{s`8!BL;D)az||6 zTIYDbq*!PjRES4DJ4X2PDJjzd>+qaRMu%hMLqY?Z0b+u96nF6uk*VnfNGvAmSnBF% zExOm#h%+g32X=nY){$S^Uih?bWV6rzNa4$j~#i?~K!oB5a$ zaws!2xUj#yW>7NKbjVvblw22>451$OXHIWWf%OwRs`LylIs_XhdFrlUH|)J?uuC-$ zDqPFoVuqr>J=hdhAzH{QS?JATbx|mfA%(%z^i70K|CGqt(0WDx*ESGi@%UawpHIys ziAW7)&i$!Hl)8UJjaO2v`G|szum%h9@a+rn)_)$h8J3VC(oni) z`-8*CPX01@F|PQH^j_sziHYqe$LUR7hpsUC4Zx{$!MZF|6zSWRr@0QpgaF`x(;YYx z0?KdRp{keV;8kexI8anzD}U!G46tILJ8IDGp$8WW6w{e#p$GQipDQTre+ziaAyuF6 z{(`DAHd-rp$~Se23Ki+IxcH8eZHf8-;gm-!#Qp`+YqHJQmH7l1)*VAiQfYiv%nB_M zx6OF>SYV;%p<=KM;9za3_K|dIh_3a9zdJ43 zczarv$Vze}pBcACt3|ttTLrwzwDRcW?jPGOWY}u-Jlj{N&hJTj2(lKhJTLz3G_kvN zJ#{|2i2Z>Zk)U^thNDQkQg1#8E+5ZSSsz?ZjmpwxjAcsi?uOPlaBt_yqV@OJS$j5i_*rJ*F>4WZCci11vi(C8zM!tprd4*e6I1~ zedIu?#dLlB!L|N%oK5&qJg7+UT4Zxuo0^SCA!&|BL`gCnp+YW^#I)UN_~nAE6|UO^sInIcZ7WfA)7LyZYT4 z=^f3ZnT?74sAiSt;Y(%@(+E|iGR%FT;pop{A$$Q0dT+`sD1pFH&X?6 zzk!6d>xyp`ea7UTgwVexKDKk{rvdQ`t44U&Oe9o*ybVuxFNA7Kq+pz_Y;lu$8mxIZ z)!koGdZIw_sNQno%5-cXDtZ_9Sbd<3w zvu$&kKP_~U%Qp97{6Hf%_a&dYo(_+jlht<>gULSu(xl}jK1I?WEe~yv=kyr&#;#WD zY|a}SrA%|?%dz6%^y>IY7BSvXTwkU8_S1N?jnqDD$j!wh*|BOR>VLZunEmU<_(Pym zvxeGf=IE9LTKy+g3)U&cLg+W&U@hBkzyV^ZvM#nBn^n7edW!;!u{sgm9DW$jQLQW= z$H3L6O_^1^a-2UZ9)2E>a>n+_bxu+%7)R2FoW?WoYvx;KnWEa8sE^8Ea@ zjx{#SE30)?VJ5{UBNB};wXyEo64h=xpNwX+pI>#bJXMQ(N*iWII1cMoHh^U3mG{Xl?h^DZUk37c)%7C!9Gs>1K50yz z>}Ey2d>f*X*; zHHA*AzeIizOHwdO5wgH{YX#`}Tz7v}bC9lqLXRDOswbO*&en;8S>p;JDx_DH^Mj@(ygWdYj-#MhdaowRyZ$ zgkbj_rm|dljqHqOn2i`((tTC$@1;PUZ^Jo_{>R<(MAHcPZPXAdFiiTE5%V152wT7?8 zdJUl|!_LpYRSnZG#hkiCE)|8GnnXghM}Axltu(%?tIV7xV+`-6-Xwq$}by4QstqrVvT=wM#_dgm#@H^Y>j<; zq>(zWgD5a0b9O^fuUOMONl);YvLO7Q7GMS|LVp>)%NgO;))XDU#r~bYr+u<)?brM= z%0Rn~Ux-m~8kKI|Su!*aguNjmnYB_AhCBs28goT@qE^I;$f9rLI2s>OzmjE*7zk8sce`6O2!%+TRD}EmYfXkM(w^c zC=4C#)eS55lMA^NLH|zkea81?;P5*zVm=^74V`n>vG~#FJ@`m}&(IB8yvFwB#BjV3 zT?$Ks*s^I-jto>swWQ@rqH?hbIp`5Ij8@LOk1~g+iq9gyT>Q>&z!al%uaS-P_cYKF zu(K!Q`Aj#c27MDli2rhTQo+(#MxuamcE9FJQ(j*@~aC{2~)0h&nwko!bg_Wjq_{ zRpGd#7?USglBi9t3+VX8`m z|8lAJx0=+5ji8$NIWZnji{~+2-s>jg&N}pd-vEzk(`=v@;;F1B5V%Bb@Ysxoz6k3;G?WfGUR57%LZbu2p({;sKr2?@WTs>C@aEdRjm9=Y%KEcY8yBN8%mqqD{?crcZ0utvl$(BMk`h zeh!qkp%+?iQra#`5`+hHQ{m0y%8d?cQMPyqmh20wGdGKmAnYi~%ZR@M z`mYFTu;)bciP5krxdFM$z3<$rd2h;Ht!B=fqSkmy)&w$AbK-N5Zfv+(LN5BIYx*b| z!t}CBPcCKdpwrp@ET{gOLs6(EErRUUC>i8I=BU__EX-)gG|8y!<`r}7t=hqHb%e*z 
zp(Tle38S&{f;x=fhm}zyTKp%l0J7gmBSdjqBilq{D%3X{%@N78s+5PvWgqQAy|{Nj zNS*Q?|JPN-;_$d0m$i@#n_ck2SZReE8!}orxf9%GrBN{?TC5l*AM?c8x z$T=sRDEYx0)HpV8F+G*`&cJz9{bAs`$+I^-f4@Rx=f!e&!WZ&fYN+L(#))Bq3watH z4^@Ty+p-EpIRmG6ve9z^Php~^&-HJ{3*^D}-48q=l!TUw<94^P>x;z45~amCMZ^8* zh8rk5{p}4FZd)k&x7?^Ui=k`kjJsd4K3D#l$8_Na5{e`6GuN{U_5Su>S*FuOQnkxP zuvkVL4ak$~1Lp=36}0Cj^U|}J()SFl|9o4CIrC0bVJD@PFx5Y8aNrm!4n6EM?o3Vg z)|vmdpHy6)icRO>S9w%Nq{*orfHMdHGFFbVt`z_FI_>JI)uY1)*&?3AI2*kWH=CD? z{@7HdhpK?Zg~DrM2tzhWsJ^N%o5Ns-Cb?)D*Ew1tBo2QKFCkqJs?|0uH9*yhaD4eg zag0zhU)>y>ZWU^1vqJq3nBy@x_lWpBU-^p_J{7}+QT>`!n&(D#)CxytUXa0fjK=et zui8aHIwdq6=|)tf8DJw6)_a9LuJd3zpiHhwywK)ea*lMDwGgzTB8Je(B_qk}*ho;1j!jA){%!X*zO9^7S!2#waeo&}XnD~O}0YQ_6DOSz34$|~g$g!fY#IUR=|KQV2ejy-Gfea~CzP4()lK)d1P1HXtPyZ6`+5!X%INjp!tGqDf{ zVP>Ab%rV1p12{H^V1lgFV={7htm(pmHD-HX4P%4a=;QRzdpQao{Y|kzC0=nDE0-*c zr3bPaR9xXbZmkp1ji2LRR2AZe5tZ+@8$dS&Y-SrZ!6%}jC1QZ20SPs%nKtaXfAa?U ztTJ+lD$H+5MGF-o^$1Q5^!~3+e2U?5J70Iz+{^r;+PgB$RS{{>KRzt z^1Xj???QtR;92TZ36>Zk!8J zn|QjZnxjU`Aj&n0#0i4W3FjUfJjm@Ez4fo&3@#kh4~BpHZqe?kL17*aqkCE$iafSI zlU$z^M$$JonwAG}1CpDVWL9i3=ZB!d-fu?WD zde|U#cd|la5VZL$z97jaevYKODtm&N*=(l5*mm3YBaOqlAV>^oDj(^l5(KCMf1KXL zMNTYqh_?R&c%P)U9XG=u5UmKn2A*|B8~fQ?x3MI1utT9w#DXOLalAR zz~!&WiL?~i#IjPwa@cRgKkU={M);!6+#1^%NTUi*-__=~2eNpM_&srZ2H!l-$P_MN zg%7A+BTD#_6*dGt|$H@5!ej@W+;y?DwEwvuX;qO>`geSK{y%9$sDD2qpx zukp;49AF?1*jqjOZxvQdgO@Kb>1m#rQ2Q0evO`Bf zxJ(-%VV?OuL>CkzTf#5QWBeu%OkOfzgkX~YFINb%lB`=wH zOt5!LKCS!;K!#QT&wPw4?vOKVGdGqm3ua>qG_;D3F?6U)X~V22xSZwWO^yUAx$JObmD88t#}?z& z6lpOnOHxo@VVbNEPjU!vnfy_T-9xWA9gj0Mz+L}Mgcp3lI`@L7>*SYU8P#CWnU>7V z(9-6gdhrH_O?QyCJ^McOy&RU#o4^M53kqz#8|JqXwVx{ahw=S|LqfyQqu*l&rB zo3fki4nOiCWFjC14Q`wSfBs%1$GXrElr$!G<(63J-1hz3au+{FPmKi;!zV+{V}gAC zkyd|ut9Ib^c+b#$(AMBPyQYnBFO${fQ3m!;|HG-_PwUX@cgfC-W(fD;6T>y^Z8MEo+fD+MfACcz1&FHgo`Ob+VMo9B%wKdjVQqQ156(T zrie&IPz7Kvfo3UsNJ;kMzfR#W@NtL$4MvKNl2y+OXqPGQad@H@&j5ZppDdeo!AuKu z`uXEOs*BeoxAvyom=Qz~Zv9eGAO;HF!1qCRNjLA%LC*{+ND|1Cf0~^`0m>x+@y5Z6 z<~!HbsF-*+UyE&#_(*-O2@t*(BwlB4o1%M!s&%wl*x(Hh+r7K8=e{XZmiRT`kHkdz zT??fm;kInoiUdO6l_#cD4~Tmb55%&(t>UW~nA-QyJ2o&hw1!3*E+8nB7ujijbSXB0 z7vw`Uw_O2<&zWhiITfRk?6$_Iuw1@Km+HGow+Ndo1Qyt;U;dJ5%&l#rKmX|2E1eU3D;X(1r+M;KnQBdvi z-_6kbta_g?qIiwCfQ=@QSH@is+A3#ylaGG zVfZgnz&O@N5nLwEs&$!(CUX{3fRd;ZgU7M9aZuP&y`>a&-unB_Ix9{=LFMH&iAz(8 zZ;3PBBe2X5yc6v6ZY7saOY2~b&fKg~qiwG_P>p=5*hAH;MTPD#WnIX#(M%~*ZGlXP zraLiq?t&9HMdCD?i}##HZqXXJ;dP!w{ ziFkOvP-o!amvMsfaQzBMDM-<8)Klu`ivPHYut!lVNbp)TdgljLCzETFkS*6&@&#f+ zs6g6RX6#fNef}W#)V?AKeQ~NDRYQ)vg0?Vte?G!e=li5FH|HPHqS^Qn+;B0gd5KJ+uz7 zGwRY4bLoSc7BQ}5r@TgOm{1?Yxy|jy70%j!CxWgMJd;U?Gj#TzekUbWGzYSWfD>;B zW>pZNK8{I<^w_FN6gVXYqZZs0G(tBD@e1?atq{L_t9nQUm39-wIb@K&?{s{;;?BV- zF4B;n9N(b9T-1relwSs)IfH^Po89IA@$?Ohb#P6$O&i;`ZQE{a+fHL!CnvVmq_J%~ zjcqn=(pdMj&->l|5BBWAS~Ig}&=kQE&3ft%3y%{HcMR%|WG1Y(Kj#CLwOg-EniY0l zKx6swC^2S9$Zfv>K-1eng+Bg>dgtQq8}nAoL1><8iOo#yScU>@QZf0$aE580gMqe7 zOaJ-+Pl5;e1r)FKkADAiD4Ob4-Ua9(w8ag)!1P`tWwA1HZ7MDAT{6=WlXB^%Z+gY; zDi}X_cCO`b9cO+w#@guc{U|5{z&I>~t}SdVo(ge%yqCS#)1v?24tt*V*gwu)?pX$+ z1y`AycRMU7n<{S9E3Ez6PND|wI-px>|n=E1-LA^F97dhAnt~b^~$sjv&cq2(-PuWqL8#<FR<=XG8!^Q$H%;!-Is$o8;0I|1_qM_vhZ(pa42r>P!IoEc7Xz7Chx#zcnTl=a_Jh zHC#dmf^}9*fIO%qiS%$}zKu9jow^t%`o3A`*L39NA<=y+r1G&R;B+(g=5xW#Vm9db zfqQ#6!(1)f+{|vmtE!UuRv_LpaSyS*W)4d`O=HlScfOzO9IjDv;18OyQ7Cf1TSnc$ z9=OnHy0?tY<|-eiD0f)X>~KUG_pCSZia7y@z-Hw<3_o;AP~&Tf-eeAl9C@>0$WK~Y zp;OC0=+dgUVZXey+jJVKOUqh4dr6sdCjH5P*8zTpmvy{U`GQW9KBKHTd5i;t@?#~@ z9xtiJl~ODG6dmRl<$7BMn{l+=c|F^#%;SXVdta6Ji%4rk0O^+D;I1Zw>P>#ZcKAs! 
zcQ%zQ8sMMl&BZZzVnXqHd)e9;(2|Wy$5?eo8iD4G#}VTdS=40T2+8nZw_5vswfN@| zAxha8TW*lwaX=<3IWMmQIOqF~svSX4`w!}{S(~*;n@H#z?{8d<)EOzHvv0m@1r!^yQ%eQFLN%n0;a!o(v#J5#XdVr48}M1pufIarZQU${ zBIRYFtP@X~nbI;j%itq(F8k=)fVhg_C41=GoVIY$m<|wc))h^L)c;OK5LMi`%{L3|)oht!B_p40`5!krm&H&LbGolT$+#}5r|FL+=K zO5x;1XXDG6fimPz9?~yzNZ!m+FPd8*kBYzYy1-GF+j@{44Uh080jyzs56tqZr@Jyh zi{OEOo($_Dt(LclZ7R}bTL4@ha;8Fwt@?>`6*!vVV$M4 z|K8P=V*fs4YUrR`08-DN*}tl|TU%6PzMsxYC+Df${Egs8uqx?CANVpfcl9N8#`@3X zZ+qulyM->OrDLCt(y=L-lSJL%Fy`0~Zi(0J;7xk7SyIsG*bYtI!s1?Nvs8Lxzc(=} z*V4b*J=8gl^bHj=KU`2NGmJ0X>|Q2?m2a+ZU3x8k)aMpE1{L4 z_)(mQoERS%`I%5l4jp+t!$ESy;1r&>s>&UyEHD}hS3)vl3A4jlE!0{YF&9NSb6CS_ zNbHM1Ff^$PWqiC73ek~*eY^H5| zq?ns~w4AeELnfxcmBD;r{{1ADJd|6D;5PXtVH44W$Hh7+foK1>?gM8<2o_ihy(5*j z+pKr(o*jLC{Ri!c_r16}^K7sT>{D}(Z~>q7iE`K9Edofn&rNSopn-idg6}NjJ+H^t zJ~hFdiR78LJ0Af_@Ey!O?gqS5-?oVO<~D>vc?$aqcN(ulJOTD)y#fAWTx9Lj>CHgV z%}?u`%^vQ(p}6SH^E>4b3B#$-K-m!8>MW;}=~j)0x^8r4(+nv8qv@Rlan3qyo?+z} zwa}t6>a%Kk48dx+Ee^uNQ1s4F98ynqeb4%Zo2C;7)wNN!Ep(di@@(egBwSZdIupFA z9*uqu;C9(Me4U>)L=NxVlnv*v;;S8`zinDhc{Bsjt<`m8N+E5g{X~VS$EY=nL4VUE z$i5B|4i7)lGv;DQX{(1CNthU-j$lb24a&!QL(U~Xk1`E&^l36CYxgXH1eKR{y8ezz zJAcmW`DvgNywwh(+HWt-Ny>5Igm-x|QrAH;_de%QN)WvY+l9An*Yz3jz$d&mMb$54 z(YhFFL%A^LN~IFi<4yHGDs>09?k4$dszrb1Ubz?Yh>xrr0OdYa2M~s7vN-{lDK|2x zMaA46D&NpTbSj_|1FeFJ%%`q|>OAo!<9?N%aU&OFdO0`>I^} zMLfot#&KVvjY^?nc60BxU2#~V0WBjD#lEv0d>yl(v9V#uHF#Hg77cpl+(TNEi@aTs z%HG}%KT4+%npjtog958y#b*Ry5u?BfzsPRzod?Jgn4Fy}r{c(N+o;`|9gDUu@QsD~ z@ZE#_&*&n|1rD=O^O5iDtl|UQ*5aaD9!J2yRlK5&6(_7Vo7O}&og;qR-C@m@P+7I> z1mN-d>*#zBn&8v#&G1j#KI=U{+VYROKgT90_Hq~n;0pqJjzgTD4&VCSb5Qgjx-?y9 z2wd%yv9T2HTA`d3JC*R~+6LdW1$2%1FIDBWoIRzr_i>lBh!BQ;qau72DUPgzv@b$bccW`;ToXuJu+Pjb19RFUlj$D=ywAL~qEIlOH^(7>dqkF51Y z!SNrS)%E!DZDp~yPvvAL8)uf{VCxtlhWH_eDYn~eJofTNPQ|*r(==S9n^m8P-*9atQk>Lx zDd|`L$EhYk)CNr0zREKkvxt)tknkEgUtV2wu^)?AbMilZd4PJTx}ck5l4#niQL zrcgF;eJ%7>v)bGF;`ztNLLYh7bOV$A_FTb%U>B_Da~}uRL~8Qmnq%unPmNT7a$q1j zb&8ngo$V$_VYb~?ZQs$F?HpsOAhl6g5cMcGZolVtp7M@xdS3ZLyLVy4TrfM*Pf4Gp17a>N-NB;{*tH z)zai4wH@U)Im$5l)DnHTeJf988Au{YA&U(~7S_>tQ+-k7tCLNescSt0nMqR@zrWO% z{YlvF`9n}WfZR~D|4nILTE0TD%DYNm3N6w&l`Z#v{vwsDy7VxVF* zUj%%$;p$8uE*Y|%Tls>px@^#}%$W1>CzU}a8xF~cr*QxhlfsSjsxjm22pyyFn6k}S zjjSs&UN$Zhu5#tG!^Cj%pD%TvRx{1`fKA{K+AlwLv>n&hYFH-Uq|W9X&i*G2%N$U6jJ|q%G3rwL^jl7DAs-(Zxw86eRr$_FMl|q-Q(^na? zoR6khPR29Wz@Is7&hkA&T%De_&#ZZ0Z6iy6l!>CzR=ngq`LvI%x&bp#9N(E9J@`3r zKLN6-n2vuAvnv9@J}xv(wqF8T81s2`eW@OPoF&XMm5=L+%&$K0rNMGu7Qgh`gw zgfb69YdjO7aug;aSPS@1+cBFh{23*YpMmQ!Mg~QoWta&ym||Z>*6^FOYANk>9+V<%5Khrza+{o+0O1tmc+1jK z@Z~E7a?_&^mb8zxJG~Uwb#m1wSe+FWXM(@3b+-q7*BeggNz3cEGkR*WQ2zN0tN_TF zM@FlwRkX?HjYPN=+xObPs=y)ehp*c7maz{jGd|3T?DMM1j;({6tnUmWjz?V6n~~62~G%S7Cbyw0rbLfPCQuSr24_HiJ|^yg1JS6sOlLx zK3zVkQB~BGP%#<$+Hh5!qHr7Res7(I@D_CjOv4e6ifDxjp>fPQc5+q%lDfxX6MLtf zos709C@D>;OJA`#bq_bUO%I0m`wQloByE)Oa@PYE#l!Yk)3=CuHz0o>-}AH3V9(XB z)*-Ng1p^;Of4-03ZjWic);-nwmJ-W?`&r!1%ZWo4vCarc)t{q8Zf)kvTy2wFq2PXk zA1OHRvoh$>yo|ugeEz>)0BxeQNG-}pI0{1N_&@v4WrJpcjEk6l2?O6!b=>fU! 
z_=^7pla^*q&NzD8utW=y=>#3r7P>&Gsd)4uhScGc55#7088l3WI^iV#sHJHVo`&7j zQkDyGO4`^#i1n5afNy|ALFIyf0mw@TQZanYJCeup;|5i?7F=g&c8A94JC%JnwcmG7 z{uJ_MxT)zTs^O9RL41h+YEaz{`x3Ie8~2vzuir7f<DOK>9wjES zEE&?D&%`u{q+znx_=wRrwY$U-;xvo^%-#oV=ljaq*e65|?7Fhg80Jo<_&ciR2Y#T1 z-UY@$Oa4_kmo>?drDWr}7EG`P@re#gI%(|lGU6voAPH*Vl(ijMkTXj&7(~>O0Lq4E zOZkUN4_|vb$*fz?$-!_G*(Juq4qtkR+*i{=WiJfpp(&-YBLFI!v;5f-BXW}6FKC<4 zg9t5Q_Zu@w_Kgk5nZ~9jwp#?kZWR5q}pxBb%)h0gCzX?^H+*iVmZoO@Y@3;uXP3X+mvv z@K0aR|0t1&%dx|xIJ@q*r{1O2y4esPW0#z^&Gwf}W5BoO5ck%W^%b)JBW_=ge3!nh z^+>#}UJ?6-`0F%WeEF>~=OTx$=ri@v=H!<`v;=zUMgx~Tqm)Y!ehU#<1 z<(_pO+59v%L~l}*OE4kvd-WWo4)=?SyTU@F=clL7dW0tq&z}UDh=+$X)i0mt4X!|b za-(BE@_nhM4=xw$l`87QGdn)l)M0Qr;LCk|+CEa(e^P+keodnX&aCh@=TGDD_1*UA zvv5tW334a)Nr1b>EN8=ArogGS8Xta2>rm?4TPV9J9zKbHwg}!$;9o)!r*6aA&ZZ&K zwsWd*Y{;247cns*6GWl)dM#a@E)Z|mUgzvGIDTc%^IX{7vrY2XWwviOc6GT8HiWE1&H@VHTfrq-6r5`StgR#7C1w}OOL&Lp_=J9w zg^*Uk%ou6k;suHd5hY+DEE`!i;@FI(`Oi^&_tXglmqCDni0EWv)oVXb%x#Iu1=!W* zYSliS;N#II9bIyaMhw^haEHJ zIm)G|fD67rP-1K>L63B76AD8v*xc&Gai%xL`;wNADGupIT@5{~6)Ves9>?u~8xdq- zS8*e=y6ZRL!&44S3r>b~NeWf7=zts?4TqJ5X*Ca9ij=Bf*NKsQN#g!C+2#LC2bS-M zK*Zy4k>CH)lpL^r1*~rPQSbph9bj)~N8sF&Z?Bn%Y&Y*+0&XV-PTyd>g(>QajozZ= zsq0w(-4&kJ4UF|Y{@f$?kS>W< z!Y^Z{yq%i*?i}+CwH}ha2PSsc`T^2n>Gc`U&A>%7j{)C|)r6c*%pA9iQPwS~baBXK z_TSxe<2{+}+NXe#ecTqdii}RcA_|R>6P0OTK8#Be#vtZ&n2p-!0wsM@<>Yz30SwI{ zr%uVv$sm!iLXl?yvaE&}rrqGRk&Si`jLKs4IbJXOuJ* z8p;A-qROeHy1wV=F$6lsC)@@2 z1LNs0Flzd{ckyOcL?kqqKhlm$1*cw%>#}W^R&)ikG$;90x^hjCP#3d07r?+p92nistNjGz__Wlo4BldHZ_m^**zqjmODyB)yr zo}zey)pQ0n-!$`#*$nJ&C4oPRq^sf;YZ9grzL}d-gVyr7#&a?~N}(%u3RTw_UeVM9 zLmT)p|3$+tWq=+VIb5eN#|P@M_XgcVb|FLsp@DlcW>9R(&#SX~KzQ`6ZDcv#LFSb! zR(Hg%8JUd8v-ccDipA~^J8P)P7Ve|Gcy|Y|c@w1nf729KYa3g<^$l!DHVz!%1B+Br zixs9eJnkg&dUCds1L=$vVd^BYEXE1B#Al(&-@Y+;#|_mvv68yqml^CYE-p5;G@cS& z358Y*)TE$|>>3IY1>99oHuM}eP{aoOo@a6QQHww_?Rg-&W(v^jDbq(PJCfR-UXrog z2+yPeS40$pC}X(Gau4LK7DS-#;CTtEi15>sHx>{{I5Oh6A6HK%kw!^iXT&9zjgU|l z8g83YluLKt{i_kcU0(i;Q$Z0D~=m@*1M7 zE#YQrsCzf>VfzOHB-t4;rOMT1XFJz|TUDr6O^WNZY#3!%E>ph`FJwhiOh!lxvDeou zG_wcGIE3>*qGJ-BnbL<3B+$JKot&PovGM*H=3+T-cn>*Qo6jTw?OJPs72N#*}%+VWR4vVqU+qE>X z2^mgmOTULp`xa9f8Bp|QGb%G>cFXz-BHT-`k<`P2k2w8SjJ$sJxBTWNR(J-mw)8oZ z)~6pJuEn3Vddry`W-pZDF-B=*fp;BHb7I%#Ds}YlpLBvVQlL*1A<_8uFs;Ju{F-QR z{P%r58b1UA#w`tJM>yK=tM4~mv2ON-J_p=LDY=pET9V?aF}qHVdG`ynmvp>)pL_Td z1sq`A$E@82o`Qx>58X_!YiFa|O}J*^T8QJ1c%4VUq=p=?+*HqFdF{Q?;%jT~=c5mj zirCHO%+H%Jw9)TrRLXo|14Hit+ zBp5Zztk+bSB}E*ibMSQULPIFWk~iFTH7%*WU7q|!Iym#b-Q1`<<9b$ft?Gk*Yn`6c ziD7J6M0CK^&oaMbhnO{lCLlVOBICWLi+B_;a%kfk1C_Nh>-as5d$2(v%}269cY7@Q zE>wJZYhgUc!+7RN@Dy(wT78y;kasdlb=~(f3W{2<-d?On&9B+GVgCz^#{q}GrX6JX z5b(s4-M)^k$TJQw5ix419Ap1237q7N3FY;qM4C)r97n-x9bVSvD18nNi%sX@5Yl2D zM%Ri4Emi$tKS_Hay0rE{sLqe^2^1;IWEm$tGJ z0|)J4m2murujnu7vDAwh@+=0*!Iw(ABkRhiMFnE9GJvK2g0k+pIcI-BVP4?Jntq&M zRHEkj?(7ZY!tT{-d+);C9T4!}yGt2vDB5-0e`kEPG@QBF42V#Jh+$jyEFXpT=nja{ z)xe;2n^c|Y3U2b6udXKw{F1x;8Z_ykA$8|trh{okW6Pn#S| zS8ZhYbFkN9a z+Rj*=C2`fcnnW?NUIZRPv(0NY9Bgsw@Zt@!#%;jgM>!Q&x{!orNZ@|P8fJ%ugQcaf zA|(u&4R@U}>YP__$x_3lP#Id=xgzA}n$xRgCxU<>lpsB}hB>xSl1?YCMxf2Rz+YmW z_VepGv%LBmvMquMVVOM% zF=iIp5oi_vqW*)2UhwGLQ{IWcH+4|xP4aZ68(Qd_3@D2ENG=N=^R4h2Xiu-FYEq-> zrDj$}reb2kyUq;ohV<3%^z$4zl}u_?^yd>&7N{)%Y!oOY%OzpTij6n;u!-=w9)Eg9 z?}L5I*Ak_a%Ys(s_?`Z1K=zfj(++R08|rKNSbYeBGT+XR6Su+U+b?u0$|~=CJNjsDMPs zD^%&jp!qdu(ke-i*f9dXq2xp}dTP@{403Ux^MRm$oyz5U8pf1=i7rl7aw7h&LxBj6 zTLc4D!LgmjKqy>n=C|{O?j?vQ7_shhO-$!wg=(e!7gI)_i&?Unm~aWlg8Y)^%+JIR9NA zwRORHvCyneS$R1OUmq}*RpW0aLp zvhS~XgEbx9hCizXO5_3#saE(oRj?8uIk^VuB3TbESCl9B9`xZgr8>>vatffa#sGBV 
zytWf7NLcC%#*3ykokqp&V3Ktx^B4}AkMZ`uaZN)BFD)pi+l`92B`PY{ev*~oKTc$j5Z~ihoVd6 z9R1oU-^lU%Dh?|32D|k7f5q~Nqn;<_t+>-gP1>%FK6!D+p2rBgmB%q>-NDU)B07k! zW`={H8pKRd?nYm{-Oo|&t9xP5>?2y4?GYFMPL#)Qywe1-UPj#sXEjpqrKR_9YfZ=O zA3w8Jwve%Jn|1|Ra3mipA13$NX3a#zG0LQ>@Uei?i}mAuup!`;bK=y6>48?1>1$;+oHvp+G}|q?EZxsl-a+TtHE-HM$!d z>DXD2SCaQ)6COyBpq3aYMjo&bAesGlTMhv!`n2mgLo?rz_0ss(qk*skV$Y#jlDXJe zwB1;$U0jR}v0@A;0xf<0sQLK@`j4@^LIY3bzg8~0{Qjq5g`4uRzkd?VF&=V*RtGia zJ1{vh+)!}nIA~KoR8HM1@^U{P0o_%PV@@u1;wX4jm}JF{3PW%>TlXBDujK9?^?T8k zhO1-}fhr$Rw>XuAiqi#w88*^XwWO5H3ZR!oQ8QJ*`RL2}``}{%LM>NyN^lY6+`7vC z(sDzFw0(++L(&_BOJaMT zGH|wqmhvS^Q`Sn7)?Co(1D^^)LWM<|E=0eimJ_VyAJeB0xkAm3TFbvFGy+-+A1_9! zx!^FfLL@+jqFToeHeXc3m{=j~&nfCMImwXwoo#_$@K*j{*|vw`pQ1ObR}iqW*|e7k z($b0`abjQ>CC*>#BZe~LpqM1;kJNW?qvjk-K4P7UTydbN`LS|{!>vl8#~dm>L;O1Z zdp=tP++m(7eqfIW+GdZ+@-LcwP6&^e{O&d8c})Z9@cZ3wx9^MZE>-|doYy4Q_3Dgo zYLDOYwj}w+458d!u%(|6RWX!9$9-zG*Q_g#w`0ll0DP@kWhC6*V3`Ex1Q>ek2Gfxz zWzb?aocT_oiWVN74dD+8lm(9bBvQ2~@_O{cVqUIg2Ehy0BSJfeiAdFqdIkE@t@F`$R5lTl!!R3@A^RLnga#)&ChnS{cC{ zD%>ge@M;*3%o(QA5cZei5X)s3-)v#tsx~EWI{>VCrafGzcsfd=GmGMiLY5x`v_`KS zsM`APgvSNIzi4hl$xTJT=b^^v6bsM{jhbd6#qXz51i5|2B5IGX@t;e2%A^WFu(hq2 z^k@#jal@qId>zT1i8^yU$7-^jv3{xQnVFUmJC(h(@5-raYGBJg1V<;F2@t3dm@@Z{ZQf87rZp`t zY7PyxkX?$;Nrqs>R#FA{7D6NKrB3V!^O1$-;aQF0KhW z%OSq=ZhPRb3%9!~zYiqOhwE>hIJX0e2r5pJlhPIJ<&+T({b}L?UNx@|r~)`N4c$Ud zhlhtDW1jeVlr2S;4deVgEDzi+1oYqbuxi~FjFpj{H*5s|3|^{j+=Mdf(&NVJ3+yKT zHVQxqtI#?O1ap}6Ey@=O7~udFL*k%-q#G)V&dC&}D8;e^%JLCGeRj46A?AIKomO)G zVRKq(sS#m3Naj;^^3#B0>EQ*{Od3o3O4wPCz}FXlseeokOqF0HrhJhZU9EnE`*^mJ z6j#ph>mJjTu$6^4(1E*3hu%oQnDyYQLpe)iRzpjMQR4fM8aRN=G#AZ-$~3}`Y!v0Bw%go3R_ZtHqQpEsBJEwBgy zn&CkuLiviZk>Iy56L(6AMS8_zcAE8LAQXKu?yhgYVXtW_BKzwc?s~1;vWfJ7ZY}%R zBbz2b1@y}BAMr6-Sv+oVSUh@RCrq5ZE(vywcFy$!W%h;Sm?HG2!VQ9r|6Jl(U-d51 z2ig1hb|}lvPezdwq^!RB-{T>Jar+eM@)o_sk$tDc{6}?hM?`*80V8CD@6byc4uM6f{2!nT*ehjZClSCrz))&2+Rp9@-O&l*`50AOfj} z03nD{qY0zbuF7Kooor!MZD)op_KN?mWM4=P%gQ?)>yB6V4E~{V{xEFpMT!7$y zbv0CjMZoQHa^AahClDrzv2~gI_hbaJ&5|P-`evBS#J5Zx9+yRw+&>3YuKNf`mgeSR zLoNpkV+g4e6d`s>wo`+98H*U(5Aq7|UDF-5S2A0#aP*wR-12NlO7wM8DXhVGjN!@R zBkigUpi(nD)ERVIYLFatrB4Czf?2V8#9nlVk;ba>@pOi>s=%23o|f{$XF=O3D{cg2 zCaqfIwfyiCYe?|bN3iI^%y0>n<0=+&R?xI2LAv|xBjbAb!YhLCl)NhsvA=C4gif(B zC08pp^uC1B#t)I1+ANY-<=S>ek*XesM10L0<4v%XUxo2niuKhRj5Ljgx&vDN`531+zBA-l-)8fD>LtsL;a-!^vVo6W?T zV+JKCB&p;f#8_EKW=|6?u%#QV2bGOYo4qIr@GEdc-Now4-o#7AA?yP-ZG^xyC6h}p zK%XE%e)Dfh6iRQ=kDt>3qgOT-@#tufR(5QaohDI9HV+%MZ1_soF0o`hl6Ld(ZY?%Ioyh``W&qQ$gAP2m;`-0n|aw?3@y$Or!bnJnb-L*m;Yo zhMzPZ^yS6tJk1|R(HnGi-zVc@YF_988)GefQ^9R8SDUKzoYmuaF~t|dRr(yb#iobG z`(vT$;ok}E8#$B?pT<37Wys+-zMLq0-gaIn^mfHKo@3ot(Kh~yjuA_qgts^SsZ4Vt zrz+zbXMLA*-Gr~56UEWb4UtuepBTgDsXdjMa?V4TW5;d}b;mO$)bJTR1&e`O6vX7% z*<7C~Kl^8F|A`qk_tWgb`|CWKsK_5ZS5Gg{xqRVE7rW*nlPIvx&dK^sE!oxGEkx<_ zb<25DXK};Wg%s6@+N_s?U|GS9MAc2VE}qZ+PbN-3G(f9-;a_STB|D7f}dkF0k-P!)tmH}J;tQ@pq7qnvZNdh}~S78D=aaG9NrhArpdTX_GT9aXj-jqtG z0<3VGF>PDP(0oStYV5EM^|l|mdj`WlzACKPH-+$@B$dIJ==9UgMUn^o=HX!2`qV5g zmJ8dLO@7I6Ea}SWLkfVP|b?rf$!mavRrtVLfHK+Te zG(6JTWIITjD`(qJ%OGU^dR)bpQ;FeLB0Dsa;Qh<|&Ey}U5-4MT*+desz6ICE%Cc(cVC%xWGA^d?B79~mE0E#-E5 zW#hSbKEj6yxJP;bSq|0RY^gsM9~F8QRx|`#tjc$@&{im^ul#s+z2%Aw_>9A}P6#ia z&=vUDrU0JA0hofx^TaQ@6<=CbDD%wvsDo|PJPUyf@&X6b+UsT#&086KdlbWhpz;t* z%e*H$qbEml;{IINCcp)d;pxh}iG!RZk}EhplH{A zrO+1)iT8~&s~z@rb!@ldH+3cCO-QEFbr;~w?^Q{AAkP8jd2RR_w!5(0~4Aqrx^c&Oiz~P+mp>qjjqJ@uYQ3Bf+Xh&Z(q@ z5(z2)dz&d5J|TY}ay4Oq1?n#Zluk31whXk-O$3kD=M}Drys=UfppB0SX$ul>75|fu z;*T|M;QnVO+}FrTOtcu57|1yn4|>~B7jTE*ed1`(*1=d6d4I-<6>V`wbPp-C=XqN3 zui!lYOD}@L&dBg7a6REzD2-R+r|6KAqPzGvyO9@K7P)GUN!Wp?FS9Jf`H)xUF}AA+3;aU@0bt#&Pih$9^0#c$ 
zC+&6~^-qNb3Ne8xkK~*L^Sk9n46WHd7w+L{vF?4%jm&fewe4_`%lB>+J9GHzZ;#BA z20yPtt6kM|WWO2Up$h8U{qT@ZB-jnp!tY`jhSXptGA&8cw_(zntkZrsa8r`$Rb*q? z3#G!ejd|11WuDpzhI{^7SU5f9{b*_mdtDC**>1NEmOWdDFF2TR^-> zZD4}0V!ZtoX6H7<=junMyOsu)5d;>T&LQ9KjbjTCfjp$v4FZ$`oXhS|hXjPlP6- zk9E~H5Vfjkw1)QEG+Fy<4-TwcGc$6=xKzxvm0Z?plbfKMQUaU&+=b;jkMQ)TOzn8r zI9w+C>v70dS?d&_XB9JHjzC|@o&wQV--W2|ky4>Up285z^Hp)*i zvgvTMt338hnd@u4skb93PRW%R)IT$wsx$Wz` z1VewBPMPShG7xovM+kcewKK`oOu`8B+f!Ql>m#OwFBHCqXt}+MnuNR26E#L|=0p`| zwv!qbt+84h!?W0srsmDT&~^@vEI?C0$ho`KremhS?Mudk+(r{rli$_X^K5228gzPj z$a+dPU3&U;%R=Uc;Q@b&PsvC4qF#~JE(I8nl1|jK+GsUjJ&qs*E69j&xVX1k5jYG- zO=eSPyUS9|pwl6yV1Bbx5f}W%MShgFW1kntD`l<6w^k^FTk36@*Gdg&wtA{ta}4>5 zXD`&-*2_>;Z9v0Q)X;wfN9C`8zo4vh5a5!P_W4Md9DUsk7PrIv6)|5fG5`Y%4Yg|R zRxV=StFZlFAj)Y;eCUFDOCahMr*T+lhnu`*zpN4nHG{`jb}E2Q=cco*@O?`2!}<7= z)g5G8(}f7Hv8<7vg2M{FqY}=6ZfseHun_6Ie||psVP-R7 zgr=7X|#?VKtjhe<5BXvE~gNeepeFb?(M!$ch^ z%v+wAr(m6st(w8(o*px%*Eauzj{9uJj~d6F17+i5Zmk>&a8y}zlVF*22sfseuB^gJhe}<@|;wNnWN|cB32*IV4;O;Ap^N$yE?3yh3$Bg;e$(mbc10L8O zr5r`R^

    J@DWmj&qR=b5uwy4G7|x$+4jcikZwb%1cM?1`p8<1?1?Jq*fcaWkQpkg ztLn7qYiG3>r2r_ppa7LpHI!}oKxk@vrZ(ASD~PZe7e08MK2m?4)^3aR1C)#0D{XLY z*^ePnK|0r19P!GMuj$gzU_s!OCMRDCnwS8{nT2Wll4F!c7CGCDM2WP1P4o4vn{tMi z#g55^@@2`S7^{*t6p{7+n#&)_&W%2qh+qsnxyXy0AI)IxLR#QtdXY!rFe7=cdPxutoj4{D@ zphaz;88qRo3}tqXRrsHPe3LjHs|?{HOE9l-uflR?&*f2uU)w3XWPYA6Ys8NC#W1y{ zQ}ORscr6qW4%xYd8M}oQz*Ec41{tTuY!JESO&KjcA7H* zYA**R*!^YRUlB%QK9Hg-cVOa&{eq8&Kups-pRz$_uA6Dhpnvyfg7I*diyt7CravC^ z_M)9prKDkz%xbCuxq(20pN6rdp|KkOwfSh>&OUqZv5Ttp>G2S-&q|GfHuKB>Mz8xa z;2f*n*0JkynBqD3%A}iQ6#Ew#mYB|7Z$CM+i*J!mqruNd+VJ!`L_JzI#=CyraEVE* zDe`NTUEtmZyMq%Shl*%wc<513?y*@Iv;&5!1O?K`%^l-uI*bZ_1;sia{jk7VpF#6Y zD;&Dqr!Hb#Hmc}Cm}&Zf^E)bq9Ph9HMRzv^tJ{U6R%e?e9kk^$7F8<3@amW&7zI;N z>gw<~nI?LBsYJN%cq-(NXl;=;?gAZf8o@~CILa6}WzG3mVgCv-If`>xg3v;L;KK`y zigp*Q&O)re&CaLy+RG9tjETZ&ZLW<2;1KciCoL+cG_>@!E!{XxIpN_4Kr0imgGb0Q zBTq@~9PRgdO|o5ozkcy8mrP2z#j}DE9*ydZbfbsEslM--EKNs6X?+ch zUupbQ#KEb&1fuSL&#jotjKDRTP`HYV#U_cDHpsO{oJ%RQ;bih0I&8NCs%%c{sJ! zI#dTt`KeE}RKfrIUn9%n72E4HXfwCI1YiQ~muSe6(>bOnVyyR;jH%%V0^4^T=+odfY~iERf}=<&AV(}4z|_+n9YIo+YTLi$`>~J8 zAT~YS64CH7l5icd`Lo93pT=~D{LVEs%KYm58Fe2Xg%Np@6|xeDtrbi>aQ=mZ`=DRU zJSW7Okp7l~E!tUj0Py(JB}p|C%B}fD$6>0SC2TSOl0qs!i7w&zn^yC5hT{IuV536u z@7$-N&I9YlMz}GFXL0}`B=~{r;Seaq&cbboe}&B!Gk{xVGHK-U#wUqrkp}ls0q;5G zF==ZyCU*SeN|I~=*4}K|Dhp80) z>Kqou(~DwfV+PG4`y>6oTFCSpR3Xy(vS!5xS0V+>Txox^3q-6IzCbmga6mFY-m4YW%qT;HeV{LneK`JW|LS8k%6H z&8*DHMm$44*T+mgbIX6=wwg*C-wofB4`=r?sN?yC$>tykp==}ri8oEZiE^9@e7W11 zbYd)WDuF-80-G>PfTncvU~wqd-MC~v{e&qj!!n}w-5mgG?GxjZ_>9T4V`D^-pE(zg zOo5GpdHq$r&n6vT8y@$?>I+SR+PynAHAQzze_SrV+i9~u*0-FNtC}|{r}yz;-r=n> zO70Pshp6|Q2CO<{T9`_7=824!RY57}vUsDNO6+vzt~7t>-9dW1naAXkJjj;qeb!} zfziPyHngXxVy_u`@G*hE5Ea{CUef%p1s}M_65)P9xg$QgKQY+85Bu@t?WjOWo8!I z1}1iP$vWwE`eWc8M`MySjc-otdlDGP8 zySAy_)z&w$X+8m~718P=D2RvzWH1w0mnjxohnhASx^*~sCTQ|1`L9CdE88_~#bm@o zcjQ-V2!+LIlY9>DixP@q-TQA{tw)>>4cjYpv5Bghe`9-g zP{t%!R+aE6$6bn6`O`Ij9e!W^Tu&Eco~tVAxV0nBDkI64tb~%1_`8YwflmhOo|C6- zr!{XGdO}VddIWmWQBdTu<@rU8p(5ZLrmbUOVPO$t^0UVYED^yDTFbq^-f&&GKVEOL zYJnLYvH3+u^eEzKi1&MUw)WaigxnxHF5k{H%0NN2$b(p2Kag*GN)I z9g(sPLJAN1IlqBM`EUatOxN;f0(1q1v_eXZxBBSSO!MytRkdB@GS_$<-ub8mMe8}p^NK>x6JaY}eO0_I; zTHZj$XRT52>Pd{Nvt%^Yz|Wpj##ZaPZi8+02z80*K+b&#r$hlfxEWYY@yNU?XmsyA z>vA378&sUQgpg~|Eh`k=SiPgz)om4P<=A?AN5_@iEAxRDYpEF1L( zf;z+s)bSr<0j{GMb#|F8?6o0NWz1oPIK5wkc3Bisg7Vv-6~kes-irTWH(X<7)~~>< z66NmzOtB?fqc1 z^nIal3_Vae%r>5&4&>9%Z5y&~=ro#frfV7z4ym?~FN{u~XJxP(|I5-5g+_>4ki(2m zloI?bJ1Jl)Ho$lt9Sy?XJ?@fG=1w;_217so146^5qWU=fBq5GSH!w!4gT78ssYAZv zK^wHg6|Str6jyu()l^gRl`6+l7+?9?KWg@h`nH*{%z|MVF8Pl`w| zA?+CbdU?*g^#jQvqx78BFNxXdn#73<6{7QM6de!?>M1x$J3lyxPr)-{;$LZx6Rwysim@f@hhsKil2>S`PtdRb;!!;MoKePVaymunJr{DQ4A6j)wbFCiu z&1M#q<9*gza&}%f|1@z9l3;mq$Z_8%I`|QN%rzISE2WN5;-=_$c2Z@_@{?~>II^n^ zB{|;X^mubD-S_fy4%(vc#%r|S?WEe^kP#|o{7CqhDl+cxWBO^8%6aPOaZmC%7FyBX zRSqR^Ma+EI+A_Pfh;8<+W3QFk0L7?}$+mZ6{1Yj!q&Z)aos1Vm7o?&4kfva(k|^28 zjhG-odJNK`M!$H|EDUP;78nDuEz*!SYW-^8qrlGXQT;=YJExykLW;7OiiuikwK$_5 zTfFSjt#DVSg_ar~Rw8UD88tny>0-V>#gPQfRiAxRPViSo;!n*I1~5Gb_`D5v&s7{4 zJw+|P8On=N9Y3*S$T-r5qK|yh#)Cm`;FgF9iYVEgn!eh?OG*uROXG_Z zxPrC+Hi~i`*YQqrIbrk!6`Lvt0AqfZqWgWiqD!Gd%EJ&sUShnT~oF!*QTjuxZm zSj&Meb3(kgb@y8S5AxI@qAOb1$z{GA^rsxg>;$`ny(*C!6B$m;%)Gl`F)?>ZJdd#$ zqf4_pJzue^suB*EuRpLOLt%eJx7&S3TP{?d@zre_E^x}t9w~k3FwJGE;?SPnne!DI zRuEC%trCmpx%>U&FIV6sa(JaisL#DMCF%??p1raJ4J(C9Os&fD=@xciK@EPP#{-yH zS!PV#Voixo@wkn&5gJbZDhnnNb#Zx#^S{M^U9k^*zquP``HIC_+N2-{Ra3@q>xM*A zv>sbpoZ7a;Td0DvMPP4JF21LVq4Is7fTy?I*}-ForQ9f!rhT)&2{x678> z(x$F=exAG6$JpBj0Yh~7L2vW@HH778p%a7`ke%yAFlhhS0T4j$>FT}ulfmdTh$sB> 
[git binary patch payload (base85-encoded delta data) elided — not human-readable]
znacib93g3n5KmZvU+t=AD=H;4KxttDJ$(Y$W)P(SAkGGtevpTb=TY?5e$X5dL z(o=YOBjI_*g(mEqo-}LTM#uki=>*g&ahcugYTpuT-#&m|@k7PrWW;kk2Da~>xQgDO z8g*Rqcd|PF`+xoitm^aL8(F_E0_Y=(70?w{M}8^wMX{i_y3FQG!)fWhdDE!am!Wol z4~t2O0THS818xvbAuS^=?XSCt8cv)AY9OM-=7E8vvbs8bBO`h40&0HW89q2yYW+cl zb%N50HN5I~s(+B}pb05PFCVKBtcI$X1ma3fKG;qFagSR+t+4&YD43CX zQ&X84qTm07=+jZ4d=j(_KfRLol87cm(ZuZWwd)vyquiPCsqL!B&(1rlsmmi2GN;jW zW#xOYz>|`u3$=9fgNx&Q1nxO)#W~j1sP3OO-K-q&H80VzqLe>DbV*koh0x+6t<8Of zn_v2~q=HX^Eg#HOQReF%6Dt$`kWVjcxf2Y$q!fYg#tY)!VBw~PelR<+HR*FUE}s$` z*|i&T&H+p^>CnlEPaS3hbg+6}r33qAOYMI$m*ni^;25uGcMse7Q46fLC_||UO5RcZh|hBV z(-2ANVr_A?zC{(~>v_Xh|GYgBDu4IEu#yZ(ls_NlR$29X_j-U((UFDE&Vu^#trfVaZHp;4rtYXSS};?$Azy|PVrB8x zxkaE8#=`1Uz?Lc$Ku878jXe2b_M1{bBdDfXvxG#kfCkFVHSH^<`uxX)t%qjR|MXXz ziZDs%^a7>WzDOOM5=*ha1IiX!a&?vNYVG=$vxRG|HK<*2G#J7O5#x#tBS z8PUkI574hE&(=DCR@{tk~+AhYtZ%feVphthFc~uXnvveHss5 zP<3PfcN6|&Sg|%PUtW>KeMWGLZqBy6Pa3*@`|}lcR_*9AS>);Yz)<7(pM<&m``sGi z$GritTAnQtM1wUXMDR@$fd?i4vQ?`f?d?R$@p;2!Q8CnYmIB13eVgF1HGtm77!l~$ciKgq5|H%=akdOdxqJW8GSTX9_0T+Lo`j-5c?E)yg zGnUb>rV|$*0&q=S9lk2w8Lwh&$MIo?R|ih3SftsX6|H*VM6`-J!*!P_D91%Q#YIvC zh#f5rMo-MXqUJ`6uh|QILdGVI7Ws>*Hoim=0P5TfAflJYwXz=x9X(d#H)C1F!>wqk zYB#Lz#1M*2vlS@kqkyZR3qQshfwMN@-It=Zv6qet_2zU)=3EI2Ctfs)u*^X~4=RFD zU(6TB`N#`pH@n-Af68GYz=X()AA>kVUH{CDi@n9F&sot^dKTw4xJj=Q%eTSr_0Dr# ze)VE8+#6mJGtV>EbowbZWR5KiEJw$QY7Qg*Ge$BHH=QRIIpG<%Fe?DhaJq&<3#E&} zt^zXYwsyHhI$Sz$yC-kz1mIU_e@Jp4LQDHjAW9#+A_GVkZ(N6hqZNh(0whqVi z7Qo3!5YTdZ9uyhnN&+=N;3yySd?~0(&h=z-eeUeOr8SV{$}tK`bVF0GQY-PkNG-B5 z!p)|+CU-e8;$=+K^Uy7a!xq$C$xMIKsN=xbmTopu5`u{+XVwSq`yV`I%i_@Rz|%<@ zA`o=qJP>gVog%S=_x7|YNv}^SeRfFHM|f8T5QGQ(hye09S9s3aKSSDm*>ftU-7dZG zESZ^Y92nscQWA_=yhPizAYzrH0^Q$BKYSdfoUuW<%qKD8!jn3az$Z_L|1|@THbFa= zpc4!)GuYUZ+Fko!Z7ERP&;&@rz;r+5I^2n#APu~ScAc#U2X(DST4BcT@dYF@uG;jk zvA<|+1R^Z!&g!yBIty!HcL`+Zq1CnN{9@+r^!JpDGB4}17)~C+hDwBUYgT#vUSWIt z*^@}c;zsJ^q3^FK$9MI&*X^D~+hy1|$cni`D@+BoKs8lzbuWCt^gVby|AqUd1ve?x z++kIuIt<6(Jl_dsv41;}2Z$~QOnL%-kON{m4pG0oG>DFvu5f*$%JPPEr5p1|jzerh zevwA&cr2!=*#-w`K#pjYUWtdL4ct4TVLX8x%W^75*k;LNN4 zt1h!=z+k%8;@k;@_R$GZimaGfq=cZ-KFqEo>1#***kTH=U%R$ZKrB;W*WQ8B|0yDW z)EMxfWkl!Sm(8B{tw_wExrJnM!C!y9k(|2AY=H_jl{eFU-5c4EJ#JI+KE$;%$V& zZPHomqMf9auB$q0lFeOPvh{1rtgX7Uh+!xZltN-Czvd%$4E3lSXX29B+tE7|Fa|j{ zozYw!pwicgdM!ywOE41=T2`g+YC=iC6skrED$HFTH`$hwn01)|t?zKwjjO==iy_TM zx?=0_PXiuZb#R%?4Aq%8&>y-wus($h!1sBdNmY#iF&OlXOh)FHXtI+Suo&@BrtC3D zD#mb6C*j3;%hO`Ft76efuxP_WLwY{)kJ*uc)fK2i0UAQy^M+< zz3Wn#U7cOFwj~UGm6{cA(4H5a7AviJb6ioQp=spPA1U0>`0w!5_Q;VT6Kxdko$+HR zbVWtmRh=3dTIGntzr4YX=_qD_&zle1m|;!f@~_J^koO9CbJy$fdEw&hL?_x}W=dNB zq9)%+|j@!_YSA17KPRNWJ~#onDy0!q!8m zn0?kltjyw-25-!bHqZ|JVw(QHQ%gF+#8C8dg~ri)6n#f7((HTtqqc%4jtN!fn`PUE z)U%x7;#a)YFdSKBrGU?QoQSa6Jw12Zk^U&%FNG@6tOg=gRAnnE@5GhmbP>0e8wVX(Hx4&P)>bANdX7yA(l<;el65R-die? 
zYTm+?*lA!eNHt)KXj?G%#kF|m4npvMU$;I2CkJA^wVn8OU&UysI*Th=WT7x?E8{zwXmYCab2F^Sl znDVF85(#Q48Zq+_#n3v;+_fp>zS!k8d(B^a9ebQQywWI^3J1~bzrF1<5v_$WjE_8z zqfB!hm$KhtoF6V6QGJ#e@|eHFGYR&@^yVu)d6-j3Sn*UUm^sxUIWQ4>_Mo-| zl&?y=cdteV0Ho~yD#ds1~liv{-GjNlwYqQn=-3f`6MpdJY*1e%> zijVT5SShAp668xCaqGBtV5mVpL_#@9Z&+8K&Dizj;#Xx$>b;MOZDIlI@+iVbf*-~h zm9+&QdXK+x(cPwr&pf#??du1jW^C+A486$e|SA5oilz4IPIg@VnP3bF%*Y z4xrR=BGGb?+^(uECopE%=_s=5ME!}*#*9T$OPPZTCeNS7rG`n9&@Eo01cj%;VFm&B z`icNz_abu>RUuzahpe8}crJIz5Z(tYM(6wgAELf8Eb4H1ds%AfUAkGiyQRCkI|Zae zq`Nz$K|&Of4r!2-ZY8B#>27!z&-tJC(|+3PdY*Y^?zty^Gli?(cvpuG9L?z*#?)IV z_Ofx>%#lf`Ly^N1 zcU2@qS14sFr))-ZQlGQr|LUKWy6RECSm{Ik!vz21dfckV7;kmUPIYyEp;!fPz6@1G zMMmwK&il`n(<4-cJ*IkpV8OtS?c_!PzRRHbxeyNAAg^6&MM<&Z2M3$5GFz&&5UFx) zC>2`x2+&f^o#D+n)WZfk_sK5@D=3%5qZEtd;HL;zs^LNdijbjaMC@1b*h9c(|MV8Wd_xS%vut(+?a_?iDp6YZ`4* z(*oC9VoeqKzEv=Cizb4#Vrs|vuhUvzV|}B&tfMBWPvwY~m$;$Ed&sj-{7*2;_7Xfn zZ)_JD9IlT=pE#55v#IYfKb#gN1>#Z>k#_@_u&r-INmNwg^;Y}f7wRbe>#?p#JT#$3 zykE);&{^U_?RSi-GEgUq3LTT!^OFEq7{ND>?TF2g7!02IIx_tsDJl)(>*T>FRVBOv zEJKcK;g(;nt;fXnis*QV@8{B5V}UjA)rII9#=@y4eEK>*6pJF4F@dbhC0|!-^X4iH z)iL1Z1Mj|5NC(bSOW5Y4yxP{@yE>fw{MznI*rX|PH(77CXuQCiP2RmjJ!QXen~ZYQ zSRNy1gvE9wBk(63_FIJa*_1Lv5nS;Mhxz#0Ms%%z@tZ%Q!^c%ypk6wXSn7FD&cT(S z%z#G8)Khx_fjs=TWzDHC}OS8Pct6XkA3tj5BEVQ_X7UjGvikW;;Tp? z{8Q1n^EbI(bWmF-GUD&c>|{8fi;BaA%CJ+vdFJw60vK{c`dpnnr2`N?C@DB&$>P`rC!zbtQf%e_dI7M$j@qa(wj$E;T}r78 zdwGW2JG3yUknLqzhKA4 z1vdwsnzv<+yN-&hd)C0oeh6>+()G!4nH!JN%2b%(z}cf!8pzqAeV)09>HVY=UQi0l zP_5sM#p2VAUyBd_oz??7q_va%TtdADBC4mK}+CfkgAXM|F9)m0^$>)$a z@BvmQeibj^b~rOXkIVyKZ=x{P&A2~pZ1Sat+kBo$FhHKN<`UkNU)kC-U$F);5%Gs% zzbEU|qK&p5JZ-MJ^?9Gh8-lZHD=Ky(_&?hS0V?Cz8(&{iLCzBYtVgfqu6p(S+eF|| zeUQ)X{%Y_e@|9j2QdkFBPY;w5l@hTW(eTcs?Ke&PS=BeU=tT_Pln>xK#FgnxdTElF9f3b|UEY(zFa|7saz?^*EI0Phe0w=zumNgdB&4qybLPe)L0 z=vHugmiJu8#iNj?0TchG%<~X+9r_8~U8WGY)i&Q%HUIr?Z+cpc=oNy5>l~?UUfmsW z#jLI;eXpIL;Y}Fp5va+z4VOTer&FsS9wv#MX^sC z7ZUe5mue{T97uC8`{94G6c=0NM3XmUqSOUuUF9dftTGti@3zJY)?>PINriTBItC zh|`DLO1OG$UGz4s&k}UP`(c_yt^`7{gutibrc_PpH?gV8hp!#-zY_Ps2p@TZ4teGZ z)X1nW3U4c2>?4Mw#Cgus+hthJG+FCBaWRZeE+UV175*VHWt_wh_nX*$R)1g!k!^*1N6^A4mb-Hc55 zBu^Gv2W6cSfnGIfE%`}t|5!=y}7@p5YKSr zpFl75E+rXVgaVRLqyvX{Um*?c=&RSl0orxo7sl~s)X-yKzUC`EXhjfiJ@M5#`wrhQ?Y_^#e6AkQzze9n1q7&YhfYxyr>3dUDZ0XBj2 z{)v#F$F^E`t^P#~pb9IsTx?vnMM=}0=`VlVr_S2wZT>n%0#hx zS<0(mEX}Vrn>TB}klU1Lwe{ds^q7Y{XhuSsL;t;GHU|%~#EGZd@YgTxTOXrDzX@7Q zkVA8;7iszf4Kv&2!qzCAGk!p;3K`)lc6@e(lE*ufw3~ z3f!!Ey;o#Y5AFP8OCl-100jir$rq|%WSHK$f5I|LTHt=dvAOO9)W27iU8w`2jL3jV z3Bb5tg7%J!o)57}n@u@|l?zdf#4Gv#Msq29nNC{rlbG(zi z%qaL9<<^3@P#H#yokgke4gN6i%38!zOBQK%?L9Aran5+yi~Z|x?LO_c`hHn!*18sY zzUsT`!Bzv{r$h6&Xh$9jTtp5n%%ibSXXS3keDBZDrTsikHV5TH;f6@HSdV7*s0$I% zbbhU#$#Fgg2(zf-F&*gS0iwrRYXL0INcj1ey=xAlF{hs=r0QSOvN?iDqgWIopN7oB zOs{(de8bFr)5H_@TL#@7L9yCBPDM})ijRhg@by>_|7CdP4jI#1hnAM}071igSNTbs zU%y~;ImpglSyMUu-n%)sm=p`XSkE~pk6QYe!x=Ir*-GYK?PGA8y|U{l9w&Z znVNeuu2iRezhp1#Ifa45^uINt!M1jl(GHc14=u8!)h#c*5A8q+6Sgp2PSUU*13TQ- zxC+KKvg2dZLfw$qA&sUN9G^NLgp|z!iEQZeNeQ??iCXVe2eORc(T=zO9dO&*VeB^n zzNw1`jT+F&LtlM~J%I}oGbVkF4`?rNp-ROC~*FD1j(Xuk8ePb4EgGi6o zFYN@ZI~hjT)OmCQ5GZ9#{J=y1zq_~%QIlT{c3}CQz^`}@57GNQe$la~qqD}bv|Q`!l;qwq~cz#hC91vfZ?LVRNbs@C0gTZYs@;Ru#b7}tiqmDHi% zQ1kK3L4bNzYHK;l%dKqX&F z|4t~5L!5B_59&8_@ACo*R{d{!*-cH1^~3ENS@=0A8j_hQ6g$2|DFEp-8o>gRRI*q4 zinyNe!U^A3mNH3vy!#Qly>=QsPpO#Y=s3`c{de-6>vC2 z|6%Xzv2pof>gp&CigvvLgKzmbCtRB|>4IJ9Mpi*P`G%ae>dmz1!cv<*s{lKT5HnnS2Rz?^+LmMv!t=2$P#reCskL@ScW=LTo=SZI4*4HzC{6&ff1Qf{! 
zmTdX+RGGq#?W#t%u@wfr@$xwG*=>IcMK(PNg-wk6Fv~NY58^ubH7Ta5v!t!@JiNgc zQ$*!yUPP;E8X&Ksxgs61MNz!4HMaF7J%@4wgH*Qi(1tU^bgRNfJ-+2`4@GRwF892-k0bWMv(WVxcs&wgMSRO1gXHdr?id zV{Iud*FLiL@(v|~W$acjS~>8wRsbAyTMcmMh}R)nW&Cn(VK8b86!f$#y!mH>ROBQC z>){ed?G?(21)xO*9-7S6<;ab6SpfndAmKn?@2&CQzowQ%o2 zD3vl+nlm9YZbWj~;8pHt8q8MM&!=PX0^WDvYtP`Cuwm4P$kj#_>Q4c5G;jX7#;Qigir3iw)yDO7MSYOzt#gZG=mh%gHrpvHw2OA%H8fRGo^uqzCi4EmX*f%1pa$2eOkj z>C4C%xL)Pr-OOKRIjvw5(18-A*q(mP-1O~N4`g@fZSGYPy$?j5ek=Yf>wd(^I#`l1 z>{n!74^Z=Jox$)&7^>nY41?=P+xf0AHm7DlV+d2qv#C>85nvGBiR% z3uBP!n?tK@kYjzF8s2_7z(S@;Y6l(Zg_X~ zu(v6FkxVH2k`01O-k^oWA0+Ei6i41bH?a)Vv);tluOc}jUkMF=4>gCE^LvRe}jyeqkx{3RPGpr z*)z&Ba{`vtoX%2=S^9u>I3}^uvZ((k!M`Y}nM~k1?`Kfp+_=cJ0?H)%r>)bojjzty zhs3jAzf4+_J6bkRECx1c$wkz8!y}JjkOL5Zm-KAxs~{yDAb$w|BUBZQhK(?QV*579 z{4gRY{xRs0VE*jK$fQtOGD#$ZK|_rSR^f&{(`y|45XZ{Ut~9I@M-ZZsSCWi+>er@U zCFPz1DeXww!^u*WCehhrJw?FbEgwNtsty^QP7|8viZWT(*DweJ3;c^~Np{}P1)NR( zATaaqOlW98Rw>a!%ll>}*-6yY9b(XgM~-hnAHVX6((%-d^@)%Qs;FK{#@Ca2xZYVE z@$tJLbZj28`(}OhMOTlyja`54y7nqn^>ko9(P6H#WT6wL5d=GCpw;MyB_Ye-{enBd z`|;PHm7xrzR4IK3sNr zE`f}f6oqS>>uk2|cgKk%k@tOH?Q$rEzqmr%1obYtWgW)LyVFie60D~G7Ma`prtz*b z7If2wOCC+f=scwdP{mIfu_eLedTq{jx7T)Zo7qXx=Hi2g8Gx9Th?B}D{^N{>RWef| z4N&(0ieM}cU&HHhu4cdxEz{*Z2>Znqzqa+4g%ydD!pY7cZXU?mQ6vTXN7^LfI=%GP zJ_u3gWqtniU&cv6bG=JE&{|niic2a!)UVyp<5*S_0asIZ4ipj z2pUJnX&50HrQMNtZBlcClFAr>LOWI0GYnhV;xiF?V|T~|x&7}N*gnsiGRyQ=@=nDJ zid8DNq@k(7JRwBtYlgu5in8N`a{L$Ddgl740q-`qv{Ze6?5O0nGTx#THu6yBTGJe~=qTB?&MOe_#B3qp_WTT~UxM#zFI7Q1zF6s-|sQv@$b z&5M#*`7fmFx1ZV8JR_O6PlXJI?kY9jVp0W*2Z+?Ru5mf7EuQp(v2p7o#3_|^f!eJx za5W%#ki4kDXY8*11WB1ch{23-GzrP=ig>F`RR2Y)(4Y_hd49KE41UaGukiqa*}2x$ zDyqPrD=SOz90iTo`cBN^2;vJmU>V(Y`poZe01c6BZHt0W?g7S>XyrIp14<}dNPNrg z@N}8N+%OPW8WViI2}*2-tu7s=H;XL3M;T*iv1F1W;%mRKAqN&}zvA@QP(Qm;5amo#HxU1i0ec-+hemaaq%4^M9v*0#9c{Uy~F>7B?};2852k;$;qP zsK9Xk?5ZJYVCBer!Z9*^Q*0fJs?Xu<@O*YgF_AU6p@_-~mwnPmK~g9giyGF7`j8g; z@W#0I#p-XaaWWF1AZIV~^DLL!i|-xCf#$D19Pp(}*)v~dwewXgQ|&ks{aO?PdEjGy zQ$Qsb36fIb3FFP|rBQ?{U4p#y8HKOF22R z=p$4mA8WlMk1e85(19S$vANLZ4^z+=K8u>yKb$Ndc%4W#d~mPiP%W#n`AhE3LKx5G zP%f1uD+U5+8f}anyRxHKSJVFg{WUt^Tz@oS@C^NlhwrP6VO*K zSXkBRJn^k~4&|Zz&lyw?czBLdJL=}YRfP*I=0pfak|6QM%TOYK6s&&US8{Onhq35p zgb?Zk^^O3W!i@=5>A%4J!$1uBzk`ko9=A6g&NrODQarfDJV3dsk$~?zqY_TWwm+ZB z$zcB*Bnt%*B3uK6#UuCVJPZuKlwq{Lz&>(fhK_Rbt<#O8@RU@9F6%N zm~_{I^lYl&CA`~dmHA6Eio}_Hmws;yutiV-d~ho(nkN~!NiCR5|Mx7JmTE1(bF<7S zH>LU_fArY6hP$n!q6KA^BfJJ;*!(AbX>`q2U>9wuzm;*ti;6Y}{A*S5Vz6Dk{(dat zzqf|d|2_usM~u?f&o`{UETMg=`{{#xSf4*m@Xj$guLD9}0f?W0$~Ex;kv*y+5=XrB z02*}@0B49ET~>g*nQ=Ow>J5=e(%&pD-hS4gM zhA#IR;THkSa+v0K&Gh{du$Kp*rMjINt%kq7RV(yUPmx3UROK=4Xo}Kg7Yv}$66?mm zCu?*kL(5eL%s-iaOB52f50dp)K2k<$40qSj&^EvyjDp@lg7axoi^sYDm)TiP$o=mA zv%7xc`|<7O=?xs$2zbc!)S&E6d8q5~s{5S5X;R%tKwBiJCv>NIOASm3F&HGfJqdA) z?luls1pss+jh_u1gATmR{WpeQ;*(t1XkIPTQ_BjITR*}IYz`|50e78OxXhUyb1L2H zs3I|h;IA4PPI@(WT_Hu%uWA1H@B*@u&5P1KJx{7_9Z*7uFhDSj_Ru-A|L+(%7O#Lv{R$UlqSo4t8QG`2Ab6e+w{M8+VyK5z;l~;6 zj;f{t5kiP!B+mlyNIFE>u0Q?(XBCft7W6&4!g;Na!iP$C^_C#3idJ1poX)OzHmjIi zGkYl(ZX!p6L|2~4=K@3gR{p(QR~{jsk6p482BoJDUdtFfbUGJs#|OF9 zNe5NEq+~*cQvZ3(sPk?10H4Re-KFESDes#a8$PLGAmq1k^IQ)CPN2SmJb+og^r>Nl zaVL%@Ea0?&fz1jQieJO>a#T}he9;j|GqEA2J_jSF^kFZvz7@*%u5U+0j zIqTlB*4EPLC&l2HF6qe%b4$>|J>g2IhNO!`Nus=Xv0H)veR>bj3$yb=EqENYMfxJP=SQx%*|EF3Wd15Q#3>a} z7UOwk_AZ7ug^@-cW!MJ7Ugr>@rak0wF%!lxxPP~HNK&#Si_)aE`mhyQbGiA9_5S!a zq**}1Rqj2a&l*h+5A#^eaquRK|O2 z3f1}Q?T(|P1k1ac1C)7aZ$`p}XtJmVZ4eEc_6yE0wfyTh5K0&jHUH`H&T-Yv)y<74 z?n9!&*O^a?vvn3`PyTPb9Ez9$~w=SDSs#vH4ip!*Bq26A|jwaJ=QRIcT-;!RWzX z!z|}(aMZNqz=Y=I&p3<;B(Ha+oM=l^gG$8HI2NFjE}cv-Le1wVPzhrVx_vjeNs+jd 
z5jI&8_T1UQoz!?)R`y`*P;j5~jV3(YJQ@LLr=BCRhfT%DqtOb|j7?e5L`ad;);=h* z`((`!fr-ab4umaUfo4hA!XwBQiHoO*7NLOH*J;_STaNQRu5vC14DW^b^{454mLD*z z1_O5U{Z^YgkMrtx`RDS2i@3p%Xw|pe0yadHR$rNti6rAR0y%*$KdIp`PwUQT<7Wop zxAu|iUPlcUD(l0(NN-{0s^9nsn8+Zc3|2a% z14lqCe%4jJzv4B#c-}C|Cg&0hIF34LUltTo(#atv7kTq^EuR$`FDrN|ku>GPg9vT) z11|!a7n~+~R(>z(qS5s64{2lu0P6>YNzXfPJX|?P&9<64WC>{w zBVMY3GW1AU_{p=@YDoNO(KlQIP%s91IV5rTgG6i^T*gWAEd)}&lfZH|+D{hVffpk5 z{_)a~@kO$s;ouF>j^Oj-g4qw-Q2x~UL#l` zG<$vYL&b~d;<#0)mGN9Jx;h+g{_5+*+RZ>gQb)Bf?an=VN7El)C8wHSy>66-DjFQs zZi&9CTl$DSXYUov5T{iS3BR=m)-o!EGDc> zOpiI7gMR;h-k2I7A9FHlEUs*b4dyU!CNnqxRN1EIO4d?#j{t-qc<@ml_xEsd0|lj= z8%~GXzJ}NHiD~4LP4r{*1Y`8}|L9CenCmc!qwdqQyf)3d%QNelMLSM#5OYJ^xhK;f zU7Lc$4^^5IN?Pu;3k3AFr0v;lDpncC3_AQ2j3M7K6~rm^RReS@{HnAEoBi-O0uM+2z+rt(k9=2QZ1MUko%diO?4^lI)>u_br#B;*ZvZTMDQ6`#W z(EjiB&PTh0-zXQJy<8PNmKgX5?Fgd4;9iKQiSM=`_=wjgB%=xiNxz{3!Qh4Cfn1nRFD43`ked9F#38^vzED3Tup^k!Saj<*TadR&_h2N6fNCT_Ulnq9) zDmC-nU(9l>Fsa2Rzdma;H|+ynuB(>+giEy*r5? z^Z>TP8$7Iu;LTpimPA$Gt%beLQ@evU`^X>U>@gFZbX21b)=Vs$HVa=;(Bt1Q0t+*< zlFg{mU3HPdDc5+lbySEXc(}U^@7VesdI=w)=37Qo-t;jLm=5$6g>lh@gOSs&zh8Hl zwVs)=t#pQ3iyGj8{RpD`?i3}+pvG2yzxp!%eT=ByBx7GBI zA|oS~_7e{+Q7VqnmNt3|`l`+;N@cL+c<9YM@L)rL&euAhteUT4Jx8zk`bcNoq%H>C z;&&E4rC3_%`eTRdfe!~GhW0(R0IZ}oc(O5Xc^3%VfWrnBAS;C!D;DMfUY44IakHZP z_k;mWy2>ER@MI_qB+R(QZeUY;k9S4yTM8E2Y$ptVNLY)LVW^h-oasb{yP)=S?^l(R8KG;uQph*hHF!)HzEZ3%bi-wsb3{F*Hf@ zx)N$CS1_O`8+?OnzWw!0;!Mi*9!|EM7_|x)CS)2I@b=A(w6Xil<)0q~9C7OT2M+jj zuy+x1@3yShro$^dX?r+)Gz^-g~a-PEN zybKjZ8pz0SGSLLqZ;5(jaw^4J6N0CX4KSln>%L**4@+3+zQHBM{L-EnqEbn$J5!Lz-+kbJwSC`U|IZMU zNM9r#d7tN1K(Knm*Yu<;73MecsK`76O*XTNYUx#!AB4pD^Ns-8NB@|`U?LFJ*spXE z2g^gK1@a&bhm4%Sx6KVp3~n)nl%fHMv-ywZhBR*TZue^bfB}))FK!#~&LQP&@c45T zMNW=zy$}&`?0C~TRB%4F*|A1Q29d=Z^k5bUhvM7PNdx-PkCx0)bP`>!0M%}oRl;Bk zM$sN|enKlTb2Zw-`(pOa!vBaH>z}xJ+OHa_O`i552##7G&eTNT9^XBYR_bFr0)%&e zS~w||eY2S(5@~83$@!CxDee4DJsXGjuj%>UNQ-z^>u9>#zsRqZg^JxZDkz^0!{&Ya zD{I@Ao8Hv+QBE`+JpkW{?9R+zoeiw#jEE&(en{MYLV5>~klx~zfX})8W%>T6>P##r z?{DknA9kbW3C3U~`l{dY7JO{ojy)10)q;blIfR*(B|o3avkO+OZu#@rl>ZSp%t07| z6DV!IU!M5BCv^OF0E;Z(;+^+Mm#|ds-zSzk>%c(_90+Dtw751>INXOC6?`}DP#PAX zxUNt9@MMiYOYj@~m4pVA+>oOh{ZOTWhc{vg4QheLXtbCHs``!qz#YzU?B>&YX3e>~ zd7z5aD26^Tn!y$UFcFE)Oj#YtB0Idr6Tpes7Yevfo3qSt%k0SEuIOP>pdu`UaVF=O zgUu`;XM!xa({^n^1poPC`ip}90~3s4tf#f!d!Os4Dl^f`oiFAii^MjS+h{s7K9R5A zg=cs7)DiO}vqi2WRtwcM*ye;sv8*&(^w^AHZ!ds$t zZPhcDF;cOz8;u1wfX3&!f&rD|Yu4to&Az)#;+=hrfH%>!5OOHV8WF>ON||IqKO;*- zXUfP%>3&V`7+6p)LNP8PM9TvMgO@Iypj=0!)URPY+?ycq|eAe?$Ol3tzsivp8- z)Sq1(G9YDNcYp5c!%mVYti}52Xu|BY#b}HGO&Y<{yEnyBT3ag;!;D{~9vV-G7oG?h zn;gvB;25wsO1C0DxhUK%z0WeQKCTuQ&FbA-IO&$3>w8UsJRYQ;7zuJq_E@Ycui0;@ z?mti?k8)tU=dUoc+PgPGMGqKdFo^7}>t`a3E+w7kB-k#mLW|Ffwy&_b3JG0tmmeOM z(IAfK<>qO$_gOa}d1ca=0L3`+W)&KzzejFxhsi!10d|AtsoRBupv7FV`#%#a*KvBL zp64rm+lI2LMuj_bl}ni0p{^r?Q8OnkyCs9dWZ3bGB8W>Bu#SO6@QjJEa!-T~P-byd z4z}$sq@WdjEVy}~?k0f7bN`XGSZf1) zxB!FCFW3sP)#GvD~5ayg>d!69X0K_XzVG$e{$AtxV5LnxcA&agcXXI zHXy_bYXffhgmhNO$wguqpa;}S+1}_#!xHU%`rM`Rv=(8=;x;*zB^rrC;fa*kcFpR# zz1{Ww1IEZBhHf!VciNI=juR8F#m@%o&uoO}cZHA-jK4mcPH`?l-V}07>UO`E?h;mv zA9Ud87sf-k&~T=?x1Bs*D(;jor+*2qBnk}Pf?&La`PhF~=z4WO&%?BK6V0qWfc{t< z2m(gy@FBQ`6l8!2hn`_PGUkBH-^ZcZS)mE^cnMKxdHswxz*>9TA4unXEWcH>)h5Zuc z4pEd^sU_v;`gVLN8kXq-n;lW$kS~W^THjvndRZm>G zq~$hwa+(xSvAT}wq{T++Jg($vy&$N zwLa6I0;{+Bjg9>5I!j3@A0r0Uq<>hBZcuAS<6mcNzGCzG6&eHIe|mpqzKQ438`F6O zF!uQ4R~wW`z2o#M)2W$i)ZA20N)SrO970)L>5hZpHh%WE@bf;yc3bL+rhe(-1TJ&Q zZt1>FGmGV0H~pw7(R~|&i@hNEItRoESR_M@^^+1Nb|zB4gVPCp7W@e#HUhlHIcCVm zqw!g~m|6Q05!OA3lsus`N>4?1*!EqiNUD`Fv$VKHrsUBpC(Zb;oeraD4Gjytcj_<>or7{O_C}rGE 
z$hXMoCOzl)bRy9;dfk2F0eSqR?fMy}CzKMBQQ{ys`55fD+s{T#PLJXOY|s{bD|p0T zB~0d`V+OxWewU%JN&rKaSQ0RFeWy9CW7k{lXcf243HPX>_201shao&QI8zu8lR~cb zv^D#VuN9+7Lc~9T!r&dlf=N~^4!J@AKY7eOFBOPowtq7+S7)mphi;kTl1_(9F`*r2!R70OiNyl z4Lfw5K8q-980n&s-!38&sSLJhdZ?6jR?qF8V72H1@w5t@_G};SAab+Ea7xx0WB(3X zWyJx~xz0QVQ$5wI+P%mh!1ypN$?it{qmAh{az)hXgm}teJ1!QZO)UB-j&L z7$bgH{YefOWm&tT>LMSCW^PD*0qvK7LEw`CTSh@{rISxF1PjUAU-w;=y$Fm!Ub$Zv zFPOwR>W~B?!gYlsuHrX>AF>s_NSrc|{uvw%e zYXmro$EeX+D805>py%k9)lw0wkS~8BWGbb|m-o8`jb)iI!YCPjIq}r+HuHX4Hw>`|R?m0gQEoG1m7b*7``?qn75=;OZPQ*ya z)WgZ^TJTXAnX)H32!Wx|vG39Vv8_=tg{N zd5w}aO@iu}FzI)Huiy9E&s@h)A+2U{>BoxJUSldG1$}t`+1f|_Fp~@2gVyCHhuXfm zi+*Lmm*+(ni@+hJ8n;S|=RoKVMu=mxT}G(Xg=0UALcQlweG`9*cCBN=dJjXWot(i@ zogvQ8Ogk)}Ez>(Bp^mL}qxwC@X}?SViYgGgvUU|5Z&5h#{!g*zJ$_vkH;ogVXF^V+ zp~+H5^67BpW18Aw!LZauRvU#Cxm0rSYgPuiU^A!wy%RStA8dwK=on<2D0qBKvKU#s zmWPCvzdcMt*_WR~UflT(|R9A0xh17pe|)T6*}wvV^t zxaX}`oQvt80E4SZ@ph2jD&In+CJSv7c6U(Kyjw%tt7v<^L}Jjdclq@k_Ut|47%IGw zSAX7@?4tmSJ_|vB(@4e~;emCiEs4nST~+EO9a@`#4lZVG$buwCCuNr z7>Qs$^RYH^-QP*Q>o1URe;BDqM=*LHj!l!SGDB`nj^X0zz?8A?&2Csm^Pl7oZr9{$ zg7|BUU)|NIR3W(=5HSv(M{WQ8szF(P&bz6ie_HXmUS~&Pr%qSCPn~g(c1_S+!%AMN zxxALPX+~Z?!i?C-R~0$a)er7XQtY1IoBuCq>8VVmtCiKg*{j^&0q1yC{S5Z#)cn?_ z>lwx7PzvrN2CK-NDdcuOvrFaMXI8oqA*by$^Muf59ge~V(*5Aw3pr%`V?X}H+qL_y z;}(j$StmoD5SQDZ1ib}k90z_A_q9v)k=s4pgBfv<@xM0u7#;#FXL(olEw0obaZ#l{ zCELv4S!Gm_)#LsS9+F5QV)9AssW+ozn&kS)$;^U3SJ~x>R@f)V?@!!?2zfBet5A4R zY_J*GzjU8c`xtrL9y{l`K~^`FE^n0c`}VF@?mX^OxWdmmWFjOy_QAblU)XvXA5u4c z(7z2ryUQ>ub3$jXSU|bF&{vMKjzV<7o&G{sD|gDQvykC2vrBiDz-uD?)sDy?(_Im8 z5YWZq!!8Q04r`o-OPnx)5Ql1H<@>e< zeK}MTkGzMHIjhYUwIBXoQf50o4c5GkHPrJ;V8ZpX;+)2Dc3GKxLSaqoELnaQndq9V z{*+J_0_hm=3}G*2Q#wxX*c6;MHH&aW3P+7WdVX(E_5jn(SHh5wzxlT z0CT0#38Wjx9R-#bF8ku_DZiI7tRMcurFEsR>A44R0LO&k;W&qR>!jV#&6CAFRCC30 zR50VvaYu!grM#grJ1s4}JcOya!;U(mQ{a709o8J8#m25HIT6U?lv>w{R;NtdJBIIH zQ`ctZHnuvSjr!1n;7IO+e=JQBWOIi)?;$k{5iT^mk<};c5zv|V$VI{NQOOZDjMT4{ z+BbnLjwV`sOtWh}8l>Oi(%z%hE0CaAgMhaC*F;D?M;Zmo>VZCtl`P(VIO`kF z@3&_&M8^nOiLq`~IlBk)%DZicl1=oG5k0%~|8ujHRRo6IyKxGBA3d50(T`f-(6LP= zfyF!LO2Mx4OajRa*HZD0-FLniem7Z@`~VRtB>n_DWv{O(EpTrgQD@PErGEn*MHXuw zOXz_~0k7T#9K&;p-P#!Tz+aXS!L@E?u=_x+o{kC3YsvTGh%6Vt-RMcu!4NHG*_m?d zloA79w~ymV*gyW_j(X6O;wft;um^j@10Y0*oFex$mT86=yI4P?QpTf)KbEdiE(_%X zO-4RJXam#kcb6mIS-ASN=$iabt$c&v@)EX-`>UKv_#`>L41i6~jO8h@9u>`$)N`kK{lL4>EfEU5`T|*uw9VNo z1OE(Z>)U`R=w9?p;=qXi;s#h-cnO&3HVU zUaGrpjOD1uknB@fk_Q}sm~dfUa|qkuRPa~gaQ+&|{L=9k0Nr_pi}=!bhXBssJv@~8 zf2QNCz4SoN;}`Z*l}euvIhW6kuMWz0i0rXC5DOYy2&!kGwDMo>8njne4ujqTG|(8X{7ip^Jqxv_De^| z^#WEVy;BN8yT8VcL+n~{zo4>L?#Ze`79gFm)@aBH)^nC-OVf$o*4J8OO5hT~=KFvp zj-sbwg}WUsKi5M@Iea%(@k!;hx7;8MuW!+gxn6oZtqWZQbZy$%d(#GR?dh$CJJITX zQv3jV!L@(&9AxEmJ6qlp%hWF<+(UI%DCbBQ?Y(*1gm}36xf;37>R&8Ylm-cNwd28~ zIWrRjzn0zP8rih_gLU>ShKPu&7C;y74(2mi;6&V46;?YKcEMxO3zg1TqE8C?OS|_q z0#9^?bJbljEggZKJ%*U1J@b&P{0IAQt^Q0C)=Nsroxk1IMsa!!OqZD+ z$bwizc5?R?`z1~MQGW5E|4>_CGAF!AYs}RR;dl%15pGZS=Pe_`JWuDUmPXCDC1lPB z->SBH&>n^$vSua`lvqZPfdT8^d zD{!l}OJuzJiQ9#JPTK8fkT1c>^m9xCRy$eTBf>5w8@CPag=22%Ulhxj$c&s+B7r`H zG@<^pT1bWXbVa07hEM8e%Cr-02Qf!Jz8*D)VJjYE{fHbcdy@cSG)CUC4b{4@y^dS5 zTjxZ@fmJ%PGISZPGfSLN>)GKQ$&e!Mm%IJPx0*v}7Y`7S%xu?uTK=egH5VW&E61TO zn^fq-wHq9A$2w0p7gsJOCEbAQ2&KBi{g@5G$G9kMn{KG{vE(>kNl*}EMd>}ydq2nb9R!{RE33z%Rq{h7X2GG;YUM5P1wXk zkuo!lF&4tATuOI{PJYS;BD2v)cR5Q09m5uv(ULu_$rAo6PQ?w^x3YN7V4EulH9Pv* z|Lf(--=Y4Z|9g-blfjHI!^kqEq8j_whp{vw%SVc_#Eh|K-$KY(LPE*M(q>7?Clga+ zOUN>oGGyN(L?{W_Q+#K9o~Q5kFZjNHc)y?LKIeVUx#ymH&$;Kko*Pxu`)3*Q>EEk( zKD(=Q=N2Ro0Z@C7z$zM7nMt(PXOYxx7=nhvG7_Qu=)v$b!{&FQ$ct&2*K_QhM8}SE zr{8y~Slx(l^Yk5MWDV$cidcqjfw1$}IRF6WY;34^!7HXxmVRWg5@BzlC3K~6ns#~z 
zO`Z6``%U-zRwj8`Kt$4^=89WmfOx>q&w=%rcTU96T}4o28f|BxreUiC$a`(zPbRU&` zE9us7T!~tCuwDbqGBXcNmSi$-pfN9RQg&syH@-qe{+M%)dbJ0v;pSABbZmaM&&Sp+ zyi1p><`l`%N>qNO@yMK^n*pz7oZ5PvxmeGGL&GkK4IbMbuZ-9Cs*Mk5wO2yVm7|hA zmj{FhtVNn2_2eSmmQ{(1#R(&#J-#7ozfNv)b!iCxUi(0t6JLsKr}n&9g`q#XX%5pfhKFI^-#SJIhHM<8Wklly^*!X+9KHWOjMc^r`j+Z$~Y(^ams`$S&H~kRI$rP-YII zeT1=cZuUtzaJo<0i7JhQ*WT|m`fmDcZ7xOl@8*0)Jm}<`w?ivW@P8k^Q77^1knc+C zkn@$#d8GO_S~UNQdOimC%Uo4nUJK(Ry>-r&TygVwm8$*>=F^XY{{7F?XmdH7DX2hA z@eV26lj+8DH0;&|xc#bTNG?$|ko>^;OcC-16sM}uo`Y5=+Xg*fozt}2z1#A2N1iN0 zSHS;$1TcK1xNI#aAviM^B72av^4TCAeRT4vES(62*i$h9YBfqPyCg4X5TvqlrxI1m z^5F24Lnp?H)Z)i1TFV!hb-r5IB)Cob{fqo!D?p}v`ep7yDoU+YwN?Bl^IO;z;bc2cJLfFohF!JhBC&68D8a-Xs$lEvq)Ed@u~aaFD# z;BDAwLBkjdS;M{Az>N`SJ{Fo!giwyUYHv`Uu%kp=C45dOTnc51h8W)_?sp#>BMU?~Gj8D*k@kgKdZMceiO5CBHx_6{-wi3;jIR z;r@lWA%TXE0g}O&v)~qP(i`72k^k$?`9ekwfQTpI-cFu8Bej5X&YUnkTSylMdm{W# zF%IG5mgJgtUUKtJ<~b6UI~MjmM19d{6FUo#6j5Hw-o88WSCU6)58^ zdOxRMjLTUilVQ?sJj774SG?VI0Pq(zsXF>pe^xPNT9~|NVH%S)i_Xv<@l`|%8t@!5 zN>w!3dOOiK1tF_G=IxpC>VYXlzegyIy$evTdm_lC^Nl8vI-mXZp3t9t#zBTBZ=5qh z{r2f&RsF8hWmt;mIgPp|KBJ~;eqDWt&(#OEzn4gJp$3K*jm$%v(#-^7pb9CZw6jkr z@*VY=EzaLM5|Qa*WNRN+(?wp47ce{{36VY?(WF*K>Q1^V)^r1BQ1XJ?V5W=|@AvP7 zc9Qkf!}77}I2<7v3N+;6Gd^|*o!93Vc@itRdbr!GL!8SVsxAXpVd8=Xo@-`x;FJ?% z=GDfIW_`MziccX)AC)R@I%Au5=Pk%;{n^oM6N!bEQwPf)u*?ZVbQ~zC=psVFio#(^ zn#&83&7~`M-ogcOhmNOSxLGW8)LYn;u!DyE5-%xXXVjL@(X{ysTvZr5BK~YJ zqhe&@kOi@id#z(})9JVCD83}t)y&lmLHKTLm3%gaq~b;Bhd}TJF5_0W0x!L zeR6ln*jD7b0g{!#V;g%zvs_7KwLTtynDP z=WLC+5f6GjB2xoC1V@I|LiTDUr^N9I~gK{o;CbSvdQlE$Z@9d^{>S$*+w9u-p zxTXqaF^iB_uZ{Z6^M-b~g@)2q)O^mz{QIPt`SkL6v7KP7dd0NO4{AK`QnU2!LWm3% zS}hh2^=Wwi0HHqPw(b=XE`foRV4=$S%0N+^ zR08Hv6Z(XL5rR5u82jZr?OiyM%t^Z_$nYqb2dRTs<-Z(K^M8nNgo@f2+cQ!0=0*iRX1u@RU?i?DJxPV13ol zK$F$(^Rf2eZR>ngtS@ zg;9l-WN5m>0bJS}(qm;8sQt`4Pm;VR86homeXniWVOzuzCZTFbmuK#_k6+7yi-2m+ z8=0^*lQ`fm$pRb1SMPdwc~#+-hd?@w9w+J$;(?q=duu)#ylYh1GwL}jO80&reya;2 zm!+k%XPIx>LZ9_1^>g#G?{Z!fLH+{cg@GRR1DBF9yRe*GsZ7#movDb<#ZBa|pgZTp zS*duIH{8feWY?@eB@}j!Mj1yGDA&_(B|yfos`;O~!xvO9oCL#F>fz;cn4hv*YWqWe zsUyS&6zB!``MQa4?;HfQTbN2n1a%n*VBjQp;9@Gju1n>1Od16f+2`5-8EJM-&R1s4 z-KcLX2z$Dj;{*M?G9|83`hr8AUlnT2Q=7QO$?e4Zkb*G(Q6~sxiS?nkM_>lC9iq}SrB}#2*cMla)hd{{k z-LE37o?~v+5d`kW@VGIg;Lrr zOWP-9ME}qvuu7gR7&5)mZ9J?<@D@+&Ht^kfHVyJON-z@w-xAeb{VKplGB$cwURgNYk;l2E z0elEHCBSp6(eO171Pgb7LwTA={wxYBZaU{0(;dm8@tje0O;^fN=SnDg3CX{V5{r}*Rm%N%|E$^T{eGCWvh=NwRZOcTZptt z^_aLmML4j*ISeD7qKE^#;tiRA4$ovEpwr&)*-nq$Z<4_ly_heC^=FVbr~h$E1_%wF zJOs!A8r%(?0Qw+N`+T(qv|;e-*x*lhZ3>D~@w06qZ$*cGG@Zmxc?gsqPoJ_{f8Pc~ mCdG)`!S;vn|MVK--{TKj!raj^ + + + + + diff --git a/assets/images/icon/github.svg b/assets/images/icon/github.svg new file mode 100644 index 0000000..22590d3 --- /dev/null +++ b/assets/images/icon/github.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/assets/images/icon/instagram.svg b/assets/images/icon/instagram.svg new file mode 100644 index 0000000..04baeca --- /dev/null +++ b/assets/images/icon/instagram.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/assets/images/icon/linkedin.svg b/assets/images/icon/linkedin.svg new file mode 100644 index 0000000..6266267 --- /dev/null +++ b/assets/images/icon/linkedin.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/assets/images/icon/me.svg b/assets/images/icon/me.svg new file mode 100644 index 0000000..7331b5e --- /dev/null +++ b/assets/images/icon/me.svg @@ -0,0 +1,65 @@ + + + + + + image/svg+xml + + + + + + + + + + diff --git a/assets/images/icon/twitter.svg b/assets/images/icon/twitter.svg new file mode 100644 index 0000000..69511c4 --- /dev/null +++ b/assets/images/icon/twitter.svg @@ -0,0 +1,6 @@ 
diff --git a/assets/images/icon/whatsapp.svg b/assets/images/icon/whatsapp.svg
new file mode 100644
index 0000000..3b91dd0
--- /dev/null
+++ b/assets/images/icon/whatsapp.svg
@@ -0,0 +1,6 @@
[SVG markup stripped]
diff --git a/assets/images/icon/youtube.svg b/assets/images/icon/youtube.svg
new file mode 100644
index 0000000..d951b94
--- /dev/null
+++ b/assets/images/icon/youtube.svg
@@ -0,0 +1,6 @@
[SVG markup stripped]
diff --git a/assets/scripts.js b/assets/scripts.js
new file mode 100644
index 0000000..c71f5ce
--- /dev/null
+++ b/assets/scripts.js
@@ -0,0 +1,12 @@
+function toggleNightMode(){
+  if(document.documentElement.getAttribute('data-theme') == 'light'){
+    document.documentElement.setAttribute('data-theme', 'dark');
+    document.getElementById('mode-switcher').classList.add('active');
+    localStorage.setItem("theme","dark");
+  }
+  else{
+    document.documentElement.setAttribute('data-theme', 'light');
+    document.getElementById('mode-switcher').classList.remove('active');
+    localStorage.setItem("theme","");
+  }
+}
diff --git a/atom.xml b/atom.xml
new file mode 100644
index 0000000..d3ea6e2
--- /dev/null
+++ b/atom.xml
@@ -0,0 +1,26134 @@
hpc.social - Aggregated Personal Blog
2023-08-03T20:41:18-06:00
https://hpc.social
hpc.social
info@hpc.social

LSF client on macOS - submitting from your laptop
2023-03-01T19:10:58-07:00
https://hpc.social/2023/lsf-client-on-macos-submitting-from-your-laptop

<p>In traditional HPC environments, login nodes are typically used as an access point for users to submit and manage jobs. Although login nodes are still used today, HPC environments are increasingly being used by a broad class of users with domain expertise who are not necessarily IT experts. In other words, such users may be more comfortable using their native desktop environment rather than the CLI. Given these factors, in the commercial HPC space, organizations are always looking for ways to lower the bar to access and interact with HPC environments.</p>

<p>Spectrum LSF provides many ways to submit and manage jobs in an HPC cluster. For power users, the rich CLI functionality exists. There is also a web-based interface for job submission and management which provides customizable application templates to greatly simplify job submission, while hiding the complexity of the underlying infrastructure. A RESTful API is also available to users of IBM Spectrum LSF Application Center or IBM Spectrum LSF Suites, which enables organizations to access the HPC environment via web services.</p>

<p>I&rsquo;ve written previously in detail about the LSF web-based interface in the blog <a href="https://www.gaborsamu.com/blog/easy_hpc/">The Easy HPC Button</a>. Here, we&rsquo;ll take a closer look at the available LSF client for macOS that uses the RESTful API. First, a bit about LSF clients. LSF clients can access resources on LSF server hosts without running the LSF daemons. LSF clients don&rsquo;t require a software license, and from clients, users can run all of the familiar LSF commands. Additionally, LSF clients are submit-only, and don&rsquo;t execute jobs.</p>

<p><strong>Note:</strong> The macOS LSF client uses the LSF RESTful API. This means that it will function in environments running LSF Standard Edition with LSF Application Center or LSF Suites.</p>

<p><strong>Configuration</strong></p>

<p>The configuration used for the example below is as follows:</p>

<table>
<thead>
<tr>
<th style="text-align: left;">Hostname</th>
<th>OS</th>
<th>Detail</th>
</tr>
</thead>
<tbody>
<tr>
<td style="text-align: left;"><em>kilenc</em></td>
<td>CentOS Stream 8.4</td>
<td>LSF Suite for HPC v10.2.0.13</td>
</tr>
<tr>
<td style="text-align: left;"><em>My-MacBook-Air</em></td>
<td>macOS Ventura 13.2.1 (Apple M1)</td>
<td>LSF client</td>
</tr>
</tbody>
</table>
<ol>
<li>On the Spectrum LSF Suite for HPC management host (<em>kilenc</em>), add the following variables to the Parameters section in the file lsf.cluster.<em>name</em>. The FLOAT_CLIENTS variable determines how many floating clients can join the LSF cluster, and FLOAT_CLIENTS_ADDR_RANGE specifies the allowable IP addresses. In this case, the client system is on a 192.168.x.x network.</li>
</ol>
<div class="highlight"><pre><code class="language-plaintext">Begin Parameters
FLOAT_CLIENTS=2
FLOAT_CLIENTS_ADDR_RANGE=192.*
End Parameters</code></pre></div>
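<p>For illustration, a tighter variant of the same section might pin floating clients to specific subnets rather than the whole 192.* space. The addresses below are placeholders to adapt to your own network, and my understanding from the LSF documentation is that multiple space-separated ranges are accepted:</p>

<div class="highlight"><pre><code class="language-plaintext">Begin Parameters
FLOAT_CLIENTS=2
FLOAT_CLIENTS_ADDR_RANGE=192.168.1.* 192.168.2.*
End Parameters</code></pre></div>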
<ol start="2">
<li>To make the changes take effect, issue the following commands as the LSF administrator:</li>
</ol>
<div class="highlight"><pre><code class="language-plaintext">lsadmin reconfig
badmin reconfig</code></pre></div>

<ol start="3">
<li>
<p>Obtain the tarball <em>pacdesktop_client10.2.0.13_macos-x86_64.tar</em>. For users with an LSF entitlement, this package is available on <a href="https://www.ibm.com/support/fixcentral/">IBM Fix Central</a>. Note that this package will work on systems with Apple M1 silicon through emulation.</p>
</li>
<li>
<p>Open a Terminal on the macOS client system, copy the tarball to the $HOME/Desktop directory of user lsfuser, and extract the tarball.</p>
</li>
</ol>
<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air Desktop % pwd
/Users/lsfuser/Desktop
lsfuser@My-MacBook-Air Desktop % ls -la pacdesktop_client10.2.0.13_macos-x86_64.tar
-rw-r--r--@ 1 lsfuser staff 18452480 27 Feb 17:12 pacdesktop_client10.2.0.13_macos-x86_64.tar
lsfuser@My-MacBook-Air Desktop % tar -xvf pacdesktop_client10.2.0.13_macos-x86_64.tar
x LSF_Desktop_Client/
x LSF_Desktop_Client/bapp
x LSF_Desktop_Client/btop
x LSF_Desktop_Client/bwait
x LSF_Desktop_Client/lseligible
x LSF_Desktop_Client/bsla
x LSF_Desktop_Client/blparams
x LSF_Desktop_Client/bhpart
x LSF_Desktop_Client/bclusters
x LSF_Desktop_Client/blstartup
x LSF_Desktop_Client/lsacct
x LSF_Desktop_Client/bsub
x LSF_Desktop_Client/bugroup
x LSF_Desktop_Client/bpeek
x LSF_Desktop_Client/bacct
x LSF_Desktop_Client/brequeue
x LSF_Desktop_Client/bjgroup
x LSF_Desktop_Client/bslots
x LSF_Desktop_Client/lsrun
x LSF_Desktop_Client/bjobs
x LSF_Desktop_Client/lshosts
x LSF_Desktop_Client/lsload
x LSF_Desktop_Client/brlainfo
x LSF_Desktop_Client/bresources
x LSF_Desktop_Client/bladmin
x LSF_Desktop_Client/bstatus
x LSF_Desktop_Client/bmod
x LSF_Desktop_Client/bpost
x LSF_Desktop_Client/lsid
x LSF_Desktop_Client/bentags
x LSF_Desktop_Client/ch
x LSF_Desktop_Client/bchkpnt
x LSF_Desktop_Client/bparams
x LSF_Desktop_Client/bjdepinfo
x LSF_Desktop_Client/bgmod
x LSF_Desktop_Client/brestart
x LSF_Desktop_Client/lsltasks
x LSF_Desktop_Client/blusers
x LSF_Desktop_Client/paclogon
x LSF_Desktop_Client/regnotify
x LSF_Desktop_Client/cacert.pem
x LSF_Desktop_Client/bresume
x LSF_Desktop_Client/blstat
x LSF_Desktop_Client/bhist
x LSF_Desktop_Client/bqueues
x LSF_Desktop_Client/bltasks
x LSF_Desktop_Client/bresize
x LSF_Desktop_Client/blcollect
x LSF_Desktop_Client/lsacctmrg
x LSF_Desktop_Client/bgadd
x LSF_Desktop_Client/bmig
x LSF_Desktop_Client/bstop
x LSF_Desktop_Client/bswitch
x LSF_Desktop_Client/blhosts
x LSF_Desktop_Client/blcstat
x LSF_Desktop_Client/brsvs
x LSF_Desktop_Client/brun
x LSF_Desktop_Client/blinfo
x LSF_Desktop_Client/lsgrun
x LSF_Desktop_Client/busers
x LSF_Desktop_Client/lsloadadj
x LSF_Desktop_Client/blkill
x LSF_Desktop_Client/bbot
x LSF_Desktop_Client/lsclusters
x LSF_Desktop_Client/bconf
x LSF_Desktop_Client/lsinfo
x LSF_Desktop_Client/lsmake
x LSF_Desktop_Client/blimits
x LSF_Desktop_Client/bmgroup
x LSF_Desktop_Client/bread
x LSF_Desktop_Client/bkill
x LSF_Desktop_Client/lstcsh
x LSF_Desktop_Client/lsrtasks
x LSF_Desktop_Client/README.TXT
x LSF_Desktop_Client/lsplace
x LSF_Desktop_Client/bhosts
x LSF_Desktop_Client/paclogout
x LSF_Desktop_Client/bgdel</code></pre></div>

<ol start="5">
<li>Following the directions in the file README.TXT, set the environment variable LSF_DESKTOP_CLIENT=yes, and set the PATH variable accordingly.</li>
</ol>
<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % export LSF_DESKTOP_CLIENT=yes
lsfuser@My-MacBook-Air LSF_Desktop_Client % export PATH=`pwd`:$PATH</code></pre></div>
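<p>Note that these two exports only last for the current Terminal session. Assuming the default zsh shell on macOS and the Desktop install location used above, one way to make them persistent is to append them to your shell startup file:</p>

<div class="highlight"><pre><code class="language-plaintext">echo 'export LSF_DESKTOP_CLIENT=yes' &gt;&gt; ~/.zshrc
echo 'export PATH="$HOME/Desktop/LSF_Desktop_Client:$PATH"' &gt;&gt; ~/.zshrc</code></pre></div>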
<ol start="6">
<li>Next, it&rsquo;s necessary to run the <em>paclogon</em> command to connect to the LSF Application Center (or LSF Suite installation). Here we point to the LSF server <em>kilenc</em> on port 8080.</li>
</ol>
<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % paclogon
Log on to IBM Spectrum LSF Application Center
User account: lsfuser
Enter password:
Specify the URL to connect to IBM Spectrum LSF Application Center. Format:
http://host_name:port_number/platform or https://host_name:port_number/platform
URL: http://kilenc:8080/platform
You have successfully logged on to IBM Spectrum LSF Application Center.</code></pre></div>

<ol start="7">
<li>After successfully logging in using the paclogon command, it should be possible to run LSF &ldquo;base&rdquo; commands from the macOS terminal, including <em>lsid</em>, <em>lsload</em>, and <em>lshosts</em>.</li>
</ol>
<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % lsid
IBM Spectrum LSF 10.1.0.13, Apr 15 2022
Suite Edition: IBM Spectrum LSF Suite for HPC 10.2.0.13
Copyright International Business Machines Corp. 1992, 2016.
US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp.

My cluster name is Klaszter
My master name is kilenc
lsfuser@My-MacBook-Air LSF_Desktop_Client % lshosts -w
HOST_NAME type model cpuf ncpus maxmem maxswp server RESOURCES
kilenc LINUXPPC64LE POWER9 25.0 32 30.7G 15.8G Yes (mg docker)
lsfuser@My-MacBook-Air LSF_Desktop_Client % lsload -w
HOST_NAME status r15s r1m r15m ut pg ls it tmp swp mem
kilenc ok 0.8 2.1 2.4 7% 0.0 0 1156 551M 15.6G 10G</code></pre></div>

<ol start="8">
<li>Next, run the LSF batch commands <em>bqueues</em> and <em>bhosts</em>.</li>
</ol>
<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % bqueues
QUEUE_NAME PRIO STATUS MAX JL/U JL/P JL/H NJOBS PEND RUN SUSP
admin 50 Open:Active - - - - 0 0 0 0
owners 43 Open:Active - - - - 0 0 0 0
priority 43 Open:Active - - - - 75835 75803 32 0
night 40 Open:Inact - - - - 0 0 0 0
short 35 Open:Active - - - - 0 0 0 0
dataq 33 Open:Active - - - - 0 0 0 0
normal 30 Open:Active - - - - 0 0 0 0
interactive 30 Open:Active - - - - 0 0 0 0
sendq 30 Open:Active - - - - 0 0 0 0
idle 20 Open:Active - - - - 0 0 0 0
lsfuser@My-MacBook-Air LSF_Desktop_Client % bhosts
HOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV
kilenc ok - 32 19 19 0 0 0</code></pre></div>

<ol start="9">
<li>Running the <em>bjobs</em> command will result in a warning message appearing on macOS stating: <em>&ldquo;bjobs&rdquo; cannot be opened because the developer cannot be verified.</em></li>
</ol>
<figure><img src="https://www.gaborsamu.com/images/bjobs_unverified.png" />
</figure>

<ol start="10">
<li>To remedy the issue observed in step 9, click Cancel on the warning message and browse to <strong>System Settings -&gt; Privacy &amp; Security -&gt; Security Settings</strong>. In the Security Settings view, you&rsquo;ll see the message: <em>&ldquo;bjobs&rdquo; was blocked from use because it is not from an identified developer.</em> To allow the bjobs command to execute, click on the <strong>Allow Anyway</strong> button. You will then be prompted to authenticate to make the change take effect.</li>
</ol>
<p><figure><img src="https://www.gaborsamu.com/images/bjobs_allow.png" />
</figure>

<figure><img src="https://www.gaborsamu.com/images/bjobs_authenticate.png" />
</figure>
</p>

<ol start="11">
<li>Run the LSF <em>bjobs</em> command again. You will now receive a new warning popup indicating: <em>macOS cannot verify the developer of &ldquo;bjobs&rdquo;. Are you sure you want to open it?</em> To proceed, click on the <strong>Open</strong> button. The bjobs command will then run to completion as expected, and subsequent executions of bjobs will run without any system warnings.</li>
</ol>
<figure><img src="https://www.gaborsamu.com/images/bjobs_open.png" />
</figure>
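<p>As an aside, these warnings come from Gatekeeper and the quarantine attribute that macOS sets on downloaded files. If you trust the downloaded package, an alternative to clicking through the prompts one command at a time is to clear the quarantine attribute on the whole extracted directory (path shown for the install location used above):</p>

<div class="highlight"><pre><code class="language-plaintext">xattr -dr com.apple.quarantine ~/Desktop/LSF_Desktop_Client</code></pre></div>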
<ol start="12">
<li>Finally, to submit a job, run the <em>bsub</em> command. Here we try to submit a simple sleep job (i.e. <em>bsub -q normal sleep 3600</em>). As was the case with the <em>bjobs</em> command, the <em>bsub</em> command is also blocked. Repeat steps 10 and 11 as described above, but for the <em>bsub</em> command. Once the steps have been completed, repeat the <em>bsub</em> job submission command.</li>
</ol>
<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % bsub -q normal sleep 3600
Job &lt;617551&gt; is submitted to queue &lt;normal&gt;.</code></pre></div>
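<p>With the client binaries cleared to run, submission can also be scripted. The following is a minimal sketch (not part of the client package) that wraps <em>bsub</em> using Python&rsquo;s subprocess module; it assumes the client directory is already on PATH and that a <em>paclogon</em> session is active:</p>

<div class="highlight"><pre><code class="language-python">import subprocess

def submit(queue, command):
    """Submit a command to LSF via bsub and return the confirmation message."""
    result = subprocess.run(["bsub", "-q", queue] + command,
                            capture_output=True, text=True, check=True)
    # bsub prints e.g.: Job &lt;617551&gt; is submitted to queue &lt;normal&gt;.
    return result.stdout.strip()

print(submit("normal", ["sleep", "3600"]))</code></pre></div>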
Monitoring .-.. ... ..-. (IBM Spectrum LSF) with the TIG stack
2023-01-24T19:48:44-07:00
https://hpc.social/2023/monitoring-ibm-spectrum-lsf-with-the-tig-stack

<p>Much like dashboards in automobiles, dashboards in the context of HPC infrastructure are crucial to get an understanding of what&rsquo;s happening under the hood of your HPC cluster - at a glance. During my IT career, I&rsquo;ve used a myriad of monitoring solutions ranging from SNMP and Ganglia, to the ELK (Elasticsearch, Logstash, Kibana) stack. For example, I&rsquo;ve recently written an overview on how it is possible to visualize <a href="https://www.ibm.com/products/hpc-workload-management">IBM Spectrum LSF</a> (LSF) data in Grafana. LSF is an HPC job scheduler which brings to the table three decades of experience in workload and resource management.</p>

<p>For this blog, I decided to take this to the next level by monitoring IBM Spectrum LSF with the well-known TIG (Telegraf, InfluxDB, Grafana) stack. This article is not meant to be a debate on the advantages of one monitoring stack over another. Rather, the focus is to demonstrate what is feasible in terms of monitoring Spectrum LSF clusters with the TIG stack, given the many available ways to query LSF for key information using CLI commands.</p>

<hr />

<p><strong>The Journey</strong></p>

<p>There already exist many write-ups on how to deploy the TIG stack to monitor systems. This isn&rsquo;t meant to be a guide on setting up the TIG stack. Rather, it&rsquo;s assumed that the reader already has some familiarity with the TIG stack. If not, then [<em>insert your favourite search engine</em>] is your friend.</p>

<p>On my home network, I decided to set up a VM on my trusty <a href="https://traverse.com.au/products/ten64-networking-platform/">Traverse Ten64</a>, running Fedora, where InfluxDB was installed. The idea was to run InfluxDB on a system that is guaranteed to be always on in my home environment and that is energy efficient. Installing Telegraf on all of the LSF cluster servers (x3) proved to be straightforward. Note that in all cases, I used the OS-supplied versions of InfluxDB and Telegraf. Finally, I already had a Grafana server running on a server in my network.</p>

<p>Out of the box, Telegraf has the ability to monitor numerous system metrics. Furthermore, there exist literally hundreds of plugins for Telegraf to monitor a wide variety of devices, services and software. A search, however, didn&rsquo;t reveal the existence of any plugin to monitor LSF. So it was time to get creative.</p>
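<p>For completeness, the glue between the pieces is the Telegraf configuration on each LSF server. Below is a minimal sketch; the InfluxDB URL, database name and script path are assumptions for a setup like mine, and the exec input is where the custom plugin described later in this post gets hooked in:</p>

<div class="highlight"><pre><code class="language-plaintext"># Ship metrics to the InfluxDB VM (URL and database are assumptions)
[[outputs.influxdb]]
  urls = ["http://influxdb-vm:8086"]
  database = "telegraf"

# Process metrics for the LSF scheduler daemons
[[inputs.procstat]]
  pattern = "mbatchd|mbschd|lim"

# Custom LSF plugin script emitting InfluxDB line protocol
[[inputs.exec]]
  commands = ["/usr/local/bin/lsf_telegraf_agent.py"]
  timeout = "1m"
  data_format = "influx"</code></pre></div>

<p>Running <em>telegraf --test</em> on one of the servers is a quick way to verify that the inputs produce sensible output before anything is shipped to InfluxDB.</p>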
<hr />

<p><strong>What to monitor?</strong></p>

<p>A bit of research revealed that InfluxDB supports what is known as &ldquo;line protocol&rdquo;. This is a well-defined, text-based format for writing data to InfluxDB. I used the following <a href="https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/">reference</a> on &ldquo;line protocol&rdquo; to guide me. Using line protocol, it would ultimately be possible to write a plugin for Telegraf to effectively scrape information from Spectrum LSF and output it in line protocol format for writing to InfluxDB.</p>
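<p>To give a taste of the format: each line names a measurement, optional comma-separated tags, at least one field, and a timestamp in nanoseconds since the epoch. The sample values below are made up, but they match the shape of what the plugin described later emits (the trailing &ldquo;i&rdquo; marks a field value as an integer):</p>

<div class="highlight"><pre><code class="language-plaintext">measurement,tag_key=tag_value field_key=field_value timestamp
lsf_servers,status=ok value=3i 1674612345000000000
lsf_jobs,state=running value=42i 1674612345000000000</code></pre></div>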
</span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Gabor Samu</span> +<span style="color: #75715e;"># January 4, 2023</span> +<span style="color: #75715e;">#</span> + +<span style="color: #f92672;">import</span> os +<span style="color: #f92672;">import</span> json +<span style="color: #f92672;">import</span> time +<span style="color: #f92672;">import</span> subprocess +<span style="color: #f92672;">import</span> sys +<span style="color: #f92672;">from</span> pathlib <span style="color: #f92672;">import</span> Path + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Variable declarations</span> +<span style="color: #75715e;"># **NOTE: lsf_envfile needs to be set to point to the profile.lsf file for the LSF installation. </span> +<span style="color: #75715e;">#</span> +lsf_envfile <span style="color: #f92672;">=</span> <span style="color: #e6db74;">"/opt/ibm/lsfsuite/lsf/conf/profile.lsf"</span> + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Source the Spectrum LSF profile. </span> +<span style="color: #75715e;"># Check for existing of lsf_envfile (profile.lsf) and source the environment. </span> +<span style="color: #75715e;"># If the specified file does not exist, then exit. </span> +<span style="color: #75715e;">#</span> +path <span style="color: #f92672;">=</span> Path(lsf_envfile) +<span style="color: #66d9ef;">if</span> path<span style="color: #f92672;">.</span>is_file(): + lsf_env <span style="color: #f92672;">=</span> (<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'env -i sh -c "source </span><span style="color: #e6db74;">{</span>lsf_envfile<span style="color: #e6db74;">}</span><span style="color: #e6db74;"> &amp;&amp; env"'</span>) + <span style="color: #66d9ef;">for</span> line <span style="color: #f92672;">in</span> subprocess<span style="color: #f92672;">.</span>getoutput(lsf_env)<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"</span><span style="color: #ae81ff;">\n</span><span style="color: #e6db74;">"</span>): + key, value <span style="color: #f92672;">=</span> line<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"="</span>) + os<span style="color: #f92672;">.</span>environ[key]<span style="color: #f92672;">=</span> value +<span style="color: #66d9ef;">else</span>: + sys<span style="color: #f92672;">.</span>exit(<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'The file </span><span style="color: #e6db74;">{</span>lsf_envfile<span style="color: #e6db74;">}</span><span style="color: #e6db74;"> does not exist.'</span>) + +<span style="color: #75715e;"># </span> +<span style="color: #75715e;"># Get the time in nanoseconds since the epoch. </span> +<span style="color: #75715e;"># This is required as part of the InfluxDB line protocol reference. </span> +<span style="color: #75715e;"># Only supported on Python 3.7+</span> +<span style="color: #75715e;">#</span> +time_nanosec <span style="color: #f92672;">=</span> time<span style="color: #f92672;">.</span>time_ns() + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Here we set the LSF environment variable LSB_NTRIES. This will be used to determine the </span> +<span style="color: #75715e;"># number of retries before failure of a LSF batch command. This is used to cover the case </span> +<span style="color: #75715e;"># when the LSF mbatchd is not running. 
</span> +<span style="color: #75715e;">#</span> +os<span style="color: #f92672;">.</span>environ[<span style="color: #e6db74;">"LSB_NTRIES"</span>] <span style="color: #f92672;">=</span> <span style="color: #e6db74;">"2"</span> + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Check if LSF performance metric monitoring is enabled. This is done by running</span> +<span style="color: #75715e;"># 'badmin perfmon view'. If badmin is not found, then exit. </span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Check the return status from 'badmin perfmon view' and take the appropriate action:</span> +<span style="color: #75715e;"># - If return status is 7, it means that performance monitoring is not enabled. The script</span> +<span style="color: #75715e;"># will enable LSF performance metric monitoring by running 'badmin perfmon start'.</span> +<span style="color: #75715e;"># Note that a 70 second sleep is required before LSF metrics will be available. </span> +<span style="color: #75715e;"># - If return status is 65, it means that the badmin command reported that the</span> +<span style="color: #75715e;"># LSF batch system is down. This is a fatal error which will cause the script</span> +<span style="color: #75715e;"># to exit. </span> +<span style="color: #75715e;">#</span> +lsf_path <span style="color: #f92672;">=</span> os<span style="color: #f92672;">.</span>environ[<span style="color: #e6db74;">'LSF_BINDIR'</span>] +badmin_path <span style="color: #f92672;">=</span> lsf_path <span style="color: #f92672;">+</span> <span style="color: #e6db74;">"/badmin"</span> +bqueues_path <span style="color: #f92672;">=</span> lsf_path <span style="color: #f92672;">+</span> <span style="color: #e6db74;">"/bqueues"</span> + +path <span style="color: #f92672;">=</span> Path(badmin_path) +<span style="color: #66d9ef;">if</span> path<span style="color: #f92672;">.</span>is_file(): + cmd <span style="color: #f92672;">=</span> [badmin_path, <span style="color: #e6db74;">'perfmon'</span>, <span style="color: #e6db74;">'view'</span>] + p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL) + <span style="color: #66d9ef;">while</span> p<span style="color: #f92672;">.</span>poll() <span style="color: #f92672;">is</span> <span style="color: #66d9ef;">None</span>: + time<span style="color: #f92672;">.</span>sleep(<span style="color: #ae81ff;">0.1</span>) + return_code <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>returncode + <span style="color: #66d9ef;">if</span> return_code <span style="color: #f92672;">==</span> <span style="color: #ae81ff;">7</span>: + cmd <span style="color: #f92672;">=</span> [badmin_path, <span style="color: #e6db74;">'perfmon'</span>, <span style="color: #e6db74;">'start'</span>] + p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL) + <span style="color: #66d9ef;">while</span> p<span style="color: #f92672;">.</span>poll() <span style="color: #f92672;">is</span> <span style="color: #66d9ef;">None</span>: + time<span 
style="color: #f92672;">.</span>sleep(<span style="color: #ae81ff;">0.1</span>) + return_code <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>returncode + time<span style="color: #f92672;">.</span>sleep(<span style="color: #ae81ff;">70</span>) + <span style="color: #66d9ef;">elif</span> return_code <span style="color: #f92672;">==</span> <span style="color: #ae81ff;">65</span>: + sys<span style="color: #f92672;">.</span>exit(<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'The LSF batch system is down.'</span>) +<span style="color: #66d9ef;">else</span>: + sys<span style="color: #f92672;">.</span>exit(<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'</span><span style="color: #e6db74;">{</span>badmin_path<span style="color: #e6db74;">}</span><span style="color: #e6db74;"> does not exist.'</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Run badmin with the "perfmon view" keywords and the -json option to product JSON output</span> +<span style="color: #75715e;"># We assume here that the LSF batch system is responsive (a check was done above); if</span> +<span style="color: #75715e;"># the mbatchd is very busy there is a possiblity that it may not be responsive here. This</span> +<span style="color: #75715e;"># case is not considered; LSB_NTRIES setting will determine how many tries are made before</span> +<span style="color: #75715e;"># badmin gives up the ghost. </span> +<span style="color: #75715e;"># </span> +<span style="color: #75715e;"># Note: We previously checked for the existence of the 'badmin' binary. </span> +<span style="color: #75715e;">#</span> +cmd <span style="color: #f92672;">=</span> [badmin_path, <span style="color: #e6db74;">'perfmon'</span>, <span style="color: #e6db74;">'view'</span>, <span style="color: #e6db74;">'-json'</span>] +p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>PIPE, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, text<span style="color: #f92672;">=</span><span style="color: #66d9ef;">True</span>) +stdout, stderr <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>communicate() +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Guard for the case that the performance monitor has just been enabled, but is not</span> +<span style="color: #75715e;"># producing any data as the first sample period has not elapsed. </span> +<span style="color: #75715e;">#</span> +<span style="color: #66d9ef;">if</span> stdout <span style="color: #f92672;">==</span> <span style="color: #e6db74;">""</span>: + sys<span style="color: #f92672;">.</span>exit(<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'Output from badmin perfmon view -json is empty.'</span>) +<span style="color: #66d9ef;">else</span>: + data <span style="color: #f92672;">=</span> json<span style="color: #f92672;">.</span>loads(stdout) + +<span style="color: #75715e;"># </span> +<span style="color: #75715e;"># Run badmin showstatus</span> +<span style="color: #75715e;"># Next, run the command 'badmin showstatus' and capture the output. Note that badmin showstatus</span> +<span style="color: #75715e;"># does not produce JSON output. So here we must do some scraping of the output. 
 + +<span style="color: #75715e;"># </span> +<span style="color: #75715e;"># Run badmin showstatus</span> +<span style="color: #75715e;"># Next, run the command 'badmin showstatus' and capture the output. Note that badmin showstatus</span> +<span style="color: #75715e;"># does not produce JSON output. So here we must do some scraping of the output.</span> +<span style="color: #75715e;"># The output from 'badmin showstatus' is placed into the array 'showstatus'. The hard-coded</span> +<span style="color: #75715e;"># positions in the output of 'badmin showstatus' are assumed when building the output </span> +<span style="color: #75715e;"># strings below. Should the format of the output of 'badmin showstatus' change, this will</span> +<span style="color: #75715e;"># need to be updated. </span> +cmd <span style="color: #f92672;">=</span> [badmin_path, <span style="color: #e6db74;">'showstatus'</span>] +p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>PIPE, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, text<span style="color: #f92672;">=</span><span style="color: #66d9ef;">True</span>) +stdout, stderr <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>communicate() +<span style="color: #75715e;"># Convert badmin showstatus output into an array of whitespace-separated tokens</span> +showstatus <span style="color: #f92672;">=</span> stdout<span style="color: #f92672;">.</span>split() + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Run bqueues</span> +<span style="color: #75715e;">#</span> +cmd <span style="color: #f92672;">=</span> [bqueues_path, <span style="color: #e6db74;">'-json'</span>, <span style="color: #e6db74;">'-o'</span>, <span style="color: #e6db74;">'queue_name:12 njobs pend run susp rsv ususp ssusp'</span>] +p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>PIPE, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, text<span style="color: #f92672;">=</span><span style="color: #66d9ef;">True</span>) +stdout, stderr <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>communicate() +data_queues <span style="color: #f92672;">=</span> json<span style="color: #f92672;">.</span>loads(stdout) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># At this stage, we've captured the output from 'badmin perfmon view -json', </span> +<span style="color: #75715e;"># 'badmin showstatus' and 'bqueues'. We're now ready to print to standard output the metric</span> +<span style="color: #75715e;"># strings in InfluxDB line protocol format. 
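An example record</span> +<span style="color: #75715e;"># (with hypothetical values) looks as follows:</span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;">#   lsf_servers,status=ok value=42i 1674246976000000000</span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># i.e. measurement,tag field=value timestamp, where the 'i' suffix marks an integer field.</span> +<span style="color: #75715e;">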
</span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Details about the line protocol format can be found here:</span> +<span style="color: #75715e;"># https://docs.influxdata.com/influxdb/v2.6/reference/syntax/line-protocol/</span> +<span style="color: #75715e;"># </span> +<span style="color: #75715e;"># </span> + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># LSF server status</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=total"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">21</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=ok"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">23</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=closed"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">25</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=unreachable"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">27</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=unavailable"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">29</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># LSF job status</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=total"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">33</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=running"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">35</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=suspended"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">37</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=pending"</span>,<span style="color: #e6db74;">" 
value="</span>,showstatus[<span style="color: #ae81ff;">39</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=finished"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">41</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># LSF user stats</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_users,"</span>,<span style="color: #e6db74;">"state=numusers"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">45</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_users,"</span>,<span style="color: #e6db74;">"state=numgroups"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">50</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_users,"</span>,<span style="color: #e6db74;">"state=numactive"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">55</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># LSF hosts stats</span> +<span style="color: #75715e;"># First we split out the current and peak values for clients, servers, cpus, cores, and slots.</span> +<span style="color: #75715e;"># The current and peak values are separated by the "/" delimiter.</span> +<span style="color: #75715e;"># </span> +clientssplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">9</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) +serverssplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">11</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) +cpussplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">13</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) +coressplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">15</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) +slotssplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">17</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) + +print(<span style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=clients"</span>,<span style="color: #e6db74;">" current="</span>,clientssplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,clientssplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_n +anosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span 
style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=servers"</span>,<span style="color: #e6db74;">" current="</span>,serverssplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,serverssplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_n +anosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=cpus"</span>,<span style="color: #e6db74;">" current="</span>,cpussplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,cpussplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,se +p<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=cores"</span>,<span style="color: #e6db74;">" current="</span>,coressplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,coressplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec +,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=slots"</span>,<span style="color: #e6db74;">" current="</span>,slotssplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,slotssplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec +,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print mbatchd query metrics</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"query=job"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">1</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"query=host"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">2</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"query=queue"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">3</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print mbatchd job metrics</span> +<span style="color: #75715e;">#</span> +print(<span style="color: 
#e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=submitreqs"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">4</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=submitted"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">5</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=dispatched"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">6</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=completed"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">7</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=sentremote"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">8</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=acceptremote"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">9</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">'</span> +<span style="color: #e6db74;">')</span> +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"sched=interval"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">10</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"sched=matchhost"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">11</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span> +) +print(<span style="color: 
#e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"sched=buckets"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">12</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"sched=reordered"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">13</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span> +) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print mbatchd efficiency metrics. Here check if the efficiency metric indicated is "-". If so, </span> +<span style="color: #75715e;"># then assume a zero value. The trailing "%" sign on the metrics (percentages) is also stripped here. </span> +<span style="color: #75715e;">#</span> +slots <span style="color: #f92672;">=</span> (data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">14</span>][<span style="color: #e6db74;">'current'</span>]) +slots_percent <span style="color: #f92672;">=</span> slots +<span style="color: #66d9ef;">if</span> slots_percent <span style="color: #f92672;">==</span> <span style="color: #e6db74;">"-"</span>: + slots_percent <span style="color: #f92672;">=</span> <span style="color: #e6db74;">"0"</span> +<span style="color: #66d9ef;">elif</span> slots_percent <span style="color: #f92672;">!=</span> <span style="color: #e6db74;">"0"</span>: + <span style="color: #75715e;"># Strip % sign and decimal. This is to work around issue inserting float to InfluxDB</span> + <span style="color: #75715e;"># "type float, already exists as type integer dropped ..."</span> + slots_percent <span style="color: #f92672;">=</span> slots[:<span style="color: #f92672;">-</span><span style="color: #ae81ff;">4</span>] + +memory <span style="color: #f92672;">=</span> (data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">15</span>][<span style="color: #e6db74;">'current'</span>]) +memory_percent <span style="color: #f92672;">=</span> memory +<span style="color: #66d9ef;">if</span> memory_percent <span style="color: #f92672;">==</span> <span style="color: #e6db74;">"-"</span>: + memory_percent <span style="color: #f92672;">=</span> <span style="color: #e6db74;">"0"</span> +<span style="color: #66d9ef;">elif</span> memory_percent <span style="color: #f92672;">!=</span> <span style="color: #e6db74;">"0"</span>: + <span style="color: #75715e;"># Strip % sign and decimal. 
This is to work around an issue inserting a float into InfluxDB:</span> + <span style="color: #75715e;"># "type float, already exists as type integer dropped ..."</span> + memory_percent <span style="color: #f92672;">=</span> memory[:<span style="color: #f92672;">-</span><span style="color: #ae81ff;">4</span>] + +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"utilization=slots"</span>,<span style="color: #e6db74;">" value="</span>,slots_percent,<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"utilization=memory"</span>,<span style="color: #e6db74;">" value="</span>,memory_percent,<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print mbatchd file descriptor usage</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"fd=free"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'fd'</span>][<span style="color: #e6db74;">'free'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"fd=used"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'fd'</span>][<span style="color: #e6db74;">'used'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"fd=total"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'fd'</span>][<span style="color: #e6db74;">'total'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print LSF queue status (njobs)</span> +<span style="color: #75715e;">#</span> +iterations <span style="color: #f92672;">=</span> data_queues[<span style="color: #e6db74;">"QUEUES"</span>] + +<span style="color: #66d9ef;">for</span> n <span style="color: #f92672;">in</span> range(iterations): + print(<span style="color: #e6db74;">"lsf_queues,"</span>,<span style="color: #e6db74;">"name="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'QUEUE_NAME'</span>], <span style="color: #e6db74;">" njobs="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'NJOBS'</span>],<span style="color: #e6db74;">"i,"</span>, + <span style="color: #e6db74;">"pend="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'PEND'</span>],<span style="color: #e6db74;">"i,"</span>, + <span style="color: #e6db74;">"run="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'RUN'</span>],<span style="color: #e6db74;">"i,"</span>, + <span style="color: #e6db74;">"susp="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span 
style="color: #e6db74;">'SUSP'</span>],<span style="color: #e6db74;">"i,"</span>, + <span style="color: #e6db74;">"rsv="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'RSV'</span>],<span style="color: #e6db74;">"i,"</span>, + <span style="color: #e6db74;">"ususp="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'USUSP'</span>],<span style="color: #e6db74;">"i,"</span>, + <span style="color: #e6db74;">"ssusp="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'SSUSP'</span>],<span style="color: #e6db74;">"i "</span>, + time_nanosec, sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +exit() +</code></pre></div> + +</details> + +<hr /> + +<p><strong>Bringing it all together</strong></p> + +<p>For completeness, below is the detail regarding the configuration of the environment. It should be noted that the simple test environment consists of a single server running IBM +Spectrum LSF Suite for HPC and a separate server which runs the InfluxDB instance.</p> + +<hr /> + +<table> +<thead> +<tr> +<th style="text-align: left;">Hostname</th> +<th>Component</th> +<th>Version</th> +</tr> +</thead> +<tbody> +<tr> +<td style="text-align: left;"><em>kilenc</em></td> +<td>OS (LSF mgmt server)</td> +<td><em>CentOS Stream release 8 (ppc64le)</em></td> +</tr> +<tr> +<td style="text-align: left;"><em>kilenc</em></td> +<td>Spectrum LSF Suite for HPC</td> +<td><em>v10.2.0.13</em></td> +</tr> +<tr> +<td style="text-align: left;"><em>adatbazis</em></td> +<td>OS (InfluxDB server)</td> +<td><em>Fedora release 36 (aarch64)</em></td> +</tr> +<tr> +<td style="text-align: left;"><em>adatbazis</em></td> +<td>InfluxDB</td> +<td><em>v1.8.10</em></td> +</tr> +<tr> +<td style="text-align: left;"><em>kilenc</em></td> +<td>Telegraf</td> +<td><em>v1.24.3</em></td> +</tr> +<tr> +<td style="text-align: left;"><em>kilenc</em></td> +<td>Grafana</td> +<td><em>v9.1.6</em></td> +</tr> +</tbody> +</table> +<hr /> + +<p>The follwing steps assume that IBM Spectrum LSF Suite for HPC, InfluxDB and Telegraf have been installed.</p> + +<ol> +<li> +<p>Start InfluxDB on the host <em>adatbazis</em></p> + +</li> +<li> +<p>On the LSF management server <em>kilenc</em>, configure telegraf to connect to the influxDB instance on host <em>adatbazis</em>. Edit the configuration <em>/etc/telegraf/telegraf.conf</em> and specify +the correct URL in the <em>outputs.influxdb</em> section as follows:</p> + +</li> +</ol> +<div class="highlight"><pre><code class="language-plaintext"># # Configuration for sending metrics to InfluxDB +[[outputs.influxdb]] +# ## The full HTTP or UDP URL for your InfluxDB instance. +# ## +# ## Multiple URLs can be specified for a single cluster, only ONE of the +# ## urls will be written to each interval. +# # urls = ["unix:///var/run/influxdb.sock"] +# # urls = ["udp://127.0.0.1:8089"] +# # urls = ["http://127.0.0.1:8086"] +# Added gsamu Jan 04 2023 +urls = ["http://adatbazis:8086"]</code></pre></div> + +<ol start="3"> +<li>On the LSF management server <em>kilenc</em>, configure telegraf with the custom plugin script <em>lsf_telegraf_agent_0.9.py</em> to collect and log metrics from IBM Spectrum LSF Suite for HPC. +Edit the configuration <em>/etc/telegraf/telegraf.conf</em> and specify the correct command path in the section <em>inputs.exec</em>. 
 + +<ol start="3"> +<li>On the LSF management server <em>kilenc</em>, configure Telegraf with the custom plugin script <em>lsf_telegraf_agent_0.9.py</em> to collect and log metrics from IBM Spectrum LSF Suite for HPC. +Edit the configuration <em>/etc/telegraf/telegraf.conf</em> and specify the correct command path in the section <em>inputs.exec</em>. Additionally, set <em>data_format</em> equal to <em>influx</em>. Note that the +script <em>lsf_telegraf_agent_0.9.py</em> was copied to the directory <em>/etc/telegraf/telegraf.d/scripts</em> with permissions octal 755 and owner set to user <em>telegraf</em>. +<strong>Note:</strong> User <em>telegraf</em> was automatically created during the installation of Telegraf.</li> +</ol> +<div class="highlight"><pre><code class="language-plaintext"> +# ## Gather LSF metrics +[[inputs.exec]] + ## Commands array + commands = [ "/etc/telegraf/telegraf.d/scripts/lsf_telegraf_agent_0.9.py" ] + timeout = "30s" + interval = "30s" + data_format = "influx" + # ## End LSF metrics</code></pre></div> + +<ol start="4"> +<li>Telegraf provides the ability to collect metrics on processes. Here we&rsquo;ll use the Telegraf <em>procstat</em> facility to monitor the LSF <em>mbatchd</em>, <em>mbschd</em> and management <em>lim</em> processes. These are the key +daemons involved in handling query requests and making scheduling decisions for jobs in the environment. Edit the configuration <em>/etc/telegraf/telegraf.conf</em> and configure the three +following <em>inputs.procstat</em> sections.</li> +</ol> +<div class="highlight"><pre><code class="language-plaintext"># ## Monitor CPU and memory utilization for LSF processes +# ## mbatchd, mbschd, lim (manager) +[[inputs.procstat]] +exe = "lim" +pattern = "lim" +pid_finder = "pgrep" + +[[inputs.procstat]] +exe = "mbschd" +pattern = "mbschd" +pid_finder = "pgrep" + +[[inputs.procstat]] +exe = "mbatchd" +pattern = "mbatchd" +pid_finder = "pgrep"</code></pre></div> + +<ol start="5"> +<li>With the Telegraf configuration complete, it&rsquo;s now time to test whether the configuration and the custom LSF agent are functioning as expected. Note that the following operation is performed +on the LSF management candidate host <em>kilenc</em> and assumes that the LSF daemons are up and running. This is achieved by running the command: +<em>telegraf --config /etc/telegraf/telegraf.conf --test</em>. <strong>Note:</strong> Any errors in the configuration file <em>/etc/telegraf/telegraf.conf</em> will result in errors in the output.</li> +</ol> +<hr /> + +<details> + <strong>Output of <em>telegraf --config /etc/telegraf/telegraf.conf --test</em>. 
Click to expand!</strong> + <div class="highlight"><pre><code class="language-python">[root<span style="color: #a6e22e;">@kilenc</span> telegraf]<span style="color: #75715e;"># pwd</span> +<span style="color: #f92672;">/</span>etc<span style="color: #f92672;">/</span>telegraf +[root<span style="color: #a6e22e;">@kilenc</span> telegraf]<span style="color: #75715e;"># telegraf --config /etc/telegraf/telegraf.conf --test</span> +<span style="color: #f92672;">&gt;</span> mem,host<span style="color: #f92672;">=</span>kilenc active<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1938817024</span>i,available<span style="color: #f92672;">=</span><span style="color: #ae81ff;">6820003840</span>i,available_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20.653390597462806</span>,buffered<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4849664</span>i,cached<span style="color: #f92672;">=</span><span style="color: #ae81ff;">6317735936</span>i,commit_limit<span style="color: #f92672;">=</span><span style="color: #ae81ff;">33560395776</span>i,committed_as<span style="color: #f92672;">=</span><span style="color: #ae81ff;">18635292672</span>i,dirty<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4128768</span>i,free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2623799296</span>i,high_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,high_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,huge_page_size<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2097152</span>i,huge_pages_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,huge_pages_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,inactive<span style="color: #f92672;">=</span><span style="color: #ae81ff;">13852016640</span>i,low_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,low_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,mapped<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1007353856</span>i,page_tables<span style="color: #f92672;">=</span><span style="color: #ae81ff;">22478848</span>i,shared<span style="color: #f92672;">=</span><span style="color: #ae81ff;">259063808</span>i,slab<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4946919424</span>i,sreclaimable<span style="color: #f92672;">=</span><span style="color: #ae81ff;">902234112</span>i,sunreclaim<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4044685312</span>i,swap_cached<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3866624</span>i,swap_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">16994729984</span>i,swap_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">17049780224</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">33021231104</span>i,used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">24074846208</span>i,used_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">72.90717336424115</span>,vmalloc_chunk<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,vmalloc_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">562949953421312</span>i,vmalloc_used<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0</span>i,write_back<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,write_back_tmp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> kernel,host<span style="color: #f92672;">=</span>kilenc boot_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1673790850</span>i,context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1943864437</span>i,entropy_avail<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4037</span>i,interrupts<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1294179599</span>i,processes_forked<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4255316</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> swap,host<span style="color: #f92672;">=</span>kilenc free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">16994729984</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">17049780224</span>i,used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">55050240</span>i,used_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.3228794698626609</span> <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> swap,host<span style="color: #f92672;">=</span>kilenc <span style="color: #f92672;">in</span><span style="color: #f92672;">=</span><span style="color: #ae81ff;">172032</span>i,out<span style="color: #f92672;">=</span><span style="color: #ae81ff;">851968</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> net,host<span style="color: #f92672;">=</span>kilenc,interface<span style="color: #f92672;">=</span>lo bytes_recv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">90039931116</span>i,bytes_sent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">90039931116</span>i,drop_in<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,drop_out<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,err_in<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,err_out<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,packets_recv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">17245997</span>i,packets_sent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">17245997</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> net,host<span style="color: #f92672;">=</span>kilenc,interface<span style="color: #f92672;">=</span>enP4p1s0f0 bytes_recv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,bytes_sent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,drop_in<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,drop_out<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,err_in<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,err_out<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,packets_recv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,packets_sent<span style="color: #f92672;">=</span><span 
style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> net,host<span style="color: #f92672;">=</span>kilenc,interface<span style="color: #f92672;">=</span>enP4p1s0f1 bytes_recv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">11791041280</span>i,bytes_sent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1701152001</span>i,drop_in<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,drop_out<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,err_in<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,err_out<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,packets_recv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">10322276</span>i,packets_sent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4594948</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> net,host<span style="color: #f92672;">=</span>kilenc,interface<span style="color: #f92672;">=</span>all icmp_inaddrmaskreps<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_inaddrmasks<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_incsumerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_indestunreachs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8609</span>i,icmp_inechoreps<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20</span>i,icmp_inechos<span style="color: #f92672;">=</span><span style="color: #ae81ff;">11</span>i,icmp_inerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1084</span>i,icmp_inmsgs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8640</span>i,icmp_inparmprobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_inredirects<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_insrcquenchs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_intimeexcds<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_intimestampreps<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_intimestamps<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_outaddrmaskreps<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_outaddrmasks<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_outdestunreachs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4805</span>i,icmp_outechoreps<span style="color: #f92672;">=</span><span style="color: #ae81ff;">11</span>i,icmp_outechos<span style="color: #f92672;">=</span><span style="color: #ae81ff;">94</span>i,icmp_outerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_outmsgs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4910</span>i,icmp_outparmprobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_outredirects<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_outsrcquenchs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_outtimeexcds<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0</span>i,icmp_outtimestampreps<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmp_outtimestamps<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,icmpmsg_intype0<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20</span>i,icmpmsg_intype3<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8609</span>i,icmpmsg_intype8<span style="color: #f92672;">=</span><span style="color: #ae81ff;">11</span>i,icmpmsg_outtype0<span style="color: #f92672;">=</span><span style="color: #ae81ff;">11</span>i,icmpmsg_outtype3<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4805</span>i,icmpmsg_outtype8<span style="color: #f92672;">=</span><span style="color: #ae81ff;">94</span>i,ip_defaultttl<span style="color: #f92672;">=</span><span style="color: #ae81ff;">64</span>i,ip_forwarding<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,ip_forwdatagrams<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ip_fragcreates<span style="color: #f92672;">=</span><span style="color: #ae81ff;">62958</span>i,ip_fragfails<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ip_fragoks<span style="color: #f92672;">=</span><span style="color: #ae81ff;">12611</span>i,ip_inaddrerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,ip_indelivers<span style="color: #f92672;">=</span><span style="color: #ae81ff;">21324370</span>i,ip_indiscards<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ip_inhdrerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ip_inreceives<span style="color: #f92672;">=</span><span style="color: #ae81ff;">21324371</span>i,ip_inunknownprotos<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ip_outdiscards<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ip_outnoroutes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">30</span>i,ip_outrequests<span style="color: #f92672;">=</span><span style="color: #ae81ff;">21248264</span>i,ip_reasmfails<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ip_reasmoks<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ip_reasmreqds<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ip_reasmtimeout<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,tcp_activeopens<span style="color: #f92672;">=</span><span style="color: #ae81ff;">763497</span>i,tcp_attemptfails<span style="color: #f92672;">=</span><span style="color: #ae81ff;">96617</span>i,tcp_currestab<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118</span>i,tcp_estabresets<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1917</span>i,tcp_incsumerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,tcp_inerrs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,tcp_insegs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">19488475</span>i,tcp_maxconn<span style="color: #f92672;">=-</span><span style="color: #ae81ff;">1</span>i,tcp_outrsts<span style="color: #f92672;">=</span><span style="color: #ae81ff;">137188</span>i,tcp_outsegs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20220038</span>i,tcp_passiveopens<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">675805</span>i,tcp_retranssegs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9827</span>i,tcp_rtoalgorithm<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,tcp_rtomax<span style="color: #f92672;">=</span><span style="color: #ae81ff;">120000</span>i,tcp_rtomin<span style="color: #f92672;">=</span><span style="color: #ae81ff;">200</span>i,udp_ignoredmulti<span style="color: #f92672;">=</span><span style="color: #ae81ff;">10509</span>i,udp_incsumerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udp_indatagrams<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1816997</span>i,udp_inerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udp_memerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udp_noports<span style="color: #f92672;">=</span><span style="color: #ae81ff;">264</span>i,udp_outdatagrams<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1506724</span>i,udp_rcvbuferrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udp_sndbuferrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udplite_ignoredmulti<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udplite_incsumerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udplite_indatagrams<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udplite_inerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udplite_memerrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udplite_noports<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udplite_outdatagrams<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udplite_rcvbuferrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,udplite_sndbuferrors<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>dm<span style="color: #f92672;">-</span><span style="color: #ae81ff;">2</span> io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9739370</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4015612416</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">604060</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">40592</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">60563370</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">47025459712</span>i,write_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">59959310</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1079691</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: 
#f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>sda1 io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1460</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4849664</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1304</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1304</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,write_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>sda3 io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">45872430</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">623</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1061314</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">16398521856</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3371612</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">139298</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">311521720</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">133715422208</span>i,write_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">308150107</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">7031512</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>dm<span style="color: #f92672;">-</span><span style="color: #ae81ff;">1</span> io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">5780</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">5636096</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3030</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">81</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">26500</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">13631488</span>i,write_time<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">23470</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">208</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> disk,device<span style="color: #f92672;">=</span>dm<span style="color: #f92672;">-</span><span style="color: #ae81ff;">0</span>,fstype<span style="color: #f92672;">=</span>xfs,host<span style="color: #f92672;">=</span>kilenc,mode<span style="color: #f92672;">=</span>rw,path<span style="color: #f92672;">=/</span> free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9315028992</span>i,inodes_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">18214222</span>i,inodes_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">19822888</span>i,inodes_used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1608666</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">53660876800</span>i,used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">44345847808</span>i,used_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">82.64093032486566</span> <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> disk,device<span style="color: #f92672;">=</span>sda2,fstype<span style="color: #f92672;">=</span>ext4,host<span style="color: #f92672;">=</span>kilenc,mode<span style="color: #f92672;">=</span>rw,path<span style="color: #f92672;">=/</span>boot free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">309653504</span>i,inodes_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65264</span>i,inodes_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65536</span>i,inodes_used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">272</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1020702720</span>i,used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">640585728</span>i,used_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67.41310045173972</span> <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> disk,device<span style="color: #f92672;">=</span>dm<span style="color: #f92672;">-</span><span style="color: #ae81ff;">2</span>,fstype<span style="color: #f92672;">=</span>xfs,host<span style="color: #f92672;">=</span>kilenc,mode<span style="color: #f92672;">=</span>rw,path<span style="color: #f92672;">=/</span>home free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">856442515456</span>i,inodes_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">452529686</span>i,inodes_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">453312512</span>i,inodes_used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">782826</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">927930712064</span>i,used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">71488196608</span>i,used_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">7.704044674735306</span> <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> disk,device<span style="color: #f92672;">=</span>dm<span style="color: #f92672;">-</span><span style="color: #ae81ff;">2</span>,fstype<span style="color: 
#f92672;">=</span>xfs,host<span style="color: #f92672;">=</span>kilenc,mode<span style="color: #f92672;">=</span>rw,path<span style="color: #f92672;">=/</span>home<span style="color: #f92672;">/</span>opt<span style="color: #f92672;">/</span>at13<span style="color: #ae81ff;">.0</span><span style="color: #f92672;">/</span>lib free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">856442515456</span>i,inodes_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">452529686</span>i,inodes_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">453312512</span>i,inodes_used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">782826</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">927930712064</span>i,used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">71488196608</span>i,used_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">7.704044674735306</span> <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> disk,device<span style="color: #f92672;">=</span>dm<span style="color: #f92672;">-</span><span style="color: #ae81ff;">2</span>,fstype<span style="color: #f92672;">=</span>xfs,host<span style="color: #f92672;">=</span>kilenc,mode<span style="color: #f92672;">=</span>rw,path<span style="color: #f92672;">=/</span>home<span style="color: #f92672;">/</span>opt<span style="color: #f92672;">/</span>at13<span style="color: #ae81ff;">.0</span><span style="color: #f92672;">/</span>lib64 free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">856442515456</span>i,inodes_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">452529686</span>i,inodes_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">453312512</span>i,inodes_used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">782826</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">927930712064</span>i,used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">71488196608</span>i,used_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">7.704044674735306</span> <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> disk,device<span style="color: #f92672;">=</span>ST31000524AS<span style="color: #f92672;">/</span>raktar,fstype<span style="color: #f92672;">=</span>zfs,host<span style="color: #f92672;">=</span>kilenc,mode<span style="color: #f92672;">=</span>rw,path<span style="color: #f92672;">=/</span>mnt<span style="color: #f92672;">/</span>ST31000524AS free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">210837438464</span>i,inodes_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">411792117</span>i,inodes_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">412304487</span>i,inodes_used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">512370</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">965496143872</span>i,used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">754658705408</span>i,used_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">78.16278813725106</span> <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: 
#f92672;">=</span>sda io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">45899860</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">650</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1061332</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">16495536128</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3440899</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">141325</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">311596362</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">133715696640</span>i,write_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">308155462</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">7031531</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> disk,device<span style="color: #f92672;">=</span>ST31000524AS,fstype<span style="color: #f92672;">=</span>zfs,host<span style="color: #f92672;">=</span>kilenc,mode<span style="color: #f92672;">=</span>rw,path<span style="color: #f92672;">=/</span>ST31000524AS free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">210837438464</span>i,inodes_free<span style="color: #f92672;">=</span><span style="color: #ae81ff;">411792117</span>i,inodes_total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">411792123</span>i,inodes_used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">6</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">210837569536</span>i,used<span style="color: #f92672;">=</span><span style="color: #ae81ff;">131072</span>i,used_percent<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.00006216728844316324</span> <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>sda2 io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">18060</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">27</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">18</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">88372224</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">31224</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">436</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">36579</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">274432</span>i,write_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">5355</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">19</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>dm<span style="color: #f92672;">-</span><span 
style="color: #ae81ff;">0</span> io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">38788720</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">12341294080</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1143210</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">51814</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">303329620</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">86676331008</span>i,write_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">302186410</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">6798400</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>sdb io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">668810</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">58</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">104550912</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">746540</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">31054</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1445858</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">10845920256</span>i,write_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">699318</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">124780</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>sdb1 io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">341330</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">58</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">95562240</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">383066</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">25026</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1082385</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">10845920256</span>i,write_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">699318</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">124780</span>i <span style="color: 
#ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> diskio,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>sdb9 io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">190</span>i,iops_in_progress<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,merged_writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4980736</span>i,read_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">37</span>i,reads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">69</span>i,weighted_io_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">37</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,write_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,writes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> system,host<span style="color: #f92672;">=</span>kilenc load1<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.06</span>,load15<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.12</span>,load5<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.12</span>,n_cpus<span style="color: #f92672;">=</span><span style="color: #ae81ff;">32</span>i,n_users<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> system,host<span style="color: #f92672;">=</span>kilenc uptime<span style="color: #f92672;">=</span><span style="color: #ae81ff;">456127</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> system,host<span style="color: #f92672;">=</span>kilenc uptime_format<span style="color: #f92672;">=</span><span style="color: #e6db74;">"5 days, 6:42"</span> <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> processes,host<span style="color: #f92672;">=</span>kilenc blocked<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,dead<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">569</span>i,paging<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,parked<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,running<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,sleeping<span style="color: #f92672;">=</span><span style="color: #ae81ff;">412</span>i,stopped<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,total<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1366</span>i,total_threads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2683</span>i,unknown<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,zombies<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> 
lsf_servers,host<span style="color: #f92672;">=</span>kilenc,status<span style="color: #f92672;">=</span>total value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_servers,host<span style="color: #f92672;">=</span>kilenc,status<span style="color: #f92672;">=</span>ok value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_servers,host<span style="color: #f92672;">=</span>kilenc,status<span style="color: #f92672;">=</span>closed value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_servers,host<span style="color: #f92672;">=</span>kilenc,status<span style="color: #f92672;">=</span>unreachable value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_servers,host<span style="color: #f92672;">=</span>kilenc,status<span style="color: #f92672;">=</span>unavailable value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_jobs,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>total value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">121776</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_jobs,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>running value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">32</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_jobs,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>suspended value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_jobs,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>pending value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">120771</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_jobs,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>finished value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">973</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_users,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>numusers value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_users,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>numgroups value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> 
lsf_users,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>numactive value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_hosts,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>clients current<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,peak<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_hosts,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>servers current<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,peak<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_hosts,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>cpus current<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i,peak<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_hosts,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>cores current<span style="color: #f92672;">=</span><span style="color: #ae81ff;">32</span>i,peak<span style="color: #f92672;">=</span><span style="color: #ae81ff;">32</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_hosts,host<span style="color: #f92672;">=</span>kilenc,state<span style="color: #f92672;">=</span>slots current<span style="color: #f92672;">=</span><span style="color: #ae81ff;">32</span>i,peak<span style="color: #f92672;">=</span><span style="color: #ae81ff;">32</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,query<span style="color: #f92672;">=</span>job value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,query<span style="color: #f92672;">=</span>host value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,query<span style="color: #f92672;">=</span>queue value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,jobs<span style="color: #f92672;">=</span>submitreqs value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,jobs<span style="color: #f92672;">=</span>submitted value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span 
style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,jobs<span style="color: #f92672;">=</span>dispatched value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">19</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,jobs<span style="color: #f92672;">=</span>completed value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">12</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,jobs<span style="color: #f92672;">=</span>sentremote value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,jobs<span style="color: #f92672;">=</span>acceptremote value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,sched<span style="color: #f92672;">=</span>interval value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,sched<span style="color: #f92672;">=</span>matchhost value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">5</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,sched<span style="color: #f92672;">=</span>buckets value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">5</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,sched<span style="color: #f92672;">=</span>reordered value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">7</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,utilization<span style="color: #f92672;">=</span>slots value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,host<span style="color: #f92672;">=</span>kilenc,utilization<span style="color: #f92672;">=</span>memory value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,fd<span style="color: #f92672;">=</span>free,host<span style="color: #f92672;">=</span>kilenc value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65509</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,fd<span style="color: #f92672;">=</span>used,host<span style="color: #f92672;">=</span>kilenc value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">26</span>i 
<span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_mbatchd,fd<span style="color: #f92672;">=</span>total,host<span style="color: #f92672;">=</span>kilenc value<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65535</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>admin njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>owners njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>priority njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">93951</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">93923</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: #f92672;">=</span><span style="color: #ae81ff;">28</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>night njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> 
lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>short njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2504</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2504</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>dataq njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>normal njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1750</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1750</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>interactive njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>sendq njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">22598</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">22594</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">4</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> lsf_queues,host<span style="color: #f92672;">=</span>kilenc,name<span style="color: #f92672;">=</span>idle njobs<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,pend<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rsv<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,run<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ssusp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,susp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,ususp<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i <span style="color: #ae81ff;">1674246976000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu0,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu4,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu8,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu12,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu16,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">98.03921568448419</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.9607843137324836</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu20,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span 
style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu24,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu28,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu32,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu36,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu40,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">98.03921568448419</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.9607843136879006</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu44,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu48,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu52,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu56,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu60,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu64,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">87.99999999906868</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">10.000000001155058</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.0000000002764864</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu68,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu72,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">86.27450980280263</span>,usage_iowait<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">11.764705882127403</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.9607843137324836</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu76,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu80,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">92.30769231113655</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.8461538464431086</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.84615384653056</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu84,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">94.11764706486585</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span 
style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">5.882352941197451</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu88,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu92,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">70.58823529344627</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">29.411764701983955</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu96,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">96.15384615040192</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span 
style="color: #f92672;">=</span><span style="color: #ae81ff;">3.8461538460125784</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu100,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">97.99999999813735</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.999999999998181</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu104,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">96.07843137993407</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.92156862782338</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu108,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">96.07843136896838</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.9607843136879006</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.9607843137324836</span> <span 
style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu112,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu116,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">95.91836734305988</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4.08163265313509</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu120,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">84.61538461280144</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.8461538460344413</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">11.53846153830009</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu124,host<span style="color: #f92672;">=</span>kilenc usage_guest<span 
style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu<span style="color: #f92672;">-</span>total,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">93.47826086554115</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.1055900618243673</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.484472049468532</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.9316770186919254</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> procstat,exe<span style="color: #f92672;">=</span>mbatchd,host<span style="color: #f92672;">=</span>kilenc,process_name<span style="color: #f92672;">=</span>mbatchd,user<span style="color: #f92672;">=</span>root child_major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,child_minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,cpu_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,cpu_time_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_soft_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.03</span>,cpu_time_user<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0.05</span>,cpu_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,created_at<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1674246974000000000</span>i,involuntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_data<span style="color: #f92672;">=</span><span style="color: #ae81ff;">834994176</span>i,memory_locked<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_rss<span style="color: #f92672;">=</span><span style="color: #ae81ff;">815595520</span>i,memory_stack<span style="color: #f92672;">=</span><span style="color: #ae81ff;">327680</span>i,memory_swap<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.469912528991699</span>,memory_vms<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1091108864</span>i,minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">726</span>i,nice_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20</span>i,num_fds<span style="color: #f92672;">=</span><span style="color: #ae81ff;">10</span>i,num_threads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i,pid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">62056</span>i,ppid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103699</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,read_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">27</span>i,realtime_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_cpu_time_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_cpu_time_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_locked_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_locked_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_rss_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_rss_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8388608</span>i,rlimit_memory_vms_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_vms_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_nice_priority_hard<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_nice_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_num_fds_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">262144</span>i,rlimit_num_fds_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65535</span>i,rlimit_realtime_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_realtime_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_signals_pending_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,rlimit_signals_pending_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,signals_pending<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,voluntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">5</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,write_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">16</span>i <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> procstat,exe<span style="color: #f92672;">=</span>mbschd,host<span style="color: #f92672;">=</span>kilenc,process_name<span style="color: #f92672;">=</span>mbschd,user<span style="color: #f92672;">=</span>lsfadmin child_major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,child_minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2457641</span>i,cpu_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">320</span>i,cpu_time_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.02</span>,cpu_time_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_soft_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8.4</span>,cpu_time_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">312.14</span>,cpu_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.836645120693344</span>,created_at<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1674227581000000000</span>i,involuntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3553</span>i,major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,memory_data<span style="color: #f92672;">=</span><span style="color: #ae81ff;">228851712</span>i,memory_locked<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_rss<span style="color: #f92672;">=</span><span style="color: #ae81ff;">236847104</span>i,memory_stack<span style="color: #f92672;">=</span><span style="color: #ae81ff;">196608</span>i,memory_swap<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0</span>i,memory_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.717257022857666</span>,memory_vms<span style="color: #f92672;">=</span><span style="color: #ae81ff;">246808576</span>i,minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2137969</span>i,nice_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20</span>i,num_fds<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3</span>i,num_threads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,pid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103740</span>i,ppid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103699</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1552384</span>i,read_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">936861</span>i,realtime_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_cpu_time_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_cpu_time_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_locked_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_locked_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_rss_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_rss_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8388608</span>i,rlimit_memory_vms_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_vms_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_nice_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_nice_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_num_fds_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">262144</span>i,rlimit_num_fds_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65535</span>i,rlimit_realtime_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_realtime_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_signals_pending_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,rlimit_signals_pending_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,signals_pending<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">0</span>i,voluntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">43952</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,write_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">42311</span>i <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> procstat_lookup,exe<span style="color: #f92672;">=</span>mbschd,host<span style="color: #f92672;">=</span>kilenc,pid_finder<span style="color: #f92672;">=</span>pgrep,result<span style="color: #f92672;">=</span>success pid_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,result_code<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,running<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> procstat,exe<span style="color: #f92672;">=</span>mbatchd,host<span style="color: #f92672;">=</span>kilenc,process_name<span style="color: #f92672;">=</span>mbatchd,user<span style="color: #f92672;">=</span>root child_major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i,child_minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4476280</span>i,cpu_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">177</span>i,cpu_time_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">6.68</span>,cpu_time_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_soft_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">51.01</span>,cpu_time_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">126.42</span>,cpu_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,created_at<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1674227573000000000</span>i,involuntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4993</span>i,major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3</span>i,memory_data<span style="color: #f92672;">=</span><span style="color: #ae81ff;">834994176</span>i,memory_locked<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_rss<span style="color: #f92672;">=</span><span style="color: #ae81ff;">827785216</span>i,memory_stack<span style="color: #f92672;">=</span><span style="color: #ae81ff;">327680</span>i,memory_swap<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.5068273544311523</span>,memory_vms<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1091108864</span>i,minor_faults<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">2406945</span>i,nice_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20</span>i,num_fds<span style="color: #f92672;">=</span><span style="color: #ae81ff;">26</span>i,num_threads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3</span>i,pid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103699</span>i,ppid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103684</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">21008384</span>i,read_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">364726</span>i,realtime_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_cpu_time_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_cpu_time_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_locked_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_locked_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_rss_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_rss_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8388608</span>i,rlimit_memory_vms_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_vms_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_nice_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_nice_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_num_fds_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">262144</span>i,rlimit_num_fds_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65535</span>i,rlimit_realtime_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_realtime_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_signals_pending_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,rlimit_signals_pending_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,signals_pending<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,voluntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">172583</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: 
1562181632">
#ae81ff;">1562181632</span>i,write_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">12164760</span>i <span style="color: #ae81ff;">1674246977000000000</span>
+<span style="color: #f92672;">&gt;</span> procstat_lookup,exe<span style="color: #f92672;">=</span>mbatchd,host<span style="color: #f92672;">=</span>kilenc,pid_finder<span style="color: #f92672;">=</span>pgrep,result<span style="color: #f92672;">=</span>success pid_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i,result_code<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,running<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i <span style="color: #ae81ff;">1674246977000000000</span>
+</code></pre></div>
+
+</details>
+
+<hr />
+
+<ol start="6">
+<li>Assuming the telegraf test in the previous step completed without errors, proceed to start the telegraf service via systemd.</li>
+</ol>
+<div class="highlight"><pre><code class="language-plaintext">[root@kilenc telegraf]# systemctl start telegraf
+[root@kilenc telegraf]# systemctl status telegraf
+● telegraf.service - Telegraf
+ Loaded: loaded (/usr/lib/systemd/system/telegraf.service; enabled; vendor preset: disabled)
+ Active: active (running) since Thu 2023-01-19 14:13:51 EST; 1 day 1h ago
+ Docs: https://github.com/influxdata/telegraf
+ Main PID: 3225959 (telegraf)
+ Tasks: 35 (limit: 190169)
+ Memory: 192.6M
+ CGroup: /system.slice/telegraf.service
+ └─3225959 /usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/tele&gt;
+
+Jan 19 14:13:51 kilenc systemd[1]: Starting Telegraf...
+Jan 19 14:13:51 kilenc systemd[1]: Started Telegraf.</code></pre></div>
+
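1562181632">
+<p>Before continuing, it can be worth a quick spot check that the service stays active and is flushing metrics without errors. The commands below are a hypothetical verification sketch, assuming telegraf is logging to the systemd journal (typically the case unless a logfile has been configured in <em>telegraf.conf</em>):</p>
+<div class="highlight"><pre><code class="language-plaintext"># Hypothetical spot check -- assumes telegraf logs to the systemd journal
+[root@kilenc telegraf]# systemctl is-active telegraf
+[root@kilenc telegraf]# journalctl -u telegraf --since "10 minutes ago" --no-pager | grep -i error</code></pre></div>
+
+<ol start="7">
+<li>On the host running the database instance, <em>adatbazis</em>, perform queries to check whether the database <em>telegraf</em> exists and whether LSF-related data is being logged. This is confirmed in the output below.</li>
+</ol>
+<hr />
+
+<details>
+ <strong>Output from InfluxDB queries. 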
Click to expand!</strong> + <div class="highlight"><pre><code class="language-js">[<span style="color: #a6e22e;">root</span><span style="color: #960050; background-color: #1e0010;">@</span><span style="color: #a6e22e;">adatbazis</span> <span style="color: #a6e22e;">fedora</span>]<span style="color: #960050; background-color: #1e0010;">#</span> <span style="color: #a6e22e;">influx</span> +<span style="color: #a6e22e;">Connected</span> <span style="color: #a6e22e;">to</span> <span style="color: #a6e22e;">https</span><span style="color: #f92672;">:</span><span style="color: #75715e;">//localhost:8086 version 1.8.10 +</span><span style="color: #75715e;"></span><span style="color: #a6e22e;">InfluxDB</span> <span style="color: #a6e22e;">shell</span> <span style="color: #a6e22e;">version</span><span style="color: #f92672;">:</span> <span style="color: #ae81ff;">1.8</span>.<span style="color: #ae81ff;">10</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">auth</span> +<span style="color: #a6e22e;">username</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">influx</span> +<span style="color: #a6e22e;">password</span><span style="color: #f92672;">:</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">show</span> <span style="color: #a6e22e;">databases</span> +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">databases</span> +<span style="color: #a6e22e;">name</span> +<span style="color: #f92672;">----</span> +<span style="color: #ae81ff;">_</span><span style="color: #a6e22e;">internal</span> +<span style="color: #a6e22e;">telegraf</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">use</span> <span style="color: #a6e22e;">telegraf</span> +<span style="color: #a6e22e;">Using</span> <span style="color: #a6e22e;">database</span> <span style="color: #a6e22e;">telegraf</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">show</span> <span style="color: #a6e22e;">field</span> <span style="color: #a6e22e;">keys</span> +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">cpu</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">usage_guest</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_guest_nice</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_idle</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_iowait</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_irq</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_nice</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_softirq</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_steal</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_system</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_user</span> <span style="color: #66d9ef;">float</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">disk</span> +<span style="color: #a6e22e;">fieldKey</span> <span 
style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">inodes_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">inodes_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">inodes_used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used_percent</span> <span style="color: #66d9ef;">float</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">diskio</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">io_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">iops_in_progress</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">merged_reads</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">merged_writes</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">read_bytes</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">read_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">reads</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">weighted_io_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">write_bytes</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">write_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">writes</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">kernel</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">boot_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">context_switches</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">entropy_avail</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">interrupts</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">processes_forked</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_hosts</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">current</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">peak</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: 
#a6e22e;">lsf_jobs</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">value</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_mbatchd</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">value</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_queues</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">njobs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">pend</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">rsv</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">run</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ssusp</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">susp</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ususp</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_servers</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">value</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_users</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">value</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">mem</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">active</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">available</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">available_percent</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">buffered</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">cached</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">commit_limit</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">committed_as</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">dirty</span> <span style="color: #a6e22e;">integer</span> +<span style="color: 
#a6e22e;">free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">high_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">high_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">huge_page_size</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">huge_pages_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">huge_pages_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">inactive</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">low_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">low_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">mapped</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">page_tables</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">shared</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">slab</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">sreclaimable</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">sunreclaim</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">swap_cached</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">swap_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">swap_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used_percent</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">vmalloc_chunk</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">vmalloc_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">vmalloc_used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">write_back</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">write_back_tmp</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">net</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">bytes_recv</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">bytes_sent</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">drop_in</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">drop_out</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">err_in</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">err_out</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inaddrmaskreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inaddrmasks</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_incsumerrors</span> <span 
style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_indestunreachs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inechoreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inechos</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inmsgs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inparmprobs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inredirects</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_insrcquenchs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_intimeexcds</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_intimestampreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_intimestamps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outaddrmaskreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outaddrmasks</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outdestunreachs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outechoreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outechos</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outmsgs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outparmprobs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outredirects</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outsrcquenchs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outtimeexcds</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outtimestampreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outtimestamps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_intype0</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_intype3</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_intype8</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_outtype0</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_outtype3</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_outtype8</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_defaultttl</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_forwarding</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_forwdatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_fragcreates</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_fragfails</span> <span style="color: #a6e22e;">integer</span> +<span style="color: 
#a6e22e;">ip_fragoks</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_inaddrerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_indelivers</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_indiscards</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_inhdrerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_inreceives</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_inunknownprotos</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_outdiscards</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_outnoroutes</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_outrequests</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_reasmfails</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_reasmoks</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_reasmreqds</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_reasmtimeout</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">packets_recv</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">packets_sent</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_activeopens</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_attemptfails</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_currestab</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_estabresets</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_incsumerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_inerrs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_insegs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_maxconn</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_outrsts</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_outsegs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_passiveopens</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_retranssegs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_rtoalgorithm</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_rtomax</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_rtomin</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_ignoredmulti</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_incsumerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_indatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_inerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_memerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_noports</span> <span style="color: 
#a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_outdatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_rcvbuferrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_sndbuferrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_ignoredmulti</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_incsumerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_indatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_inerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_memerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_noports</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_outdatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_rcvbuferrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_sndbuferrors</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">processes</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">blocked</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">dead</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">idle</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">paging</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">parked</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">running</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">sleeping</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">stopped</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total_threads</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">unknown</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">zombies</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">procstat</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">child_major_faults</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">child_minor_faults</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">cpu_time_guest</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_guest_nice</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_idle</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_iowait</span> <span style="color: 
#66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_irq</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_nice</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_soft_irq</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_steal</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_system</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_user</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_usage</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">created_at</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">involuntary_context_switches</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">major_faults</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_data</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_locked</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_rss</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_stack</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_swap</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_usage</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">memory_vms</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">minor_faults</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">num_threads</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">pid</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ppid</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">voluntary_context_switches</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">procstat_lookup</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">pid_count</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">result_code</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">running</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">swap</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #66d9ef;">in</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">out</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used_percent</span> <span 
style="color: #66d9ef;">float</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">system</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">load1</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">load15</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">load5</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">n_cpus</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">n_unique_users</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">n_users</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">uptime</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">uptime_format</span> <span style="color: #a6e22e;">string</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">select</span> <span style="color: #f92672;">*</span> <span style="color: #a6e22e;">from</span> <span style="color: #a6e22e;">metrics</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">SELECT</span> <span style="color: #f92672;">*</span> <span style="color: #a6e22e;">FROM</span> <span style="color: #e6db74;">"lsf_hosts"</span>; +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_hosts</span> +<span style="color: #a6e22e;">time</span> <span style="color: #a6e22e;">current</span> <span style="color: #a6e22e;">host</span> <span style="color: #a6e22e;">peak</span> <span style="color: #a6e22e;">state</span> +<span style="color: #f92672;">----</span> <span style="color: #f92672;">-------</span> <span style="color: #f92672;">----</span> <span style="color: #f92672;">----</span> <span style="color: #f92672;">-----</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">clients</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">slots</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">cores</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">servers</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">cpus</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">servers</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">2</span> <span style="color: 
kilenc</span>">
#a6e22e;">kilenc</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">cpus</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">slots</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">clients</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">cores</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">clients</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">cores</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">cpus</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">servers</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">slots</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">servers</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">slots</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">clients</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">cpus</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">cores</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">quit</span> +</code></pre></div> + +</details> + +<hr /> + +<ol start="8"> +<li>With Telegraf successfully logging data to the InfluxDB instance, it is now possible to create a data source in Grafana on which a dashboard of LSF metrics can be built. +As noted at the outset, this article is not meant to be an extensive guide to creating dashboards in Grafana. In the Grafana navigation select <em>Configuration</em> &gt; <em>Data sources</em> (a quick data spot-check is also sketched below the screenshot).</li> +</ol> +<figure><img src="https://www.gaborsamu.com/images/configure_datasource.png" /> +</figure>
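kilenc</span>">
+
+<p>Before creating the data source, it can be worth a final spot-check from the <em>influx</em> CLI to confirm that fresh points are still arriving in the <em>telegraf</em> database. The query below is a minimal sketch; <em>lsf_servers</em> is one of the LSF measurements used by the dashboard further down, and any of the other measurements would serve equally well.</p>
+
+<div class="highlight"><pre><code class="language-sql">-- run from the influx shell, after "use telegraf"
+-- fetch the newest five points for one of the LSF measurements
+SELECT * FROM "lsf_servers" ORDER BY time DESC LIMIT 5
+</code></pre></div>
+
+<p>If recent rows come back here, the data source test in the next step should find the same measurements.</p>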
<ol start="9"> +<li>Select the <em>Add data source</em> button, followed by InfluxDB, which is listed under <em>Time series databases</em>. On the settings page, specify the following values:</li> +</ol> +<hr /> + +<table> +<thead> +<tr> +<th>Setting</th> +<th>Value</th> +</tr> +</thead> +<tbody> +<tr> +<td>URL</td> +<td><em>http://adatbazis:8086</em></td> +</tr> +<tr> +<td>Database</td> +<td><em>telegraf</em></td> +</tr> +<tr> +<td>Basic auth</td> +<td>(enable)</td> +</tr> +<tr> +<td>User</td> +<td>&lt;influxdb_username&gt;</td> +</tr> +<tr> +<td>Password</td> +<td>&lt;influxdb_password&gt;</td> +</tr> +</tbody> +</table> +<hr /> + +<p>Next, click on <em>Save &amp; test</em>. If all variables and settings were properly specified, the message <em>datasource is working. 17 measurements found</em> will be displayed.</p> + +<figure><img src="https://www.gaborsamu.com/images/test_datasource.png" /> +</figure> + +<ol start="10"> +<li>With the datasource configured in Grafana, the final step is to create a dashboard. Creating a dashboard involves building panels which display data pulled from the configured data +source using targeted queries. With a bit of effort, I was able to piece together the following dashboard, which includes both metrics from LSF and metrics from the Telegraf +<em>inputs.procstat</em> plugin for the LSF processes <em>mbatchd</em>, <em>mbschd</em> and the management <em>lim</em>.</li> +</ol> +<figure><img src="https://www.gaborsamu.com/images/lsf_dashboard3.jpg" /> +</figure> + +<hr /> + +<details> + <strong>Example dashboard definition (JSON). Click to expand!</strong> + <div class="highlight"><pre><code class="language-json">{ + <span style="color: #f92672;">"annotations"</span>: { + <span style="color: #f92672;">"list"</span>: [ + { + <span style="color: #f92672;">"builtIn"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"datasource"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"grafana"</span> + }, + <span style="color: #f92672;">"enable"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"iconColor"</span>: <span style="color: #e6db74;">"rgba(0, 211, 255, 1)"</span>, + <span style="color: #f92672;">"name"</span>: <span style="color: #e6db74;">"Annotations &amp; Alerts"</span>, + <span style="color: #f92672;">"target"</span>: { + <span style="color: #f92672;">"limit"</span>: <span style="color: #ae81ff;">100</span>, + <span style="color: #f92672;">"matchAny"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tags"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"dashboard"</span> + }, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"dashboard"</span> + } + ] + }, + <span style="color: #f92672;">"editable"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"fiscalYearStartMonth"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"graphTooltip"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"id"</span>: <span style="color: 
#ae81ff;">17</span>, + <span style="color: #f92672;">"links"</span>: [], + <span style="color: #f92672;">"liveNow"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"panels"</span>: [ + { + <span style="color: #f92672;">"collapsed"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">24</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">0</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">35</span>, + <span style="color: #f92672;">"panels"</span>: [], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Cluster aggregate current statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"row"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">"A view of the current status of the LSF servers in the cluster. Servers can be in one of four states: Ok, Unavailable, Closed and Unreachable. "</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + } + }, + <span style="color: #f92672;">"decimals"</span>: <span style="color: #ae81ff;">2</span>, + <span style="color: #f92672;">"mappings"</span>: [] + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">1</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">32</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"displayLabels"</span>: [ + <span style="color: #e6db74;">"name"</span>, + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"table"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"sortBy"</span>: <span style="color: #e6db74;">"Value"</span>, 
+ <span style="color: #f92672;">"sortDesc"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"values"</span>: [ + <span style="color: #e6db74;">"value"</span>, + <span style="color: #e6db74;">"percent"</span> + ] + }, + <span style="color: #f92672;">"pieType"</span>: <span style="color: #e6db74;">"donut"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"multi"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Ok"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"ok"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Closed"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: 
#f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"closed"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Unreachable"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"unreachable"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Unavailable"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"D"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"unavailable"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Current aggregate LSF server statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"piechart"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: 
#f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">1</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">43</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span 
style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"distinct"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"running"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Currently running"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"light-red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">1</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">45</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span 
style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"suspended"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Currently suspended"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + } + }, + <span style="color: #f92672;">"decimals"</span>: <span style="color: #ae81ff;">2</span>, + <span style="color: #f92672;">"mappings"</span>: [] + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: 
#ae81ff;">9</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">15</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">1</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">33</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"displayLabels"</span>: [ + <span style="color: #e6db74;">"name"</span>, + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"table"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"sortBy"</span>: <span style="color: #e6db74;">"Value"</span>, + <span style="color: #f92672;">"sortDesc"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"values"</span>: [ + <span style="color: #e6db74;">"value"</span>, + <span style="color: #e6db74;">"percent"</span> + ] + }, + <span style="color: #f92672;">"pieType"</span>: <span style="color: #e6db74;">"donut"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"multi"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Running"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span 
style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"running"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Pending"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"pending"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Suspended"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span 
style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"suspended"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Current aggregate LSF job statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"piechart"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">5</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">44</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: 
#f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"pending"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Currently pending "</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span 
style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">5</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">46</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: 
#e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"finished"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Finished (past hour)"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">"Spectrum LSF queue statistics. Here we show jobs in running, pending and suspended jobs. "</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">9</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">41</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"lcd"</span>, + <span style="color: #f92672;">"minVizHeight"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"minVizWidth"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"horizontal"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showUnfilled"</span>: <span style="color: #66d9ef;">true</span> + }, + <span 
style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Running"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_queues"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"run"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"name"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"=~"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"/^$Queue$/"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Pending"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_queues"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: 
#f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"pend"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"name"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"=~"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"/^$Queue$/"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Suspended"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_queues"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"susp"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"name"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"=~"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"/^$Queue$/"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Current queue statistics ($Queue)"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"bargauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: 
#f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">9</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">53</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: 
#f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"servers"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">9</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">54</span>, + <span style="color: 
#f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cpus"</span> + } + ] + } + ], + <span 
style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"CPUs"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"stepBefore"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"log"</span>: <span style="color: #ae81ff;">2</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"log"</span> + }, + <span style="color: #f92672;">"showPoints"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + 
<span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">15</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">9</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">42</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"bottom"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Running"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span 
style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"running"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Pending"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"pending"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Suspended"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: 
#e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"suspended"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Aggregate LSF job statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"light-red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">13</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">55</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span 
style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cores"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Cores"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: 
#f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">13</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">56</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: 
#e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"slots"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Slots"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"collapsed"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">24</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">17</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">37</span>, + <span style="color: #f92672;">"panels"</span>: [], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF scheduler statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"row"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span 
style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"graph"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"linear"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"linear"</span> + }, + <span style="color: #f92672;">"showPoints"</span>: <span style="color: #e6db74;">"never"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"short"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">18</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">20</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"graph"</span>: {}, + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: 
#66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"7.5.15"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"CPU utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"cpu_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Memory utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span 
style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"memory_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Number of threads"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"num_threads"</span> + ], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"File descriptors"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_mbatchd"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"D"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"fd"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"used"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + } + ], + <span style="color: 
#f92672;">"title"</span>: <span style="color: #e6db74;">"LSF mbatchd process metrics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"graph"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"linear"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"linear"</span> + }, + <span style="color: #f92672;">"showPoints"</span>: <span style="color: #e6db74;">"never"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: 
#66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"short"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">18</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">57</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"graph"</span>: {}, + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"7.5.15"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"CPU utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"cpu_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: 
#f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"lim"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Memory utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"memory_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"lim"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Number of threads"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: 
#f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"num_threads"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"lim"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF management lim process metrics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span style="color: 
#ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"graph"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"linear"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"linear"</span> + }, + <span style="color: #f92672;">"showPoints"</span>: <span style="color: #e6db74;">"never"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"short"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">26</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">27</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"graph"</span>: {}, + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: 
#66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"7.5.15"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Job buckets"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_mbatchd"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"sched"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"buckets"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Matching host criteria"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: 
#f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_mbatchd"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"sched"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"matchhost"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Scheduling interval (seconds)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_mbatchd"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"sched"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"interval"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF scheduler metrics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"graph"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"linear"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"linear"</span> + }, + <span style="color: 
#f92672;">"showPoints"</span>: <span style="color: #e6db74;">"never"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"short"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">26</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">58</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"graph"</span>: {}, + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"7.5.15"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"CPU utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span 
style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"cpu_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbschd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Memory utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"memory_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span 
style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Number of threads"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"num_threads"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF mbschd process metrics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: 
#f92672;">"collapsed"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">24</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">34</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">39</span>, + <span style="color: #f92672;">"panels"</span>: [], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Additional metrics (scratch)"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"row"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">2</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span 
style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"distinct"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"running"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Running"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"y"</span>: <span 
style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"pending"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Pending"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: 
#e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">6</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">6</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: 
#f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"suspended"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Suspended"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">7</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: 
<span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"finished"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Finished"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: 
#f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">15</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Ok"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: 
#f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"ok"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Ok"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">15</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">16</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Closed"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span 
style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"closed"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Closed"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">18</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">17</span>, + <span style="color: 
#f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Unreachable"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"unreachable"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Unreachable"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">21</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">18</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Unavailable"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span 
style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"unavailable"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Unavailable"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">21</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: 
#f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Clients"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"clients"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Clients"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span 
style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">22</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: 
#e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"servers"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">6</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">23</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: 
#f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cpus"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span 
style="color: #e6db74;">"CPUs"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">24</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Cores"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: 
#e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cores"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Cores"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span 
style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">25</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Slots"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: 
#e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"slots"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Slots"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">43</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">52</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"servers"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: 
#66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">6</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">43</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">51</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ 
+ { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cpus"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"CPUs"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"light-red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">43</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">50</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: 
#f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cores"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Cores"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: 
#f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">43</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">49</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"slots"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Slots"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + } + ], + <span style="color: #f92672;">"refresh"</span>: <span style="color: #e6db74;">"30s"</span>, + <span style="color: #f92672;">"schemaVersion"</span>: <span style="color: #ae81ff;">37</span>, + <span style="color: #f92672;">"style"</span>: <span style="color: #e6db74;">"dark"</span>, + <span style="color: #f92672;">"tags"</span>: [], + <span style="color: #f92672;">"templating"</span>: { + <span style="color: #f92672;">"list"</span>: [ + { + <span style="color: #f92672;">"current"</span>: { + <span style="color: #f92672;">"selected"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"text"</span>: [ + <span style="color: #e6db74;">"priority"</span> + ], + <span style="color: #f92672;">"value"</span>: [ + <span style="color: #e6db74;">"priority"</span> + ] + }, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"oSnSlVc4k"</span> + }, + <span style="color: #f92672;">"definition"</span>: <span style="color: #e6db74;">"show tag values from \"lsf_queues\" with key=\"name\""</span>, + <span style="color: #f92672;">"hide"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"includeAll"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"multi"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"name"</span>: <span style="color: #e6db74;">"Queue"</span>, + <span style="color: #f92672;">"options"</span>: [], + <span style="color: #f92672;">"query"</span>: <span style="color: #e6db74;">"show tag values from \"lsf_queues\" with key=\"name\""</span>, + <span style="color: #f92672;">"refresh"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"regex"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"skipUrlSync"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: 
#f92672;">"tagValuesQuery"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"tagsQuery"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"query"</span>, + <span style="color: #f92672;">"useTags"</span>: <span style="color: #66d9ef;">false</span> + } + ] + }, + <span style="color: #f92672;">"time"</span>: { + <span style="color: #f92672;">"from"</span>: <span style="color: #e6db74;">"now-1h"</span>, + <span style="color: #f92672;">"to"</span>: <span style="color: #e6db74;">"now"</span> + }, + <span style="color: #f92672;">"timepicker"</span>: {}, + <span style="color: #f92672;">"timezone"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF cluster status"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"ORojp8cVz"</span>, + <span style="color: #f92672;">"version"</span>: <span style="color: #ae81ff;">160</span>, + <span style="color: #f92672;">"weekStart"</span>: <span style="color: #e6db74;">""</span> +} +</code></pre></div> + +</details> + +<hr /> + +<p>As you can see, with a short plugin script to collect information from LSF, it&rsquo;s possible to monitor your LSF cluster using the TIG stack. It&rsquo;s important to note that there are powerful +monitoring and reporting tools available from IBM as add-ons to LSF; IBM Spectrum LSF RTM and IBM Spectrum LSF Explorer. You can find more details about the add-on capabilities for LSF +<a href="https://www.ibm.com/products/hpc-workload-management/resources">here</a>.</p> + + + + + Adam’s weekly (-ish) update, 2022-12-20 + + 2022-12-20T18:14:52-07:00 + https://hpc.social/2022/adam-s-weekly-ish-update-2022-12-20 + <h2>What&#8217;s new</h2> + +<p>The past few weeks have been on the intense side at work, so I completely lost track of the blog and haven&#8217;t had a chance to write much in that time. However, I&#8217;m now on a holiday break, and finally have time to sit down at a keyboard to write more than code and Slack messages.</p> + +<p><span id="more-289"></span></p> + +<p>One of the highlights of the past few weeks was a trip to San Jose, and the NVIDIA headquarters. I changed teams at work back in July, transferring from a group that was closely integrated with product management, to a more straightforward engineering team which <a href="https://blogs.nvidia.com/blog/2020/08/14/making-selene-pandemic-ai/">designs and builds new high-performance computing systems</a>. </p> + +<p>This was the first chance I&#8217;ve had to meet up with other members of my new team in person, and it was a really wonderful experience to be in the same physical space as folks who were previously just images on my screen. I love working remotely, but it&#8217;s also great to be able to stand in front of a white board with someone and brainstorm, or get coffee and just have a chat with a coworker outside of a video call with an agenda.</p> + +<p>(Plus, we were all careful and managed to avoid catching COVID from each other! Which was a win on its own.)</p> + +<p>Now, for the next two weeks I&#8217;m off work, and planning to take some time to relax and spend time on projects that are harder to focus on during busy work weeks. 
Expect (maybe) less about computers in my blog and social feeds, and more about D&amp;D, baking, and tasty cocktails.</p> + +<h2>What I&#8217;m reading, watching, and listening to</h2> + +<p>I&#8217;ve been a bit too scattered to focus on actual books the past few weeks, but I did find time for a few interesting articles and podcasts. In particular,</p> + +<ul> +<li><a href="https://acoup.blog/2022/12/02/collections-why-roman-egypt-was-such-a-strange-province/">&#8220;Why Roman Egypt was such a strange province&#8221;</a>, from Bret Devereaux: As usual from Devereaux, an accessible but extremely detailed discussion of why so much of what we know about the Roman empire is from Egyptian records, but why that also might not be representative of the broader empire.</li> + + + +<li><a href="https://willgallego.com/2022/12/18/emoji-as-incident-resolution-tools/">&#8220;Emoji as incident resolution tools&#8221;</a>, from Will Gallego: A fun discussion of how using emoji as part of a team&#8217;s communication can add nuance and shared understanding during incident management, along with a discussion of the disadvantages and costs associated with the practice.</li> + + + +<li><a href="https://www.mikulskibartosz.name/modern-software-architecture-in-2022/">&#8220;What does modern software architecture look like in 2022?&#8221;</a>, from Bartosz Mikulski: A nice article which discusses how service-oriented software architecture can often include an explicit expectation of change. For example, the architecture might include notes on an ongoing deprecation of a library, or might signpost the need to factor a new microservice out when overall system load gets high enough.</li> + + + +<li><a href="https://www.bradyheywood.com.au/podcasts/">The Brady Heywood podcast</a>: Found via the <a href="https://oxide.computer/podcasts/oxide-and-friends/1137359">Oxide and Friends podcast</a>, the Brady Heywood podcast is a series on engineering disasters and their consequences from a forensic engineering firm. It&#8217;s mostly not being updated any more (with the podcasters moving on to a separate series on complexity science), but it has a deep back catalog of good episodes, and includes thoughtful discussions of human factors, safety engineering, and how organizational pressures become manifest in engineering artifacts.</li> +</ul> + +<h2>Recent recipes</h2> + +<ul> +<li><a href="https://smittenkitchen.com/2016/12/homemade-irish-cream/">Smitten Kitchen&#8217;s Homemade Irish Cream</a>: This is a recipe I make every year, and I often give away small bottles of it as holiday gifts. It&#8217;s really ridiculously tasty, much better than Baileys or similar, and good either on its own or in hot chocolate.</li> + + + +<li><a href="https://smittenkitchen.com/2014/12/fairytale-of-new-york/">Smitten Kitchen&#8217;s Fairytale of New York</a>: This is a really tasty whiskey cocktail, and the star of the show is a &#8220;winter warmth syrup&#8221; that substitutes in for simple syrup. 
The syrup is simply very tasty, and turns what&#8217;s effectively an Old Fashioned variant into a lovely holiday cocktail.</li> + + + +<li>Sparkling gingerbread from <a href="http://www.apt2bbakingco.com/snacking-cakes">Yossy Arefi&#8217;s Snacking Cakes</a>: This recipe takes a little more prep than most of Arefi&#8217;s &#8220;snacking cakes&#8221;, as it includes ginger three ways (ground, fresh, and crystallized), but it&#8217;s worth the few minutes of extra work.</li> +</ul> + +<h2>Pet photos</h2> + +<figure class="wp-block-image size-large is-resized"><img alt="A white calico cat and a gray tabby cat lounging on a large brown pet bed in front of a gas fireplace." class="wp-image-295" height="512" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_7207-768x1024.jpeg" width="384" /><figcaption class="wp-element-caption">I&#8217;m pretty sure these two want me to turn the fireplace on.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="A gray tabby cat lounges on a dog bed, while a golden doodle lays on the floor nearby and looks forlornly at the bed." class="wp-image-294" height="512" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_1725-1024x1024.jpeg" width="512" /><figcaption class="wp-element-caption">Just Percy bullying the dog by stealing his bed.</figcaption></figure> + + + + + Visualizing Spectrum LSF data with Grafana + + 2022-12-13T00:06:51-07:00 + https://hpc.social/2022/visualizing-spectrum-lsf-data-with-grafana + <p><strong>Overview</strong></p> + +<p>System monitoring is a fundamental part of IT best practices, and high performance computing (HPC) environments are no exception. At the high end, HPC clusters can consist of thousands of servers, processing millions of jobs per day. HPC admins need ways to monitor the overall cluster, from system status and availability through to the efficiency of workloads. Servers today produce a wide array of metrics that can be monitored, for example, to check for various conditions. Additionally, workload schedulers produce a wealth of data about jobs. Having a single dashboard to show this type of detail can be of great benefit.</p> + +<p><a href="https://www.ibm.com/products/hpc-workload-management">IBM Spectrum LSF Suites</a> provide a complete solution for HPC workload management. This includes reporting capabilities out of the box. Spectrum LSF Suite features an integrated web interface for job management and reporting. The reporting capabilities include a number of reports out of the box, with the ability to customize and add new reports. The reporting capability in Spectrum LSF Suite and IBM Spectrum LSF Explorer is underpinned by Elasticsearch, which is used to store, index and query data. With LSF data in Elasticsearch, it’s also possible to configure LSF command-line interface (CLI) tools to query information from Elasticsearch rather than flat files, for greater performance. This is controlled via the <strong>LSF_QUERY_ES_FUNCTIONS</strong> parameter of Spectrum LSF. More details about the <strong>LSF_QUERY_ES_FUNCTIONS</strong> parameter can be found in the LSF documentation <a href="https://www.ibm.com/docs/en/spectrum-lsf/10.1.0?topic=lsfconf-lsf-query-es-functions">here</a>.</p>
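+ +<p>For illustration, turning that on amounts to a couple of lines in <em>lsf.conf</em>. Treat the following as a sketch rather than a drop-in configuration; the supported function names and server syntax are described in the parameter reference linked above:</p> + +<div class="highlight"><pre><code class="language-plaintext"># Sketch only: point LSF CLI queries at Elasticsearch (verify against the LSF docs) +LSF_QUERY_ES_SERVERS="http://localhost:9200" +LSF_QUERY_ES_FUNCTIONS="all"</code></pre></div>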
+ +<p>(1) Here is a look at the indices that are created by LSF in Elasticsearch. Note that the status shows as yellow because I only have a single Elasticsearch node.</p> + +<div class="highlight"><pre><code class="language-plaintext"># curl -XGET localhost:9200/_cat/indices +yellow open lsf_events-202205 tejh7jsMSwSeQUJzYM7cww 5 1 1137 0 808.1kb 808.1kb +yellow open lsf_jobs_pendingreason-202204 4wi7Ta8uQPSXlFBqPh4kOQ 5 1 90531 0 8.6mb 8.6mb +yellow open lsf_events-202204 tWYvW_w8TVyU1deRFOEoZg 5 1 116957 32691 59.1mb 59.1mb +yellow open lsf_jobs_active-202212 Q0pStQxvTgaeL7R-f02XWA 5 1 210052 0 50.6mb 50.6mb +yellow open lsf_jobs_pendingreason-202206 ENWIwfGrSqCHvi53aUQXJQ 5 1 44991 0 4.5mb 4.5mb +yellow open host_booleanres_latest RE8thZCgTGeMBGodeMfXEQ 5 1 5 0 23.3kb 23.3kb +yellow open lsf_jobs_pendingreason-202205 yo0iZH_4TvOqq6kQgBluvA 5 1 111 0 181.4kb 181.4kb +yellow open lsf_jobs_pend-202212 9ViIS3nDRFewrqtILEbKTQ 5 1 707 0 446.9kb 446.9kb +yellow open lsf_hostconf_latest 9N1Y8ML4TiyaamCPEDRQog 5 1 2 0 10.6kb 10.6kb +yellow open lsf_events-202209 rtKQ8F4bSleHl8EbAQez8A 5 1 8200 955 4.4mb 4.4mb +yellow open lsf_events-202206 UUKPWfN7SZ-dzVs5NAkjUg 5 1 79503 23452 36.8mb 36.8mb +yellow open lsf_hostmetrics-202209 7FUNFCWPQtuGyx5jTJLb1A 5 1 4701 0 2.2mb 2.2mb +yellow open lsf_hostmetrics-202208 52xef_3hQWK-jVuJqyUpHA 5 1 3823 0 1.9mb 1.9mb +yellow open lsf_hostmetrics-202207 IqZYhU0RQNGIFWSRH-Ym8Q 5 1 6316 0 2.9mb 2.9mb +yellow open lsf_job_acct-202209 h1ZgCSB8RwCBxwIUUzDHEQ 5 1 2050 438 1.9mb 1.9mb +yellow open lsf_jobs_active-202209 iBfnf07CTcS7Gb6TxwomRA 5 1 2658 0 1mb 1mb +yellow open lsf_hostmetrics-202206 0PXSYBOgTA2Qa_zzaafUPg 5 1 4301 0 2.1mb 2.1mb +yellow open model xSqB_T_VSByOzYavEcEVyQ 1 1 55 0 257kb 257kb +yellow open lsf_job_acct-202206 C639GnzBSjCEVczfh5u23g 5 1 16719 353 8.9mb 8.9mb +yellow open lsf_jobs_active-202204 8gN_ENkQRTSfnmxrtMcOlA 5 1 33286 0 9.8mb 9.8mb +yellow open lsf_job_acct-202205 LOxmhm_8RxaCuTd7YWYbLw 5 1 274 0 439.4kb 439.4kb +yellow open lsf_jobs_active-202205 61u2RlXgR_SXagmZfrmttQ 5 1 1880 0 1.1mb 1.1mb +yellow open lsf_jobs_pend-202209 eTgqPp9nQOScNiwyUWXmHA 5 1 9 0 106.2kb 106.2kb +yellow open lsf_job_acct-202204 dDDegS6RQSWtWN99eklexg 5 1 28902 2177 17.4mb 17.4mb +yellow open lsf_jobs_active-202206 8ivkjWSNR1Sh_BxWACP0ZA 5 1 16921 0 4.6mb 4.6mb +yellow open lsf_current_status 92KE3V4YSJ-RtRp_kepxYg 5 1 115450 0 9mb 9mb +yellow open lsf_hostmetrics-202210 vbuK2wW3RRmXuY07tDPUNQ 5 1 785 0 942.1kb 942.1kb +yellow open lsf_jobs_pend-202206 OhSwn-b0SiSj8mCW5tcNIA 5 1 22 0 244.6kb 244.6kb +yellow open lsf_jobs_pend-202205 OfBtWklETYK9cRx000aNPw 5 1 1 0 12.7kb 12.7kb +yellow open lsf_events-202212 WUC5KJWmS-2WIN8XCQpSuw 5 1 712399 74728 337mb 337mb +yellow open lsf_jobs_pend-202204 OhUsXqohSciZTPZlTryMyA 5 1 50 0 275.3kb 275.3kb +yellow open resource_attributes_latest R9bk_WIPTU62dVg3O1LDBA 5 1 5 0 24.4kb 24.4kb +yellow open lsf_jobs_pendingreason-202212 55iwDC5mRI-eRbzQLwWP6Q 5 1 3314828 0 288.7mb 288.7mb +yellow open pa-lite-log o8-jaNoGTsSVcjJW5Ufs0w 5 1 1549 0 547.2kb 547.2kb +yellow open lsf_job_acct-202212 4HXvAD02Sxq0tgp2fS2cfQ 5 1 161502 0 73.6mb 73.6mb +yellow open lsf_hostmetrics-202212 Tki6OJ41R363u9Tx02N4zw 5 1 2548 0 1.7mb 1.7mb +yellow open lsf_jobs_pendingreason-202209 D3TOZY2ORiK9PppGVt10Fg 5 1 2511 0 381.4kb 381.4kb</code></pre></div>
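+ +<p>Before building anything in Grafana, it’s worth sanity checking one of these indices directly. A minimal example using standard Elasticsearch endpoints (adjust the index name to match whatever your cluster has created):</p> + +<div class="highlight"><pre><code class="language-plaintext"># Count the documents in one of the pending jobs indices +curl -XGET 'localhost:9200/lsf_jobs_pend-202212/_count?pretty' + +# Fetch a single document to see which fields are available +curl -XGET 'localhost:9200/lsf_jobs_pend-202212/_search?size=1&amp;pretty'</code></pre></div>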
+ +<p>(2) With the LSF data stored in Elasticsearch, the next step is to connect to the Grafana server. Here we point our browser to the Grafana server on the default port: <em>http://lsf_manager:3000</em> and log in to Grafana. This step assumes an account has already been set up on Grafana. Here we are using the default admin account.</p> + +<p>(3) In Grafana, navigate to <strong>Configuration</strong> -&gt; <strong>Data sources</strong>. It’s here that it will be possible to add an Elasticsearch data source.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_3.png" /> +</figure> + +<p>(4) Next, click the <strong>Add data source</strong> button.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_4.png" /> +</figure> + +<p>(5) In the list of data sources, filter by name for <em>Elasticsearch</em> and click the Select button on the Elasticsearch entry.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_5.png" /> +</figure> + +<p>(6) When configuring the data source, it’s necessary to specify an index name. This is where the list of indices in Elasticsearch that we generated earlier will come in handy. For this example, we wish to display the total number of pending jobs in the Spectrum LSF cluster over time. This data is stored in the <em>lsf_jobs_pend*</em> indices in Elasticsearch. To configure the data source appropriately, we specify the following values:</p> + +<ul> +<li>Name: “LSF pending jobs”</li> +<li>URL: http://localhost:9200</li> +<li>Index name: “lsf_jobs_pend*”</li> +<li>Time field name: “time_stamp”</li> +<li>Version: 7.0+</li> +</ul> +<p>Note that the URL needs to point to the Elasticsearch server. In this case, both the Elasticsearch server and Grafana server are running on the same host.</p> +<p>Next, click on the <strong>Save &amp; Test</strong> button. It should return the message <em>Index OK. Time field name OK.</em></p> + +<p>Assuming that no errors were found, click on the <strong>Back</strong> button.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_6.png" /> +</figure> + +<p>(7) Now you should see <em>LSF pending jobs</em> listed as a Data Source.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_7.png" /> +</figure> + +<p>(8) With the data source configured, we’re now ready to configure a dashboard to display the LSF pending job information. Navigate to <strong>Create</strong> -&gt; <strong>Dashboard</strong>.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_8.png" /> +</figure> + +<p>(9) Click on <strong>Add an empty panel</strong>. This is used to create a new panel where the LSF pending job information will be plotted.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_9.png" /> +</figure> + +<p>(10) In the panel editor, specify the following options:</p> + +<ul> +<li>Panel title: “LSF pending jobs”</li> +<li>Specify the data source “LSF pending jobs” which was created previously</li> +<li>Specify a suitable time range (2 days)</li> +<li>Line width (5 points)</li> +</ul> +<p>You should immediately see in the panel editor the plot of the hourly pending jobs. Click on the <strong>Apply</strong> button to save the changes.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_10.png" /> +</figure> + +<p>(11) After clicking Apply, you will be returned to the Dashboard screen. The Dashboard should now display the new LSF pending jobs panel that was created above. This Dashboard could also include panels for system metrics collected by Prometheus, for example.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_11.png" /> +</figure>
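+ +<p>As an aside, everything done through the UI above can also be scripted. Below is a rough sketch using the Grafana HTTP API to create the same Elasticsearch data source; the exact <em>jsonData</em> field names can vary between Grafana versions, so treat it as a starting point rather than a recipe:</p> + +<div class="highlight"><pre><code class="language-plaintext">curl -s -u admin:admin -H 'Content-Type: application/json' \ + -X POST http://localhost:3000/api/datasources \ + -d '{"name": "LSF pending jobs", "type": "elasticsearch", + "url": "http://localhost:9200", "access": "proxy", + "database": "lsf_jobs_pend*", + "jsonData": {"timeField": "time_stamp", "esVersion": "7.0.0"}}'</code></pre></div>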
+ +<p>(12) Next, click on the diskette icon in the upper right to save the Dashboard with the LSF pending jobs panel. We’ll name it <em>Spectrum LSF cluster status</em>.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_12.png" /> +</figure> + +<p>Additional panels can be added to the <em>Spectrum LSF cluster status</em> based on the data logged by Spectrum LSF to Elasticsearch.</p> + +<p>That concludes the simple example of plotting Spectrum LSF cluster data from Elasticsearch in Grafana. As mentioned, the IBM Spectrum LSF Suites integrated web interface also provides reporting capabilities, with several built-in reports provided out of the box. Below, we’ve included a screenshot of the <em>pending job analysis</em> report included with Spectrum LSF Suites.</p> + +<figure><img src="https://www.gaborsamu.com/images/lsf_pending.png" /> +</figure> + +<p><strong>Summary</strong></p> + +<p>Spectrum LSF provides many hooks and integration points that enable administrators to customize everything from scheduling behavior and the output of query commands to the job information logged to Elasticsearch. Spectrum LSF is highly customizable by organizations to suit specific needs and requirements. We’ve demonstrated this using Grafana to visualize data from the LSF scheduler in a simple example. Following the above example, administrators can combine existing HPC cluster system-level reporting in Grafana with job information from Spectrum LSF for a better overall view and understanding of the infrastructure.</p> + + + + + Adam’s weekly update, 2022-12-04 + + 2022-12-05T05:49:35-07:00 + https://hpc.social/2022/adam-s-weekly-update-2022-12-04 + <h2>What&#8217;s new</h2> + +<p>This week was really intense from a work perspective. Not &#8220;bad intense&#8221;, but the kind of week where every day was spent with such a level of focus, that at 5 PM or so I found myself staring off into space and forgetting words. I think I got some good things accomplished, but my brain also felt like mush by the time the weekend came.</p> + +<p><span id="more-268"></span></p> + +<p>This week I&#8217;m traveling to San Jose for work (I just checked into my hotel a little while ago!), so I fully expect this week to also be eaten by work. So I don&#8217;t promise anything terribly interesting for next week&#8217;s post&#8230;</p> + +<p>However, I did take advantage of a Sunday in San Jose to visit the <a href="https://computerhistory.org/">Computer History Museum</a> in Mountain View! I try to visit the museum every few years, and while a lot of the exhibits are the same, enough things change that I always get something new from the visit. Also, I&#8217;ve been doing a lot of reading about hardware development and the history thereof lately, so it was interesting to examine the museum through that new lens.</p> + +<p>I may write more about my visit later this week &#8212; it definitely sparked some thoughts &#8212; but in the meantime, here are a few photos I took while wandering around the museum.</p> + +<figure class="wp-block-image size-large is-resized"><img alt="A mechanical computer built mostly of brass, with various numerical dials. A small placard labels this as a replica of the Babbage Difference Engine No. 1 Demonstration Piece." 
class="wp-image-282" height="800" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6894-768x1024.jpg" width="600" /><figcaption class="wp-element-caption">The Babbage Difference Engine, and other mechanical computers, have always fascinated me.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="The Cray-1, a round computer with its own built-in seating attached." class="wp-image-283" height="446" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6965-1024x768.jpg" width="595" /><figcaption class="wp-element-caption">Can&#8217;t visit the museum without visiting the Cray-1.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="The Connection Machine 1, a large black cube divided in eight sections." class="wp-image-284" height="768" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6973-768x1024.jpg" width="576" /><figcaption class="wp-element-caption">I would have loved to have seen a CM-1 in operation, with its red LEDs showing the operation of its many single-bit CPUs.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="The front panel of an Altair 8800 computer, with an array of LEDs and switches controlling the state of individual bits." class="wp-image-285" height="449" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_7037-1024x768.jpg" width="598" /><figcaption class="wp-element-caption">Having recently read Charles Petzold&#8217;s &#8220;Code&#8221;, I was struck by how closely the front panel of the Altair 8800 resembles the fictional front panel of the computer that Petzold constructs from logic gates up.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="A Dell PowerEdge R710 lays on a white plastic table, top cover off, surrounded by instructions on how to disassemble it." class="wp-image-286" height="467" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_7073-1024x768.jpg" width="623" /><figcaption class="wp-element-caption">The CHM Learning Lab now includes a back room with a couple of Dell PowerEdge R710 servers, complete with instructions for how to disassemble and reassemble them. Anyone who wants can wander in and take them apart. It was great fun watching a 5-year-old kid pulling components out of one of these&#8230; As well as feeling a little weird, as I think I&#8217;ve run these in production!</figcaption></figure> + +<h2>What I&#8217;m reading</h2> + +<p>I don&#8217;t have a ton to share this week &#8212; honestly, the whole week feels like a blur &#8212; but here are two books that I recommend.</p> + +<ul> +<li><a href="https://www.aliettedebodard.com/bibliography/novels/the-universe-of-xuya/the-red-scholars-wake/">The Red Scholar&#8217;s Wake, by Aliette de Bodard</a>: As the blurb says, &#8220;Lesbian space pirates!&#8221; Also, a really wonderful novella about building a new relationship amidst grief, power differentials, politics, and space battles. I think I basically recommend everything that de Bodard writes, but especially this. And it basically stands alone! 
So you can read this first, without going back to the other stories in the same world.</li> + + + +<li><a href="https://www.harpercollins.com/products/dealers-of-lightning-michael-a-hiltzik?variant=40824247779362">Dealers of Lightning: XEROX PARC and the Dawn of the Computer Age, by Michael Hiltzik</a>: I&#8217;ve just started this, but it&#8217;s already a really interesting snapshot of a key period in the development of the personal computer.</li> +</ul> + +<h2>Recent recipes</h2> + +<ul> +<li><a href="https://smittenkitchen.com/2019/12/unfussy-sugar-cookies/">Smitten Kitchen&#8217;s Unfussy Sugar Cookies</a>: These cookies did, indeed, prove to be both tasty and easy to make. If you just want some easy cookies to snack on, I absolutely recommend this recipe.</li> +</ul> + +<h2>Pet photos</h2> + +<figure class="wp-block-image size-large is-resized"><img alt="Phyrne the calico cat stares down into the camera from a stairway" class="wp-image-279" height="414" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6881-768x1024.jpg" width="310" /></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="Close-up on the face of Percy the gray tabby cat" class="wp-image-280" height="420" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6879-768x1024.jpg" width="314" /></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="Benny the golden doodle curled up on a dog bed" class="wp-image-281" height="238" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6876-1024x768.jpg" width="317" /></figure> + + + + + An Initial Look at Deep Learning IO Performance + + 2022-11-28T00:00:00-07:00 + https://hpc.social/2022/an-initial-look-at-deep-learning-io-performance + <h2 id="abstract">Abstract</h2> + +<p>This blog post describes an investigation of the IO behavior of TensorFlow and PyTorch during resnet50 training running on Lambda Labs’ 8x V100 GPU instances. Both ephemeral local NVMe storage and network attached persistent storage were tested. The local NVMe storage was fast enough to achieve the throughput rate required to hit synthetic test targets. The network attached persistent storage may not be able to fully saturate 8 V100 GPUs during training, though it can achieve nearly the same level of performance as the local storage so long as TFRecords are utilized. Further, there are specific behaviors and bottlenecks in TensorFlow and PyTorch that can reduce training performance when using real data from ImageNet.</p> + +<h2 id="acknowledgements">Acknowledgements</h2> + +<p>Thank you to Michael Balaban at Lambda Labs for providing access to their GPU cloud for this testing. Thank you to Chuan Li for the creation of his TensorFlow benchmarking tools. Thank you also to Andrej Karpathy, Toby Boyd, Yanan Cao, Sanjoy Das, Thomas Joerg, and Justin Lebar for their excellent blog posts on deep learning and XLA performance that helped inform this article. I hope that this post will be useful for others as your work and writing was useful for me.</p> + +<h2 id="introduction">Introduction</h2> + +<blockquote> + <p><em>…just because you can formulate your problem as RL doesn’t mean you should. If you insist on using the technology without understanding how it works you are likely to fail.</em></p> + + + <p>        Andrej Karpathy, <a href="https://karpathy.github.io/2019/04/25/recipe/">A Recipe for Training Neural Networks</a>, 2019</p> + +</blockquote> + +<p>That was the phrase that stuck in my head when I first started this project. 
What project you may ask? I want to understand how deep learning experiments utilize fast storage devices. Not just any experiments either: <em>real</em> ones, preferably big. That’s how I happened upon Andrej Karpathy’s blog. He is the former Sr. Director of AI at Tesla and knows a thing or two about training big neural networks. I’ve spent the last decade working on Ceph and have worked on distributed systems and distributed storage for nearly 2 decades at this point. But training neural nets? The closest I’ve come was back in the early 2000s when I tried to build a tool to predict video game framerates. I scraped benchmark numbers from review websites and built M5 decision trees based on hardware and video card settings. It sort of worked, but was terribly overtrained on a small (~4000 sample) dataset. Training with petabytes of data to teach an AI how to responsibly drive a car? I can already feel a bit of imposter syndrome setting in.</p> + +<p>Thankfully my goal is comparatively modest. I don’t need to build a cutting edge classifier or explore the intricacies of manually implementing back-propagation. I simply want to understand the IO patterns that are involved when training big datasets with fast GPUs so I can help researchers speed up their work. Up until now, my ability to do this was fairly limited. At the day job I’ve had access to a small group of nodes with extremely modest GPUs. I set up runs with MLPerf but the datasets (WMT G-E and CoCo) easily fit into memory. Other than a short burst of read traffic at the very beginning of training there was very little IO. Recently I had the opportunity to meet Michael Balaban, Co-Founder of <a href="https://lambdalabs.com/">Lambda Labs</a>. I told him what I wanted to do and he gave me access to Lambda’s GPU cloud and beta persistent storage to give it a try. I was able to grab one of Lambda’s 8x Tesla V100 instances (These things are incredibly popular so it’s best to grab one early in the morning!). Not all of Lambda’s instance types currently have access to the persistent storage but the V100 instances in the Texas zone do. Once secured, I got to work.</p> + +<h2 id="tensorflow---synthetic">TensorFlow - Synthetic</h2> + +<p>Before even attempting to run tests with real data, I realized I needed a baseline to start with. Luckily, Chuan Li, Lambda’s Chief Scientific Officer, wrote a tool for running TensorFlow benchmarks and made it available on github <a href="https://github.com/lambdal/lambda-tensorflow-benchmark">here</a>. One of the advantages of Lambda’s cloud is that they’ve already bundled up many popular tools for running deep-learning workloads into one package called <a href="https://lambdalabs.com/lambda-stack-deep-learning-software">Lambda Stack</a> which comes pre-installed when you start an instance. This made it fast to get started, though I did run into one issue. Lambda Stack comes standard with TensorFlow 2, but Chuan Li’s tool relies on a TensorFlow benchmark submodule that is designed to work with TensorFlow 1. Luckily, the parent repository was unofficially updated to work with Tensorflow 2 (with a warning that it is no longer being maintained). A quick “git checkout master” in the “benchmarks” submodule directory got everything working. Chuan Li’s tool makes it simple to run tests with several preconfigured templates already included. 
I chose the fp16 resnet50 configuration as it should be fast at processing images and is fairly standard.</p> + +<pre><code>TF_XLA_FLAGS=--tf_xla_auto_jit=2 ./batch_benchmark.sh X X 1 100 2 config/config_resnet50_replicated_fp16_train_syn +</code></pre> + +<p>Using the invocation provided in the benchmark README.md file, I was able to quickly run benchmarks with synthetic data on up to 8 V100 GPUs in the node. At one point I got stuck, hitting what appeared at first to be an unexplainable 25% performance loss. I reran the tests multiple times and even monitored GPU clockspeeds/temperatures in nvidia-smi with no luck. Ultimately I discovered my error. In the slow cases, I had inadvertently left out the “TF_XLA_FLAGS=--tf_xla_auto_jit=2” environment variable. It turns out that setting this allows Tensorflow to compile and execute functions with XLA (Accelerated Linear Algebra) support, which is a pretty big win for these tests.</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Tensorflow_-_ResNet50_Synthetic_Training_fp16.svg" /></p> + +<p>At this point I decided that I needed to understand how Chuan Li’s tool works. It turns out that he is using the same base tf_cnn_benchmarks.py benchmark code that companies like Nvidia and Dell also use for benchmarking their GPU solutions. I spent some time running it directly with Dell’s settings from their deep learning overview <a href="https://infohub.delltechnologies.com/l/high-speed-object-storage-for-deep-learning/overview-3284">here</a>. Unfortunately those tests had mixed results, even after various tweaks. While researching the XLA issues I mentioned earlier, however, I made an even better <a href="https://blog.tensorflow.org/2018/11/pushing-limits-of-gpu-performance-with-xla.html">discovery</a> on the TensorFlow website. I found an excellent blog post with performance data written by some of the core Tensorflow developers. It’s now 4 years old, but still appears to be quite valid. The tuning options used were both simpler and resulted in higher performance versus other configurations that I’ve come across.</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Tensorflow_-_ResNet50_Synthetic_Training_fp16_blog_compare.svg" /></p> + +<p>Training with synthetic data in Lambda’s cloud resulted in similar performance to what the Tensorflow developers reported. In fact, using their own settings yielded slightly faster results when running on Lambda’s 8xV100 instance! It was incredibly encouraging to me that even in Lambda’s cloud environment with virtual machine instances I could achieve performance that was as fast or faster than what the Tensorflow developers were reporting.</p> + +<h1 id="choosing-a-real-data-set">Choosing a Real Data Set</h1> + +<blockquote> + <p><em>The first step to training a neural net is to not touch any neural net code at all and instead begin by thoroughly inspecting your data.</em></p> + + + <p>        Andrej Karpathy, <a href="https://karpathy.github.io/2019/04/25/recipe/">A Recipe for Training Neural Networks</a>, 2019</p> + +</blockquote> + +<p>Having convinced myself that I had Tensorflow operating reasonably efficiently in synthetic tests, it was time to start thinking about what dataset to use for “real” training. The largest and most obvious choice is ImageNet. ImageNet is composed of over 1.2 million categorized images that form a roughly 160GB training dataset. It is also the largest dataset I could find that was publicly accessible. Downloading it isn&#8217;t so easy however. The only version that I could access is the ImageNet Object Localization Challenge dataset hosted on <a href="https://www.kaggle.com/c/imagenet-object-localization-challenge">kaggle</a>.</p>
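+ +<p>For what it’s worth, the download can be scripted with the Kaggle CLI. A minimal sketch, assuming API credentials are already configured under ~/.kaggle/ and that the archive name matches the competition slug:</p> + +<pre><code># Sketch: fetch the dataset with the Kaggle CLI (credentials in ~/.kaggle/kaggle.json) +pip install kaggle +kaggle competitions download -c imagenet-object-localization-challenge +unzip imagenet-object-localization-challenge.zip -d ~/data/ImageNet +</code></pre>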
Downloading it isn’t so easy however. The only version that I could access is the ImageNet Object Localization Challenge dataset hosted on <a href="https://www.kaggle.com/c/imagenet-object-localization-challenge">kaggle</a>.</p> + +<p>After finally figuring out how to download the data, it was time to follow Andrej’s advice and try to learn something about it. While ImageNet is curated and annotated, it has many images of different sizes, dimensions, and pixel counts. Images also come from many sources with different levels of quality. Through the power of stack-exchange I was able to find a bash one-liner script to generate a histogram of image sizes:</p> + +<pre><code>find . -type f -print0 | xargs -0 ls -l | awk '{size[int(log($5)/log(2))]++}END{for (i in size) printf("%10d %3d\n", 2^i, size[i])}' | sort -n +</code></pre> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/ImageNet_-_Image_Distribution_by_Approximate_Size.svg" /></p> + +<p>Roughly 80% of the images are in the 64KB or 128KB size bins. Almost all of the remaining images are smaller. That gives us a pretty good idea of what kind of IOs to expect during classification. Or at least…it does for frameworks that read those images directly. In Tensorflow’s case, there’s an alternative format called TFRecord. TFRecords are basically collections of image data sequentially laid out in much larger files. Instead of iterating over thousands or millions of individual image files, TFRecords allow Tensorflow to instead stream fewer, larger files that each house multiple images. It’s a one time cost to pre-process the data so Tensorflow has less work to do during training. After I downloaded the ImageNet data I took a shot at converting the ImageNet LOC data into TensorFlow records. Luckily, the TensorFlow tpu github repository already has a <a href="https://github.com/tensorflow/tpu/blob/master/tools/datasets/README.md">tool</a> that can do this. I had to manipulate the dataset slightly, but ultimately this process worked (at least for the training data):</p> + +<pre><code>pip install gcloud google-cloud-storage +pip install protobuf==3.20.1 + +mkdir ~/data/ImageNetFoo +ln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/train ~/data/ImageNetFoo/train +ln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/val ~/data/ImageNetFoo/val +ln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/test ~/data/ImageNetFoo/test +ln -s ~/data/ImageNet/LOC_synset_mapping.txt ~/data/ImageNetFoo/synset_labels.txt +python imagenet_to_gcs.py --raw_data_dir=/home/ubuntu/data/ImageNetFoo --local_scratch_dir=/home/ubuntu/ExaltedOrbs/ImageNet/tf_records --nogcs_upload +</code></pre> + +<p>Perhaps I should say that this worked so long as the original dataset was located on the local NVMe drive. The persistent storage didn’t fare as well. Attempting to decompress ImageNet on the persistent storage resulted in blowing past the max number of open files allowed with errors like:</p> + +<pre><code>OSError: [Errno 24] Too many open files. +</code></pre> + +<p>Unfortunately this couldn’t be fixed on the instance. It appeared to be passed through from the host and the persistent storage was completely unusable until the instance was rebooted. Recently I spoke to one of Lambda’s engineers and they are working on a fix. (It may already be implemented by the time you read this!) I also want to note that the persistent storage is still in beta so issues like this are not entirely unexpected. 
+ +<h2 id="fio---baseline-io-results">FIO - Baseline IO Results</h2> + +<p>Next, I turned my attention to running baseline tests on Lambda’s local and persistent storage using fio. Fio is a highly configurable and well-respected benchmark in the storage community and perfect for generating baseline results. I decided to use a dataset size that is roughly similar to ImageNet (200GB), the libaio engine in fio with direct IO, and an appropriately high IO depth to let the NVMe drives stretch their legs a bit.</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Lambda_Labs_8xv100_Storage.svg" /></p> + +<p>Throughput with the local NVMe drive(s) is surprisingly good. The persistent storage is slower, but still might be fast enough at a little over 1GB/s for large reads. 16K IOPS was somewhat slower in both cases. I chose 16K so that I could quickly compare to tests I ran in my Ceph QEMU/KVM performance blog post <a href="https://ceph.io/en/news/blog/2022/qemu-kvm-tuning/">here</a>. Without getting into the details, I suspect there’s still some room for improved IOPS with Lambda’s setup. Luckily though, converting into TFRecords should make Tensorflow throughput bound instead of latency bound. What about PyTorch or other tools that want to read images directly though? Fio gives us the ability to simulate it by using its ‘bssplit’ feature. We can take the size ranges and percentiles generated when examining ImageNet and give fio a similar distribution:</p> + +<pre><code>fio --ioengine=libaio --direct=1 --bssplit=2K/1:4K/2:8K/4:16K/8:32K/13:64K/38:128K/33:256K/1 --iodepth=128 --rw=randread --norandommap --size=200G --numjobs=1 --runtime=300 --time_based --name=foo +</code></pre> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Lambda_Labs_8xV100_Storage_Reads_Second_Bssplit.svg" /></p> + +<p>This isn’t exactly right as we are not reading data spread across millions of files, but it should provide something of an upper bound on what to expect. It looks like the persistent storage can do approximately 10K reads/second at a throughput rate of around 750MB/s. The local storage is about 3-4 times faster. Local storage should be fast enough to support the kind of images/second throughput rates we want to hit in Tensorflow on 8 V100 GPUs, but the jury is still out for the persistent storage.</p>
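+ +<p>For reference, the fixed-size 16K random read numbers above came from the same style of invocation with a single block size rather than a distribution; I didn’t keep the exact command, but it was along these lines:</p> + +<pre><code>fio --ioengine=libaio --direct=1 --bs=16k --iodepth=128 --rw=randread --norandommap --size=200G --numjobs=1 --runtime=300 --time_based --name=randread16k +</code></pre>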
+ +<h2 id="tensorflow---imagenet">Tensorflow - ImageNet</h2> + +<p>Running benchmarks with real data rather than synthetic data is fairly straightforward in Tensorflow. You simply append data_dir and data_name flags to the CLI invocation to let it know where the TFRecords are located:</p> + +<pre><code>sync; echo 3 | sudo tee /proc/sys/vm/drop_caches +python ./tf_cnn_benchmarks.py --batch_size=256 --num_batches=100 --model=resnet50 --optimizer=momentum --variable_update=replicated --all_reduce_spec=nccl --use_fp16=True --nodistortions --gradient_repacking=2 --compute_lr_on_cpu=True --single_l2_loss_op=True --xla_compile=True --num_gpus=8 --loss_type_to_report=base_loss --data_dir=/home/ubuntu/ImageNet-TF/train --data_name=imagenet +</code></pre> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Tensorflow_-_ResNet50_Real_Training_First_Attempt_fp16.svg" /></p> + +<p>Ouch. Much lower performance with the ImageNet data vs synthetic! This is especially unfortunate given that 4 years ago the Tensorflow developers reported much better results. I spent some time reading and experimenting with different settings. Ultimately the one setting that made a substantial difference was “datasets_num_private_threads”. In the Tensorflow benchmark source code, this setting is described as: “[The] number of threads for a private threadpool created for all datasets computation.” I’ll go into more detail about what these threads are doing in a bit. For now, let’s see how increasing the number of threads affects the results:</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Tensorflow_-_ResNet50_ImageNet_Training_fp16_private_threads.svg" /></p>
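+ +<p>Each of those runs simply appends one additional flag to the invocation shown above; for example, for the 32 private thread case:</p> + +<pre><code># Same flags as above, plus the private threadpool size (32 thread example) +python ./tf_cnn_benchmarks.py --batch_size=256 --num_batches=100 --model=resnet50 --optimizer=momentum --variable_update=replicated --all_reduce_spec=nccl --use_fp16=True --nodistortions --gradient_repacking=2 --compute_lr_on_cpu=True --single_l2_loss_op=True --xla_compile=True --num_gpus=8 --loss_type_to_report=base_loss --data_dir=/home/ubuntu/ImageNet-TF/train --data_name=imagenet --datasets_num_private_threads=32 +</code></pre>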
+ +<p>Increasing the number of private threads has a dramatic effect on performance, though I was unable to fully match the performance achieved in the synthetic tests on either the local or persistent storage. The local storage fared better at high thread counts, gradually topping out at around 8600 images/second. At high private thread counts the persistent storage topped out between 7000-8000 images/second with a higher degree of variability between runs. I suspect that in this case the persistent storage has likely hit its (per instance) limit.</p> + +<p>In addition to having a dramatic effect on performance, changing the private thread count also had a large effect on the CPU consumption of the TensorFlow process. CPU usage increases almost linearly with additional private threads up to around 30 cores. What exactly are these private threads doing? To answer that question, I utilized two tools that I often deploy when diagnosing CPU usage in Ceph. When testing with a lower number of private threads, I used Linux’s perf tool to look at where cycles are being consumed when the private threads are fully saturated. At higher levels of private threads, I used my wallclock profiler <a href="https://github.com/markhpc/uwpmp">uwpmp</a> to look at how private threads spend their time when increasing the thread count no longer improves performance.</p> + +<p>In the first case with perf, we can get a good view of the work that these private threads are doing:</p> + +<pre><code>--77.31%--tensorflow::ThreadPoolDevice::Compute + | + |--51.19%--0x7f511a00c7d8 + | | + | --51.18%--tensorflow::jpeg::Uncompress + |--14.48%--tensorflow::ResizeBilinearOp&lt;Eigen::ThreadPoolDevice, unsigned char&gt;::Compute + |--5.47%--tensorflow::CastOpBase::Compute + |--2.66%--tensorflow::ReverseV2Op&lt;Eigen::ThreadPoolDevice, unsigned char, int&gt;::Compute +</code></pre> +<p>The majority of the cycles consumed is in jpeg decompression and resize operations, along with a smattering of other stuff. What happens if we look at a case with a higher private thread count but now look at wallclock time instead of cycles? I ended up having some trouble getting the profiler to work properly and consistently get clean callgraphs, but I was able to get at least one run in that revealed some interesting information. First, I saw time spent in the same functions that perf told us we were spending cycles in:</p> + +<pre><code>+ 100.00% Eigen::ThreadPoolTempl&lt;tensorflow::thread::EigenEnvironment&gt;::WorkerLoop(int) + + 99.90% ??? + |+ 97.30% ??? + ||+ 92.40% ??? + |||+ 77.10% _PyEval_EvalFrameDefault + ||||+ 47.20% ??? + |||||+ 38.10% tensorflow::jpeg::Uncompress(void const*, int, tensorflow::jpeg::UncompressFlags const&amp;, long*, std::function&lt;unsigned char* (int, int, int)&gt;) + ||||+ 12.20% tensorflow::ResizeBilinearOp&lt;Eigen::ThreadPoolDevice, unsigned char&gt;::Compute(tensorflow::OpKernelContext*) + ||||+ 4.40% tensorflow::CastOpBase::Compute(tensorflow::OpKernelContext*) + ||||+ 1.70% tensorflow::ReverseV2Op&lt;Eigen::ThreadPoolDevice, unsigned char, int&gt;::Compute(tensorflow::OpKernelContext*) +</code></pre> + +<p>But the wallclock profile also exposed that there may be contention in multiple areas in the private threads around some of the nsync synchronization primitives being used:</p> + +<pre><code> ||||||| | + 4.50% nsync::nsync_mu_semaphore_p(nsync::nsync_semaphore_s_*) + ||||||| | + 4.50% syscall + +</code></pre> + +<p>This almost always appeared nested deep inside:</p> + +<pre><code>tensorflow::BFCAllocator::AllocateRaw(unsigned long, unsigned long, tensorflow::AllocationAttributes const&amp;) +</code></pre> + +<p>Sadly I was missing a number of debug symbols and don’t 100% trust the wallclock trace. For now I’ll just say that the private threads are doing a significant amount of work decompressing and manipulating the image data to keep the GPUs fed. I suspect that with newer and faster GPUs the image retrieval pipeline could become an even bigger issue when training with real image data. The mystery for me is how the TensorFlow developers achieved such good results 4 years ago without using dedicated private threads at all. Perhaps they had a significantly faster jpeg decompression mechanism that I am unaware of?</p> + +<h2 id="pytorch---imagenet">PyTorch - ImageNet</h2> + +<p>After running Tensorflow, I also ran some benchmarks in PyTorch using Nvidia’s “DeepLearningExamples” github <a href="https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnet50v1.5">repo</a>. 
First, I installed the prereqs and set up the repository:</p> + +<pre><code>pip install 'git+https://github.com/NVIDIA/dllogger' +pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda110 +git clone https://github.com/NVIDIA/DeepLearningExamples +</code></pre> + +<p>Then, I prepared ImageNet for use in PyTorch:</p> + +<pre><code>cd ~/data/ImageNet/ILSVRC/Data/CLS-LOC/val +wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash +</code></pre> + +<p>And finally, I ran a test:</p> + +<pre><code>cd DeepLearningExamples/PyTorch/Classification/ConvNets +sync; echo 3 | sudo tee /proc/sys/vm/drop_caches +python ./multiproc.py --nproc_per_node 1 ./main.py --arch resnet50 --label-smoothing 0.1 --run-epoch 1 --amp --static-loss-scale 256 --workspace /home/ubuntu/data/ImageNet-Scratch /home/ubuntu/data/ImageNet-Orig/ILSVRC/Data/CLS-LOC/ +</code></pre> + +<p>There are a couple of differences here versus the TensorFlow tests. First, I’m using the raw ImageNet archive instead of a preprocessed TFRecord dataset, so the read behavior is different. Because I was unable to extract or copy the raw ImageNet archive onto the persistent storage, I’m also only testing the local NVMe drive. Finally, I didn’t see any specific examples for running with fp16 in Nvidia’s documentation, so I’m using amp (automatic mixed precision), which may be slightly slower.</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Pytorch_-_ResNet50v15_ImageNet_Training_AMP.svg" /></p> + +<p>Given the number of differences it’s tough to draw direct comparisons with Tensorflow. Amp is one difference, but it’s quite possible that there are tuning options that could improve performance here that I don’t know about. I did notice that PyTorch, like Tensorflow, is using quite a bit of CPU to keep the GPUs working. I suspect that there are ways to tweak the IO pipeline that could improve performance. For now though, let’s compare the IO patterns on the local NVMe drive during the Tensorflow and PyTorch runs. I was hoping to be able to use blktrace to do this, but unfortunately was unable to get any data from the virtual devices in the instance. 
I was able to collect more general statistics using collectl however.</p> + +<h5 id="disk-read-statistics-during-pytorch-8-gpu-run">Disk Read Statistics During PyTorch 8 GPU run:</h5> + +<table> + <thead> + <tr> + <th>Time</th> + <th>Name</th> + <th>KBytes</th> + <th>Merged</th> + <th>IOs</th> + <th>Size</th> + <th>Wait</th> + <th>QLen</th> + <th>SvcTim</th> + </tr> + </thead> + <tbody> + <tr> + <td>00:29:18</td> + <td>vda</td> + <td>761136</td> + <td>0</td> + <td>6746</td> + <td>113</td> + <td>58</td> + <td>431</td> + <td>0</td> + </tr> + <tr> + <td>00:29:19</td> + <td>vda</td> + <td>752172</td> + <td>0</td> + <td>6648</td> + <td>113</td> + <td>112</td> + <td>810</td> + <td>0</td> + </tr> + <tr> + <td>00:29:20</td> + <td>vda</td> + <td>747824</td> + <td>0</td> + <td>6595</td> + <td>113</td> + <td>84</td> + <td>604</td> + <td>0</td> + </tr> + <tr> + <td>00:29:21</td> + <td>vda</td> + <td>735964</td> + <td>0</td> + <td>6583</td> + <td>112</td> + <td>73</td> + <td>551</td> + <td>0</td> + </tr> + <tr> + <td>00:29:22</td> + <td>vda</td> + <td>695636</td> + <td>0</td> + <td>6237</td> + <td>112</td> + <td>102</td> + <td>760</td> + <td>0</td> + </tr> + </tbody> +</table> + +<h5 id="disk-read-statistics-during-tensorflow-8-gpu-run">Disk Read Statistics During TensorFlow 8 GPU run:</h5> + +<table> + <thead> + <tr> + <th>Time</th> + <th>Name</th> + <th>KBytes</th> + <th>Merged</th> + <th>IOs</th> + <th>Size</th> + <th>Wait</th> + <th>QLen</th> + <th>SvcTim</th> + </tr> + </thead> + <tbody> + <tr> + <td>00:38:45</td> + <td>vda</td> + <td>1081324</td> + <td>0</td> + <td>8440</td> + <td>128</td> + <td>0</td> + <td>7</td> + <td>0</td> + </tr> + <tr> + <td>00:38:46</td> + <td>vda</td> + <td>927512</td> + <td>0</td> + <td>7241</td> + <td>128</td> + <td>0</td> + <td>7</td> + <td>0</td> + </tr> + <tr> + <td>00:38:47</td> + <td>vda</td> + <td>913512</td> + <td>0</td> + <td>7130</td> + <td>128</td> + <td>0</td> + <td>7</td> + <td>0</td> + </tr> + <tr> + <td>00:38:48</td> + <td>vda</td> + <td>1047444</td> + <td>0</td> + <td>8186</td> + <td>128</td> + <td>0</td> + <td>6</td> + <td>0</td> + </tr> + <tr> + <td>00:38:49</td> + <td>vda</td> + <td>968776</td> + <td>0</td> + <td>7560</td> + <td>128</td> + <td>0</td> + <td>6</td> + <td>0</td> + </tr> + </tbody> +</table> + +<p><br /> +When just looking at the IO sizes, both runs appear similar, but that doesn’t tell the whole story. It is likely that Tensorflow is doing much larger reads that are broken up into contiguous 128KB chunks by the block layer based on the underlying device’s max_sectors_kb setting. The tells here are the very low queue length and wait times for the TensorFlow run versus the PyTorch run. In both cases the device service times are low (0), but in the TensorFlow case IOs are still backing up in the device queue.</p> + +<p>Interestingly, it appears that it may be possible to use Nvidia’s DALI (Data Loading Library) package to <a href="https://docs.nvidia.com/deeplearning/dali/archives/dali_170/user-guide/docs/examples/frameworks/pytorch/pytorch-various-readers.html">read TFRecords into PyTorch</a>. I didn’t have time to attempt it, but potentially that could have a big effect on IO behavior and performance as well.</p> + +<h2 id="conclusion">Conclusion</h2> + +<p>As I’ve been writing this post, I realize just how complicated it is to understand the performance characteristics of training neural networks. Even as we talk about metrics like images/second, the options that are used (batch size for instance) can also affect convergence. 
It’s very difficult to come up with a common methodology that is always better than others. I wonder if another metric, like reaching a desired level of convergence, would be better in the end. Having said that, I am glad for having done this exercise as I learned some valuable things:</p> + +<ol> + <li> + <p>Pre-processing data into a format like TFRecords on fast local storage is a big win from an IO perspective. It lets storage systems that have slow metadata performance succeed so long as they have enough sequential read throughput to keep the machine learning framework busy. This is a big win for many distributed file systems that may have substandard metadata performance (and even the good ones may still benefit).</p> + + </li> + <li> + <p>To train on a dataset like ImageNet, you need somewhere around 1-1.3GB/s of raw disk throughput to keep 8 V100 GPUs busy when training in fp16. For amp or fp32 the requirements are likely lower since the GPUs can’t work quite as fast. With modern GPUs that are faster than the V100, the disk throughput requirements could be significantly higher.</p> + + </li> + <li> + <p>Lambda’s local NVMe storage is likely fast enough to saturate 8 GPUs, even newer ones, so long as the rest of the IO path can keep up. The persistent storage appears to become a bottleneck with sufficient GPUs and TensorFlow private threads, though can still function fairly well so long as TFRecords are used. A concern going forward is how to ensure that the data pipeline in TensorFlow and PyTorch are fast enough to keep the GPUs fed. The Tensorflow benchmark required a large number of private threads and showed potential evidence of contention at high thread counts. PyTorch did not appear to natively support TFRecords, but NVidia DALI or other 3rd party code might help improve the IO path.</p> + + </li> + <li> + <p>If it’s necessary to train directly with images rather than TFRecords, it may not make sense to host them on shared file systems. It appears that Tensorflow and possibly PyTorch give users the ability to specify a separate training data and work directory. If all operations against the training data are reads, it may be better to host datasets on read-only block device snapshots. For instance with Ceph, perhaps you could create a read/write RBD volume where you put a certain dataset, take a snapshot, and then map that snapshot as read only on multiple instances that all need access to the same image set.</p> + + </li> + <li> + <p>Even with a training set as large as ImageNet, Lambda’s instances have so much memory that eventually the entire dataset becomes cached. It was necessary to sync and drop caches before each test and keep tests short enough that they didn’t re-read the same data from buffer cache. I was able to watch as long running tests eventually stopped performing reads and got faster as time went on. This could make apples-to-apples comparison between different storage vendors difficult if not carefully controlled.</p> + + </li> + <li> + <p>I’m almost certainly missing additional tweaks that can help speed up both Tensorflow and PyTorch. This post shouldn’t be seen as the be-all/end-all for how to achieve high performance with these frameworks, but I hope it may at least help showcase some of the areas that are valuable to investigate when trying to train with real data and achieve high performance.</p> + + </li> +</ol> + +<p>This wraps up my initial work looking at Deep Learning IO behavior. 
I hope that next time I can come armed with a bit more knowledge about the internals of how PyTorch and Tensorflow work, focus a bit more on the quality of the training, find even larger datasets to work with, and maybe actually accomplish something useful rather than just play with ImageNet.</p> + +<p>Thanks for reading!</p> + + + + + Adam’s weekly update, 2022-11-27 + + 2022-11-27T15:28:16-07:00 + https://hpc.social/2022/adam-s-weekly-update-2022-11-27 + <h2>What&#8217;s new</h2> + +<p>The first thing that&#8217;s new is&#8230; this post! I&#8217;m going to try to do at least a weekly post on the blog now, just a general update and some links. This will <em>hopefully</em> help me get back into the habit of writing on the blog regularly, and maybe inspire me to write a bit more in general.</p> + +<p><span id="more-264"></span></p> + +<p>I was off work this week for the Thanksgiving holiday, and traveled to Michigan to visit my parents and my brother&#8217;s family. My mom has been struggling with some pretty major health issues this year, so it was really wonderful and reassuring to get to spend some time with her and my dad. I also finally got to meet my brother&#8217;s three-year-old son, who was born <em>right</em> before the pandemic started, and who I hadn&#8217;t managed to meet up until now.</p> + +<p>On the tech-related front, I used this week to take a break from Twitter (mostly), and to be honest&#8230; it was kinda refreshing! I had developed a pretty bad Twitter habit this year, doomscrolling for more time than I like to admit. While I really like Twitter and I&#8217;ve had some nice career boosts from it, it was also a time sink that was not entirely healthy.</p> + +<p>Admittedly, that time was somewhat replaced by playing around on the <a href="https://calico.social/ajdecon">Fediverse / Mastodon</a>. But with the lack of algorithmic suggestions, quote tweets, and other means of virality, that network so far feels a lot quieter and less time-consuming than Twitter. Tim Bray has a <a href="https://www.tbray.org/ongoing/When/202x/2022/11/26/Bye-Twitter">good post</a> up which discusses some of the advantages and pitfalls of federated social media, and I can highly recommend reading that. I&#8217;m still a bit skeptical that it will be a practical &#8220;Twitter replacement&#8221; for most people, but so far I&#8217;m finding it pleasant.</p> + +<h2>What I&#8217;m reading</h2> + +<ul> +<li><strong>Nonfiction book: </strong><a href="https://bookshop.org/p/books/code-the-hidden-language-of-computer-hardware-and-software-charles-petzold/18465738">Code, Second Edition, by Charles Petzold</a>. This book walks through the process of building a working computer, starting with ideas like Morse code, then working up from logic gates. This is technically a re-read, as I read the first edition&#8230; 10+ years ago? But I&#8217;m getting a lot more out of it this time around, and really enjoying it.</li> + + + +<li><strong>Fiction book: </strong><a href="https://bookshop.org/p/books/the-spare-man-mary-robinette-kowal/18834426">The Spare Man, by Mary Robinette Kowal</a>. A cozy murder mystery on a luxury cruise to Mars. I&#8217;m only a few chapters in, but already greatly enjoying myself.</li> + + + +<li><a href="https://ferd.ca/hiding-theory-in-practice.html">&#8220;Hiding theory in practice&#8221;, by Fred Hebert</a>. 
I&#8217;ve been reading a lot about safety engineering and its application to computing lately, but that community can sometimes get off into the weeds about points of theory that don&#8217;t have consensus in the broader computing community. This post has a good discussion of how to use the theory of safety engineering to guide decisions, without requiring that everyone working with you be handed a reading list.</li> + + + +<li><a href="https://cohost.org/mononcqc/post/385225-paper-repentance-as">&#8220;Paper: Repentance as Rebuke: Betrayal and Moral Injury in Safety Engineering&#8221;, also by Fred Hebert</a>. A discussion of <a href="https://link.springer.com/article/10.1007/s11948-022-00412-2">a paper by Dekker <em>et al</em></a> which looks at the aftermath of the 737 MAX air disasters, and the public repentance of some of the engineers who were involved. Go read the post, it&#8217;s great. And I&#8217;m planning to read the original paper this week.</li> + + + +<li><a href="https://chipsandcheese.com/2022/11/15/cannon-lake-intels-forgotten-generation/">&#8220;Cannon Lake: Intel&#8217;s Forgotten Generation&#8221;, from <em>Chips and Cheese</em></a>. Really I&#8217;ve been reading a bunch of the technical posts from <em>Chips and Cheese</em> lately, and they&#8217;re doing pretty good analyses of recent hardware. They&#8217;ve definitely earned that spot in my RSS reader.</li> + + + +<li><a href="https://glennklockwood.blogspot.com/2022/11/sc22-recap.html">Glenn K Lockwood&#8217;s &#8220;SC&#8217;22 Recap&#8221;</a>. I was sad to miss Supercomputing this year, though enough folks have come down with COVID that I don&#8217;t really regret the decision. But Glenn wrote up a really interesting recap post, with an interesting new viewpoint now that he&#8217;s working at Microsoft Azure. Among other things, he included a whole section titled <em>The underwhelming</em>, with the opening line &#8220;The biggest deal appears to be that exascale is here, and it turns out that it&#8217;s not that big of a deal.&#8221;</li> +</ul> + +<h2>Recent recipes</h2> + +<p>Because it was Thanksgiving, I did a lot of cooking this week! I&#8217;m not going to list everything I made, but a few of my favorites were:</p> + +<ul> +<li><a href="https://www.delish.com/cooking/recipe-ideas/a23340027/cheesy-garlic-butter-rolls-recipe/">Cheesy Garlic Butter Rolls from Delish</a>: Nothing special, but really tasty.</li> + + + +<li><a href="https://smittenkitchen.com/2019/11/challah-stuffing/">Challah Stuffing from Smitten Kitchen</a>: This recipe was a huge winner, with most of the family coming back for seconds, and then having more the next day for leftovers. It was really good, and is probably what I&#8217;ll make if I ever do stuffing again.</li> + + + +<li><a href="https://smittenkitchen.com/2008/09/best-challah-egg-bread/">Best Challah from Smitten Kitchen</a>: I baked the bread that went into the stuffing, and it was really tasty on its own! This recipe makes two loaves, and I only needed one for the stuffing. 
So I also made french toast with it, which worked really nicely.</li> +</ul> + +<h2>Pet photos</h2> + +<p>Gotta have those pet photos.</p> + +<figure class="wp-block-image size-large is-resized"><img alt="A blond golden doodle in a red harness and a blue bandanna lays on sandy dirt and looks into the camera" class="wp-image-271" height="233" src="https://thinking.ajdecon.org/wp-content/uploads/2022/11/IMG_6863-1024x768.jpeg" width="311" /></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="A white calico cat sits on a blanket and washes her front paw" class="wp-image-272" height="410" src="https://thinking.ajdecon.org/wp-content/uploads/2022/11/69075713241__19379770-6B0C-4780-8DD0-30C62A033C88-768x1024.jpeg" width="308" /></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="A gray-brown tabby cat wearing a green collar sitting on a wall, looking vaguely toward the camera" class="wp-image-273" height="405" src="https://thinking.ajdecon.org/wp-content/uploads/2022/11/69073206299__DB9CA33B-0EB5-4681-96DA-8368554B6B8A-768x1024.jpeg" width="304" /></figure> + + + + + SC'22 Recap + + 2022-11-24T02:00:00-07:00 + https://hpc.social/2022/sc-22-recap + <p>The biggest annual conference in HPC, the <a href="https://sc22.supercomputing.org">SC conference</a>, was recently held in Dallas, Texas in its second hybrid incarnation since being all-remote for the pandemic. This year attracted over 11,000 attendees which is much closer to the pre-pandemic high of 14,000 than last year's 7,000, and judging from the crushed conference rooms and busy expo floor, it looks like SC is not that much worse for wear.</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>This year's conference quite different for me since I attended for my first time as a vendor, not a researcher or practitioner, and I spent most of my days behind closed doors talking to customers. I didn't get to attend any of the keynotes, BOFs, or panels to which I wasn't invited as a result, so I'm not really qualified to give an erudite summary of the conference or expo this year.</p> +<p>So instead, I'm just writing down what I remember in order that I remember it and not necessarily in a coherent narrative form. I'm sure I missed a lot (for example, mixed precision seemed big this year, and I heard Jack Dongarra gave a fantastic Turing Award talk) so I encourage others to write their own recaps and share with the community!<span></span></p> +<p></p> +<h2 style="text-align: left;">High-level themes</h2> +<p>I actually started writing an SC'21 recap last year which I never posted, and re-reading the intro was funny--you'd think nothing has changed in the last year.</p> +<h3 style="text-align: left;">The underwhelming</h3> +<p>The biggest deal appears to be that exascale is here, and it turns out that it's not that big of a deal. China let the air out of the tires by debuting their exascale systems at SC'21, and not only did they thumb their nose at Top500 by not submitting, they debuted by winning a Gordon Bell prize instead. The first US exascale system, Frontier, was debuted at ISC this year leaving its showing at SC a bit deflated too. 
<a href="https://www.hpcwire.com/2022/11/17/2022-gordon-bell-prize-goes-to-plasma-accelerator-research/">Frontier was featured in the Gordon Bell prize-winning paper</a> this year, but that work required the use of four Top-10 systems, not just Frontier, painting the reality that one giant computer rarely stands on its own when it comes to advancing science.</p> +<p>This isn't to say that deploying exascale systems isn't a noteworthy feat and worth commendation, but I felt like the hype over the last five years treated the achievement like an end state instead of a milestone. And now that we've passed the milestone, the community is grasping to figure out what comes next. So what <i>is</i> next?</p> +<p><b>Quantum</b> had a strong and growing presence at SC, as it has for the last few years. But the conclusion of the panel "<a href="https://www.hpcwire.com/2022/11/19/quantum-are-we-there-or-close-yet-no-says-the-panel/">Quantum Computing: A Future for HPC Acceleration</a>" was that no, it's not close to being ready.</p> +<p><b>Disaggregation and composability</b> was another theme with growing momentum. And like quantum, there was a panel asking the same question: "<a href="https://www.hpcwire.com/off-the-wire/informal-poll-of-sc22-attendees-suggests-a-bright-future-for-composability/">Does HPC need composability now?</a>" The answer, again, was no, not yet. More on that below.</p> +<p>What about <b>RISC-V</b>? Surely that will revolutionize the field. As it turns out, the answer there is also that <a href="https://www.hpcwire.com/2022/11/18/risc-v-is-far-from-being-an-alternative-to-x86-and-arm-in-hpc/">RISC-V is not ready to do anything useful for HPC yet</a>.</p> +<p>The list goes on of technologies and trends that people are trying to boost now that exascale is "solved." The reality, I think, is that "exascale" will take years to actually mature since it appears to have a ton of technical debt that accumulated during the race to be first. US Exascale rests on the shoulders of AMD and Intel, two companies whose software stacks have not caught up to the market leader, so there will be a lot of thrashing around as development practices and optimization settle out around these systems.</p> +<p>Struggling with code porting is not very exciting to computer science Ph.D.s, so I expect future SCs to mirror this one and bifurcate into two distinct tracks: those struggling to identify the next big thing in the research space, and those struggling to use the systems that were rushed to deployment.</p> +<h3 style="text-align: left;">The unexpected</h3> +<p>My SC experience was very biased since I didn't get out much, but two related themes kept popping up across different meetings and the sessions I did attend.</p> +<p><b>Power efficiency is serious business now</b>. It used to seem like people talked about the need for energy-efficient HPC in an abstract sense while continuing to jam more power into every rack without changing their approach to system design, facilities, and deployment models. That has hit a hard wall with energy prices soaring in Europe, though. The financial impacts of power-inefficient supercomputing have gone from a one-time capex cost to an ongoing opex cost that is putting many HPC facilities on an unsustainable cost trajectory. Even sites that aren't doing new deployments are facing sudden, sharp increases in their costs, and nobody has good answers about how they will keep the lights on.</p> +<p><b>Cloud HPC is confusing</b>. 
With only <a href="https://www.nextplatform.com/2022/11/08/hpc-follows-the-enterprise-into-the-cloud/">15% of total HPC dollars winding up in the cloud</a>, it's little surprise that most HPC folks are only peripherally aware of what HPC in the cloud really means. Worse yet, a subset of those folks are actively hostile towards the idea of running HPC workloads in the cloud. I spoke with my colleagues from all three major cloud service providers as well as my colleagues in DOE, NSF, and education throughout the week, and everyone painted this same general picture.</p> +<p>There seems to be a mismatch between the expectations of on-prem HPC folks and cloud HPC folks. For example, I was asked why Windows doesn't support OpenMP very well, and after a bit of digging, I realized that the question really wasn't about using OpenMP on Windows as much as it was about using OpenMP in the cloud. There was a latent assumption that "HPC in Microsoft's cloud" must mean "HPC on Windows" which, for the record, is false--I don't even know how to use Windows anymore. Similarly, people decried the performance impacts of sharing HPC nodes with others in the cloud (they are not shared), overheads of virtualizing InfiniBand or GPUs (everyone uses PCIe passthrough or SR-IOV for HPC nodes), and other misconceptions.</p> +<p>This isn't to say that cloud people aren't confused too; I heard stories about conversations that went sideways because a cloud folks (not from my employer, thankfully!) didn’t realize that the requirements of a traditional gov/edu HPC facility couldn’t be neatly wrapped up into a single workload with a single solution, contrary to the case across many commercial AI shops. And both sides are struggling to find models for partnership and engagement that mirror the traditional relationship between places like a DOE or NSF facility and a company like Cray. HPC departments are used to buying supercomputers and parallel file systems, while cloud providers sell computing and storage as a <i>service</i>. The distinction may seem trivial at the surface, but there's a large divide that becomes evident once both sides start trying to drill into the details of what a partnership would look like.</p> +<h2 style="text-align: left;">Parallel I/O in Practice Tutorial</h2> +<p>This was my fifth year contributing to the Parallel I/O in Practice Tutorial with my colleagues at Argonne and Google, and it was our first time doing it in-person since 2019. It felt really good to be back in front of people to opine about the perils of POSIX and the greatness of the <a href="https://www.mcs.anl.gov/research/projects/darshan/">Darshan I/O profiling tool</a>, and this year I retired out the material I used to present on burst buffers (since DataWarp and Infinite Memory Engine have lost relevance in HPC) and the <a href="https://www.nersc.gov/tokio/">TOKIO holistic I/O analysis framework</a> (since it is no longer funded/maintained). In their stead, I presented material on <a href="https://wiki.lustre.org/Lustre_User_Group_2022">benchmarking with IOR and mdtest I debuted at LUG 2022 this year</a>.</p> +<p>I haven't gotten feedback yet on whether this change was a net positive one, but I think it went over well. Benchmarking I/O is really challenging if you don't understand how things like page cache really work in distributed systems, and walking through some benchmark examples concretizes a lot of abstract parallel file system concepts like locking and striping. 
And since benchmarking is a rabbit hole of arbitrary complexity, ending the tutorial with advanced benchmarking topics turned out to be a nice way to add buffer to the end of an eight-hour stretch of carefully timed presentations. It's very easy to skip over the nuances of analyzing mdtest outputs if attendees have a lot of questions about more important things at the end of the day.</p> +<p>The most surprising observation of the tutorial is how many attendees aren't using MPI anymore. We got a lot of questions last year about task-oriented I/O, and this year had some great questions about trying to understand or tune the I/O performed by Python-based analytics frameworks. We decided to add support for <a href="https://www.mcs.anl.gov/research/projects/darshan/2019/12/11/new-experimental-version-of-darshan-available-for-instrumenting-non-mpi-applications/">Darshan to profile non-MPI applications back in 2019</a> which is now paying dividends by ensuring it is a relevant tool for these new analytics and AI workloads, and we'll probably have to give more attention to optimizing these workloads' I/O in the future.</p> +<h2 style="text-align: left;">DAOS User Group</h2> +<p>Monday morning was cold and rainy--a perfect day to attend the <a href="https://daosio.atlassian.net/wiki/spaces/DC/pages/11248861216/DUG22">2022 DAOS User Group</a> which was held off-site at the Fairmont Hotel.</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>Whether you particularly care about DAOS or not, the cross-community HPC I/O brain trust is guaranteed to be in attendance, and this year did not disappoint. In addition to the expected stakeholders from Intel and DOE, representatives from all three big CSPs were in attendance. Google Cloud, Seagate, and HPE/Cray were all on the agenda, painting a diversifying landscape of large HPC companies investing time into DAOS and the strength and willingness of the DAOS team to partner with all comers.</p> +<h3 style="text-align: left;">Life after Optane</h3> +<p>The question that opened up the meeting, of course, was "what is the future of DAOS since Intel cancelled Optane?" Kelsey Prantis had the official statement (I'll replace the grainy photo once the DUG slides are online...):</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>The high-level project answer is that DAOS isn't going anywhere. Aurora, by virtue of still having Optane DIMMs, will not be affected, and DAOS will maintain support for Optane until Intel drops its last Optane DIMMs (Crow Pass for Sapphire Rapids) from support life sometime towards the end of this decade.</p> +<p>For new customers who aren't going to use Optane, the answer is "<a href="https://daosio.atlassian.net/issues/?jql=labels%20%3D%20%22md_on_ssd%22">Metadata on NVMe</a>," a development being codeveloped by Intel, HPE, and Google to implement a write-ahead log (WAL) and allow DAOS to use volatile DRAM instead of Optane. It will work like a file system journal in that a compact representation of writes will be committed to NVMe immediately after landing in DRAM, and then DAOS will asynchronously write back the properly serialized representation of that transaction after it is acknowledged. 
Johann Lombardi had a helpful cartoon that showed how this WAL will fit into DAOS:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>A key benefit of DAOS's implementation of this WAL is that it will be able to still service incoming writes while flushing old writes; although I don't fully grasp how this works, it is something enabled by the sophisticated I/O scheduler already implemented in DAOS.</p> +<p>The complete implementation isn't expected to be released until Spring 2024, but it appears to touch only a few components of DAOS and doesn't affect anything above the VOS layer of the DAOS server.</p> +<p>There was also mention of developing operability with new <a href="https://news.samsung.com/global/samsung-electronics-unveils-far-reaching-next-generation-memory-solutions-at-flash-memory-summit-2022">CXL-attached memory-semantic SSDs</a> to keep the persistent memory capability of DAOS alive beyond Optane. I'm not sure if this would offer a performance benefit over the metadata-on-NVMe feature; early results show that metadata-on-NVMe actually delivers higher IOPS than Optane since the synchronous write path is much simpler without having to account for memory persistence. That said, I didn't really follow the full extent of options on the table for how DAOS metadata may work across different types of memory though.</p> +<h3 style="text-align: left;">DAOS in the flesh at Argonne</h3> +<p>Kevin Harms presented an update on Aurora's massive 220 PB DAOS installation and laid out its configuration. There are 1,024 DAOS servers based on the Intel Coyote Pass server design, each sporting</p> +<p></p> +<ul style="text-align: left;"><li>2x Intel Xeon 5320 (Ice Lake) sockets</li><li>2x DAOS engines (one per socket)</li><li>16x 32GB DDR4 DIMMs</li><li>16x 512GB Optane DIMMs (Persistent Memory 200)</li><li>16x 15.36 TB Samsung PM1733 NVMe SSDs</li><li>2x 200 Gb/s Slingshot NICs</li></ul> +<p>The total configuration is quoted at 220 PB usable, but Kevin pointed out that this assumes that every object is erasure coded at 16+2. Unlike virtually every other storage system out there, though, users can choose the data protection for their individual objects when they create them, meaning this 220 PB capacity is an upper limit to what users can do. Users with very hot, read-only objects may choose to replicate instead of erasure code, while others who are capacity-constrained may choose to erasure code everything at 16+2 at the cost of latency and IOPS. This flexibility is really powerful for users since they can tailor their object layout ("<a href="https://www.intel.com/content/www/us/en/developer/articles/technical/understanding-data-redundancy-and-sharding-in-daos.html">object class</a>" in DAOS parlance) to match the needs of their workload.</p> +<p>Argonne will be slicing up this DAOS system by giving each scientific project its own DAOS pool, and each pool will be assigned to only 80% of the available DAOS servers by default. This seems like a nice way of providing most of the storage system performance to every user, but offering more freedom to work around bad hardware, bad users, and other performance problems that plague file systems like Lustre that distribute everything across every single server equally.</p> +<p>Finally, I noticed that Aurora will be using Samsung SSDs, not the Intel (now Solidigm) QLC NAND that appeared in all the DAOS slides floating around two years ago. 
I'm not sure what happened there, but the move from Solidigm QLC to Samsung TLC couldn't have been cheap.</p> +<h3 style="text-align: left;">New features and contributions</h3> +<p>DAOS is starting to pick up some truly valuable features that are being developed and contributed by third parties. Of note, croit has contributed a feature which allows DAOS to serve up NVMe over Fabrics targets, and Seagate contributed an S3 gateway for DAOS. Along with the DFS file system interface, DAOS now offers the trifecta of standard object, block, and file services just like Ceph. Unlike Ceph though, performance on DAOS is a first-class citizen. While croit made it clear that the NVMeoF support still has a ways to go to improve the way it does thread pooling and provides resilience, they showed 1.4 million IOPS from a single storage client using TCP over Ethernet with minimal client-side overhead.</p> +<p>Intel is also developing multitenant support for DFUSE, allowing a single compute node to share a DAOS mount and let permissions be enforced through UID/GID just like a regular file system. Before this update, the FUSE-based nature of DAOS allowed any unprivileged user to mount their container (good), but only one FUSE agent could be alive on a single node at a time (not good) which prevented multiple users sharing a node from both mounting their own containers.</p> +<p>DAOS also has some longer-term enhancements that I thought were interesting:</p> +<p></p> +<ul style="text-align: left;"><li>expanding the range of POSIX calls supported by DAOS's intercept library to include metadata calls and memory-mapped I/O using <a href="https://docs.kernel.org/admin-guide/mm/userfaultfd.html">userfaultfd</a></li><li>implementing collaborative caching - essentially reimplementing the Linux kernel page cache in userspace so that multiple processes can share cached DAOS pages</li><li>supporting a computational storage paradigm by enabling offload of <a href="https://github.com/rlane/ubpf">userspace eBPF scripts</a> to DAOS servers</li></ul> +<h3 style="text-align: left;">DAOS in a larger data center ecosystem</h3> +<p>Dean Hildebrand from Google Cloud then gave an overview of Google's efforts in bringing DAOS into the cloud. He had some nice performance graphs and I'll link the full presentation here once it's uploaded (it's worth a watch), but the part I found the most insightful was how they are trying to decide where a technology like DAOS fits in the larger cloud storage ecosystem. He outlined two different ways DAOS could work in GCP:</p> +<p></p> +<ol style="text-align: left;"><li><b>Caching</b>: Google Cloud Storage (GCS) is the point of truth and DAOS is a cache</li><li><b>Tiering</b>: DAOS is a point of truth, and GCS is an archive</li></ol> +<p></p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>He said they were leaning towards the caching model where data only lives ephemerally in DAOS, and personally, I think this is the right move since DAOS in the cloud is not resilient without Optane. 
However, this choice reflects a much larger tension in cloud storage for HPC:</p> +<p></p> +<ol style="text-align: left;"><li>The centerpiece of every cloud's data story is a scalable, low-cost, low-performance object store which is analogous to what on-prem HPC would call campaign, community, or project storage.</li><li>HPC demands higher performance than what these object stores can generally deliver though.</li></ol> +<div>To bridge the gap between these two truths, auxiliary services must bolt on to the object layer and provide higher performance, at a higher cost, for the duration of I/O-intensive HPC jobs. Some choose to provide true tiering from object into a resilient layer of flash (like <a href="https://aws.amazon.com/fsx/lustre/">FSx Lustre</a> and <a href="https://docs.weka.io/overview/data-storage">Weka</a> do), while others project the contents of the object through a high-performance caching layer (like <a href="https://azure.microsoft.com/en-us/products/hpc-cache/#overview">HPC Cache</a> and <a href="https://aws.amazon.com/blogs/aws/amazon-file-cache-a-high-performance-cache-on-aws-for-your-on-premises-file-systems/">File Cache</a>) and are never meant to persistently hold data.</div> +<p></p> +<p>This isn't rocket science, but I never thought deeply about the two models since campaign/community/project storage in on-prem HPC is usually fast enough to avoid needing caches or fine-grained tiering capabilities.</p> +<p>John Bent also had a thought-provoking presentation about how Seagate's now-"deprioritized" CORTX object store, which once <a href="https://blog.seagate.com/enterprises/seagate-and-sage-project-innovate-to-boost-hpc-and-big-data-community/">competed with DAOS as Mero</a>, contains ideas that can complement DAOS:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>Whereas DAOS delivers high performance using NVMe, CORTX delivers great economics using HDDs, and their strengths are complementary to each other. While I don't fully grasp how a tiered (or caching!) system comprised of DAOS and CORTX could be implemented, John rightly pointed out that the same level of space efficiency can deliver higher data protection if multi-level erasure coding is used to stripe across durable block storage. His specific example was erasure coding at 8+1 across servers and 10+1 within servers to deliver both high efficiency and high durability. This could map to something like running DAOS atop something like CORVAULT, but I don't think all the necessary pieces are in place to realize such a harmonious coexistence yet.</p> +<p>Of course, completely tossing Reed-Solomon for something more sophisticated (like VAST does with its locally decodable 150+4 scheme) obviates the need for multilevel erasure entirely. But DAOS has not gone down that route yet.</p> +<p>And as with every talk John gives, there were lots of other interesting nuggets scattered throughout his presentation. Two of my favorites were:</p> +<p></p> +<ul style="text-align: left;"><li>A slide that pointed out that, when you buy something like Ceph as an appliance, you may be spending only 25% of the total cost on storage media and the rest is infrastructure, service, and support. This struck me as a bit on the low end, but some enterprisey NAS and midrange parallel file system appliances can go this low. 
Spending 60% to 90% on media is a lot nicer for the buyer (and companies like Seagate) if you can buy at scale or eschew the white-glove support, and John suggested that it's up to companies like Seagate to fix the software issues that require customers to pay for white-glove support in the first place.  After all, the less someone spends on support and licenses, the more they can spend on Seagate hard drives.</li><li>John's final slide pointed out that object stores were originally designed to get around the limitations of POSIX file systems, but as they've evolved over the last decade, they're starting to look a lot like file systems anyway since they require strong consistency, hierarchical namespaces, and familiar file semantics. Has all the work put into developing super-fast object stores like DAOS over the last ten years really just brought us back full circle to parallel file systems?  Companies like VAST and Weka have shown that <a href="https://www.nextplatform.com/2017/09/11/whats-bad-posix-io/">maybe POSIX isn't as bad as the research community (myself included!) have claimed it to be</a>; it was really just low-performance implementations that nobody wanted.</li></ul> +<div>Once John's talk is uploaded to the DUG 2022 website, I'll link it here.  Like Dean Hildebrand's talk, it is well worth watching (but for wildly different reasons!)</div> +<p></p> +<p></p> +<p></p> +<h2 style="text-align: left;">PDSW 2022</h2> +<p>I had to duck out of the DAOS User Group early to run (through the rain) to the 7th International Parallel Data Systems Workshop (PDSW 2022) on Monday afternoon.</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p><br />Much to everyone’s surprise, PDSW was only given a half day this year and everything felt a little compressed as a result. The organizers kept the work-in-progress (WIP) sessions which can often be an interesting peek into what students are pursuing, but little A/V problems and the unforgiving schedule probably did a disservice to the up-and-comers who use the WIP track to lay the groundwork for future full-length papers. Hopefully SC’23 restores PDSW to its original full-day status.&lt;p&gt;&lt;/p&gt;</p> +<h3 style="text-align: left;">Splinters keynote from Arif Merchant at Google</h3> +<p>The keynote presentation was given by Arif Merchant from Google about Splinters, the framework that Google Cloud uses to sample I/Os in a scalable way. The challenge they face is that it's impossible to trace and store every single I/O that hits Google's storage servers (D servers), but having an understanding of I/O patterns is essential for characterizing workload I/O behavior and planning for future infrastructure. In fact, this problem is so important that Google isn't the only cloud that's solved it!</p> +<p>A lot of what Arif talked about is very similar to how Azure does its I/O tracing under the hood. I suppose it should not be surprise that there are only so many ways to solve the challenge of sampling individual IOPS in a way that fairly represents the aggregate workload of a huge distributed storage system. One really smart thing Splinters does that I liked was sample along two different dimensions: not only do they evenly sample across all IOPS at a fixed rate (the obvious thing), but they also sample across files at a fixed rate. 
In this latter case of per-file sampling, they take a tiny fraction of files and capture every I/O for that file to get a complete picture of how individual files are being accessed.</p> +<p>This file sampling fills the huge gap that exists when randomly sampling IOPS alone. Because different I/Os have different "costs" (for example, reading a 1 MiB file using a single 1 MiB read op or 256x 4 KiB read ops are functionally equivalent to an application), randomly sampling ops introduces systematic biases that can be difficult to back out after the data has been sampled, subsampled, aggregated, and reduced. Splinters' approach lets you see the workload from two different angles (and biases) and answer a much larger range of questions about what's really happening across thousands of storage servers.</p> +<p>That said, it was interesting to hear Arif describe how Splinters evolved out of a different internal Google project but wound up outliving it. Splinters is also similar to, but slightly different from, their <a href="https://research.google/pubs/pub36356/">Dapper</a> infrastructure which also does scalable distributed system tracing. And he made overtures to <a href="https://research.google/pubs/pub41344/">F1</a>, a scalable SQL database that is similar to (but not the same as) the SQL-like query interface that Splinters uses. I got the impression that new technologies come and go pretty quickly at Google, and there's a large appetite for creating new software systems outright rather than shoehorning an existing system into solving a new problem. I can't say one way is better than the other; I was just surprised at the contrast with my own experiences.</p> +<h3 style="text-align: left;">Practical papers</h3> +<p>PDSW had a healthy combination of both very-researchy papers and applied research papers this year. I could only stick around for the applied papers, and two left an impression.</p> +<p>In the first, <a href="https://jeanlucabez.io">Jean Luca Bez</a> presented <a href="https://github.com/hpc-io/drishti">Drishti</a>, a tool that lives downstream of the Darshan I/O profiling library and finally does what the Darshan community has danced around for years--turning a Darshan log into an actionable set of recommendations on how to improve I/O performance. It does this by cataloguing a bunch of heuristics and using Darshan's new Python integrations to pore through a log and identify known-problematic I/O patterns. Like Jean Luca's <a href="https://dxt-explorer.readthedocs.io/en/latest/">DXT Explorer tool</a>, Drishti has a slick user interface and greatly extends the usability and insights that can be pulled out of a Darshan log file. It probably won't win a Turing Award, but this sort of work is probably going to benefit scores of HPC end-users by making Darshan (and troubleshooting I/O problems) much more accessible to mere mortals for years to come.</p> +<p>Adrian Jackson also presented a very tidy <a href="https://arxiv.org/abs/2211.09162">apples-to-apples comparison of DAOS and Lustre on the same hardware</a> using both a systems-level benchmark and an application-inspired, object-oriented data model benchmark. 
The specific bake-off of a new curiosity (DAOS) and the decades-old incumbent (Lustre) is probably interesting to storage nerds, but I think the real novelty of the work is in its exploration of some uncomfortable realities that the HPC I/O community will have to face in the coming years:</p> +<p></p> +<ul style="text-align: left;"><li>Does "slow memory" (nonvolatile Optane or CXL-attached memory SSDs) give actual benefit to existing file systems (like Lustre), or is rethinking the entire storage stack (like DAOS did) really necessary to unlock the performance of new hardware?</li><li>Do applications need to rethink their approach to I/O to make use of post-POSIX storage systems like DAOS, or is performing I/O as you would on a file system (Lustre) on a post-POSIX storage system (DAOS) good enough?</li></ul> +<p>My take from the work is that, for simple I/O patterns like checkpoint/restart, you can get pretty far by just treating something like DAOS the same as you would a parallel file system:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Figure from Manubens et al, "<a href="https://arxiv.org/abs/2211.09162">Performance Comparison of DAOS and Lustre for Object Data Storage Approaches</a>."</span></b></div> +<p>But if you want your data at rest to have the same data model as how it's handled within the application, you really ought to use a storage system that supports data models that are more expressive than a stream of bytes (which is what POSIX files are).</p> +<p>The authors didn't do a perfect job of giving Lustre its fair shake since they chose to use (abuse) directories and files to represent their application's data model on-disk instead of developing an object-file model that file systems like Lustre handle a little better. But let's be real--HPC is full of applications that do the exact same thing and represent datasets on-disk using complex hierarchies of directories and files simply because that's the easiest way to map the application's representation of data into the standard file system model. In that sense, storage systems that represent rich data models in a high-performance way should be really valuable to naive applications that map in-memory data structures directly to files and directories.</p> +<p>Going back to John Bent's closing slide from his DAOS User Group talk, though, does any of this even matter since all answers lead back to parallel file systems? Maybe there's something to be learned about adding better back-door APIs that support more diverse data models than what POSIX file interfaces give us.</p> +<h2 style="text-align: left;">The SC22 Expo</h2> +<p>The expo is my favorite part of SC because it's when I get to talk to people one-on-one and learn about corners of the HPC industry that I would've never otherwise sought out. Much to my dismay, though, I had very little time to walk the floor this year--so little that I didn't get any swag. 
If you want to read up on what interesting technology was being showcased, I strongly recommend reading <a href="https://www.servethehome.com/?s=sc22">all the great content that Patrick Kennedy and his team at STH created covering the expo</a>.</p> +<p>That said, I did notice some curious trends about the show floor overall.</p> +<p>The NVIDIA booth was notably absent this year (though they shared booth space with partners), and many of the usual top vendors had significantly smaller presence on the expo floor. Just for fun, I compiled the top ten(ish) vendors by booth size:</p> +<p></p> +<ol style="text-align: left;"><li>Weka.io (3,200 sqft)</li><li>VAST Data, Department of Energy, Penguin Computing, HPE, and Microsoft (2,500 sqft)</li><li>AWS (2,000 sqft)</li><li>Google and TACC (1,600 sqft)</li><li>Supermicro, AMD, Intel, Dell, NASA, and Indiana University (1,500 sqft)</li></ol> +<p>I think it's amazing to see all-flash storage companies at the top of the list alongside all of the Big 3 cloud service providers. I may be reading too much into this, but this may mean that the money behind SC is shifting towards companies playing in the cloud-based AI space instead of traditional big iron for simulation. Or perhaps it's a sign that most of the traditional HPC players are taking a hard look at the return they get on a big booth given the current economic climate and pulled back this year.</p> +<p>I did chat with a couple colleagues who completely opted out of a booth this year (for reference, <a href="https://hallerickson.ungerboeck.com/prod/app85.cshtml?AppCode=VFP&amp;OrgCode=34&amp;EvtID=5025&amp;CC=SC22SM">SC'21</a> had 10% fewer exhibitor booths than <a href="https://hallerickson.ungerboeck.com/prod/app85.cshtml?AppCode=VFP&amp;OrgCode=34&amp;EvtID=5020&amp;CC=SC19">SC'19</a>), and the reasoning was consistent: they found more value in having staff meet with customers privately or attend the technical sessions and engage with people organically. Combined with a bit of bad taste left over from SC's <a href="https://sc21.supercomputing.org/exhibits/exhibit-at-sc/">high cost of hosting pandemic-era "digital booths"</a> despite low return (did anyone visit digital booths at SC'20 or SC'21?), I can see why some vendors may have chosen to skip the expo this year.</p> +<p>Whatever the reasons may be, I was a bit sad to see such a small presence from some of my favorites like IBM, Fujitsu, Atos, and NEC. Hopefully the SC Exhibits Committee (and the economy!) can find ways to bring back the pre-pandemic glory of the show floor.</p> +<p>The expo wasn't all doom and gloom though! Even though I couldn't make my complete rounds this year, there were a couple of highlights for me.</p> +<p></p> +<h3 style="text-align: left;">VAST's masterful marketing</h3> +<p>Perhaps the splashiest vendor at SC was VAST Data who had a brilliant marketing presence. First was the giant Vastronaut mascot that was the centerpiece of their booth:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>A <a href="https://twitter.com/search?q=sc22%20vast&amp;f=live">quick search of Twitter</a> shows just how many people seized the opportunity to take a selfie at their booth. 
I would love to know how they transported that thing to and from the conference, but whatever the cost, I'll bet it was worth it.</p> +<p>At the Grand Opening Gala on Monday, they also gave out delightfully tacky light-up cowboy hats that everyone seemed to be wearing:</p> +<blockquote class="twitter-tweet"><p dir="ltr" lang="en">We were there! <a href="https://twitter.com/hashtag/sc22?src=hash&amp;ref_src=twsrc%5Etfw">#sc22</a> <a href="https://twitter.com/hashtag/sc2022?src=hash&amp;ref_src=twsrc%5Etfw">#sc2022</a> <a href="https://twitter.com/VAST_Data?ref_src=twsrc%5Etfw">@VAST_Data</a> <a href="https://t.co/fWhuSgBfpL">pic.twitter.com/fWhuSgBfpL</a></p> +— ntnu-hpc (@ntnuhpc) <a href="https://twitter.com/ntnuhpc/status/1592330266932301829?ref_src=twsrc%5Etfw">November 15, 2022</a></blockquote> +<p>The subtle genius of this was that not only did people wear them during the gala and the <a href="https://beowulfbash.com">Flop Gun-themed Beowulf Bash 2022 party</a> later that night, but they had to wear them on their plane rides home since they were so inconveniently bulky. Proof in point, my wife (who doesn't work in tech) sent me this text message to confirm that she was waiting for me at the right luggage carousel at San Francisco Airport:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>I wonder how many innocent bystanders, traveling home for Thanksgiving on Thursday or Friday, saw the shiny cowboy hats at airports around the country and wondered what VAST was.</p> +<p>The icing on the cake was VAST's CEO, Renen Hallak, parading around in an unmissable Chuck McGill-style space suit all week, clearly not taking himself too seriously and painting VAST as a work hard/play hard kind of company. Now, do flashy space suits and blinking cowboy hats alone mean VAST has a great product? I can't say<sup>**</sup>. But marketing is an art that I appreciate, and VAST hit some great notes this year.</p> +<p style="font-size: xx-small;"><sup>**</sup> (Seriously, I'm not sure I wouldn't get in trouble for opining about another company here.)</p> +<h3 style="text-align: left;">The Microsoft hardware bar</h3> +<p>The only booth where I spent any appreciable time this year was my own employer's. I personally love booth duty and accosting strangers on the show floor, especially if there's something interesting at the booth to jumpstart a conversation. When I worked at SDSC it was a <a href="https://www.sdsc.edu/News%20Items/PR111213_meteor.html">Raspberry Pi cluster</a>, and at the Microsoft booth this year it was the "hardware bar."</p> +<p>In addition to the customary booth presentations with giveaways, swag desk, seating area, and a fun caricature artist, the physical servers that underpin the HPC nodes in Azure were on display. <a href="https://www.opencompute.org/wiki/Server/ProjectOlympus">Microsoft contributes its hardware platform designs to the Open Compute Project</a> so the physical hardware that runs in Azure data centers isn't entirely mysterious. 
Still, every cloud has its hardware secrets, so I was surprised to see these servers laid bare.</p> +<p>The newest HPC node type (dubbed <a href="https://learn.microsoft.com/en-us/azure/virtual-machines/hbv4-series">HBv4</a>) on display was a node powered by AMD's Genoa processors just announced a few days earlier:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>This wasn't a display model, either; it had real DDR5 DRAM, a real NDR InfiniBand HCA, real PCIe Gen5, and real big OCP mezzanine card with real big aluminum heat sinks and a big Microsoft sticker on top. A couple visitors commented on the way the heat piping for those Genoa CPUs was done which I guess is unusual; rather than have a giant copper block on top of each socket, heat pipes connect the socket to massive aluminum heat sinks that are closer to the chassis inlets. In retrospect it makes sense; Genoa has a whopping twelve DDR5 DIMMs per socket which leaves little extra room for heat sinks, and these 88+ core sockets have a staggering thermal design power.</p> +<p>Another exotic piece of hardware on display was an "ND MI200 v4" server:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>It's logically similar to Azure's "<a href="https://learn.microsoft.com/en-us/azure/virtual-machines/nda100-v4-series">ND A100 v4</a>" server platform with two CPU sockets, eight SXM4 GPU sockets, eight 200G HDR InfiniBand HCAs, and a bunch of M.2 NVMes. But this specific server has eight MI200 GPUs on a common OAM baseboard and uses Infinity Fabric for GPU-to-GPU communication. I've never seen an OAM-socketed anything in real life before, much less eight of them on a baseboard, so I thought this was pretty great to see in the flesh.</p> +<p>The ND A100 v4 platform was also on display and looked very similar-but-different with its eight A100 GPUs and HGX baseboard:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>And unlike the MI200 variant, the general public can run on these nodes.</p> +<p>I'm not sure what more I'm allowed to say, but my colleague Karl made a nice, <a href="https://twitter.com/KarlPodesta/status/1593627537330126851?s=20&amp;t=uthjeb7YYmTZWRVWaF4XUA">quick video that runs through the entire Microsoft booth</a> that's worth a watch, and more details can be had by contacting me or your favorite Microsoft account team privately.</p> +<p>Of course, the hardware bar was just a way to lure people into the booth so I could achieve my real goal: meeting new folks. As I wrote before, one of my biggest realizations at SC this year is how generally confused people are about what HPC in the cloud really means--both people who come from traditional on-prem HPC and people who come from traditional enterprisey cloud. I found myself surprising many of the people with whom I spoke on the show floor with factoids that I have taken for granted. For example,</p> +<p></p> +<ul style="text-align: left;"><li>Linux is the most common OS on these HPC node types. While you probably(?) can run Windows if you want on this stuff, I think only a few niche markets do this.</li><li>The usage model for an HPC cluster in the cloud can be the same as on-prem. You can have login nodes, Slurm, home directories, parallel file systems, and all that. Jobs don't have to be containerized or turned into a VM image.</li><li>The InfiniBand coming out of these nodes is real InfiniBand with real OFED that supports real mpich/mvapich/OpenMPI. 
It's the same stuff as in on-prem supercomputers. And nodes are assembled into <a href="https://learn.microsoft.com/en-us/azure/virtual-machines/sizes-hpc">full-bisection fat tree InfiniBand</a> clusters just like normal.</li><li>There's no noisy neighbor problem on compute nodes because HPC node types aren't shared between users. When you run a VM on an HPC node, you get the whole thing. Just like on large supercomputers.</li><li>There's no horrible loss of performance due to running in a VM. Virtualization extensions, PCIe passthrough, and SR-IOV bypass the hypervisor for most things. Inside your VM, you see real Zen cores and real Mellanox HCAs, not virtualized devices.</li></ul> +<p>My takeaway impression is that a lot of traditional HPC folks looked at the cloud five or ten years ago, had a sour experience, and haven't paid attention since. In those last five years, though, AI has changed the game. Massive demand for the latest CPUs and accelerators, funded by live-fast-die-young venture capital, has given cloud vendors tremendous financial incentive to catch up to on-prem levels of performance efficiency for AI workloads. And it just so happens that infrastructure that's good for AI is also good for traditional modeling and simulation.</p> +<h2 style="text-align: left;">SCinet!</h2> +<p>One of the unexpected highlights of my SC this year arose from a chance encounter with a former coworker from NERSC, <a href="https://www.nersc.gov/about/nersc-staff/networking-security/ronal-kumar/">Ron Kumar</a>, who gave me a whirlwind tour of SCinet.</p> +<p>I have to confess great ignorance around SCinet in general; I always saw it was a weird technological proof of concept that the strange networking people at work would go off and do in the weeks leading up to the actual conference. I knew they did some impressive wide-area transfer demos (like the <a href="https://scinet.supercomputing.org/community/documents/43/sc17-Kettimuthu-transferring_1petabyte_per_day.pdf">petabyte-in-a-day demo at SC'16</a>), but I didn't really get the significance.</p> +<p>So what is SCinet? It's this yellow bundle of cables dangling from the ceiling.</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p><br />&lt;p&gt;The yellow cables are 144-core fiber trunks that bring over a terabit per second of bandwidth into the convention center from the Internet via the national research backbones like ESnet and Internet2 and distribute many terabits per second of capacity throughout the SC conference venue. For comparison, most HPC centers in the US only have a tenth of SCinet’s wide-area bandwidth at best since 400G infrastructure is still rolling out.&lt;/p&gt;</p> +<p>Most attendees may be familiar with the row of expensive-looking networking racks behind a glass wall towards the back of the expo which is where those yellow cables dangling from the ceiling end. 
Here's a photo from inside that glass wall:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>What I didn't realize is that if you go around to the back of the giant walled area behind this glass display, there's a security checkpoint that gates entry into a massive network operations center (NOC) full of laptops, spools of fiber, meeting rooms, and busily working teams in charge of all the lower layers of the networking stack.</p> +<p>The process to get into the NOC involves an escort and being tagged in with a tamper-proof wristband, and I learned on the tour that there's millions upon millions of dollars worth of high-end networking equipment in the racks shown above. If you look closely, you can see a security camera at the end of the aisle that speaks to this; that camera was one of many.</p> +<p>Behind the pretty public-facing side of the SCinet racks is a mess of fiber and cables:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>I guess if you have to tear all this down after just a few weeks, there's no point in investing days in dressing it all up nicely! I particularly enjoyed the fiber panels in the third rack that appear to be affixed to the rack post with shoe laces.</p> +<p>This year, SCinet did do a neat proof-of-concept where they demonstrated three 400G routers from three vendors (Juniper, Arista, and Cisco?) all talking the same protocol to handle what I assume is the core routing for everything in the convention center:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>I wish I remembered exactly what was going on here, but I know enough about networking to know that, despite there being standard protocols for coordinating between networking gear, each vendor does their own implementation that is rarely easy to get interoperability from. If anyone out there knows the details of this achievement, please let me know so I can explain this a little better!</p> +<p>In addition to networking nerd-level demonstrations, SCinet also serves up all the wifi across the convention center. That is why there were tripods with access points scattered around, and why astute attendees may have noticed janky networking equipment scattered around that looked like this:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>Again, I get it: for a network infrastructure that's only going to last a week, I don't think it's a good use of anyone's time or money to nicely dress all the networking.</p> +<p>One last factoid I didn't know until this year was that exhibitors can request 100 Gb/s network drops into their individual booths for demos (or downloading the latest version of a PowerPoint presentation <i>really fast</i>). 
The end result of supporting both a vast wifi network and 100G fiber across the show floor is that there was a <u>lot</u> of fiber going into the single row of SCinet equipment:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>Finally, when I <a href="https://twitter.com/glennklockwood/status/1592725187015114752?s=61&amp;t=1c4Kbx75SpTJhCruzuy0Ng">posted some of these photos online</a> during the conference, my colleague Bilel was kind enough to post a slide from the SC22 opening presentation that had the speeds and feeds of what I had toured:</p> +<blockquote class="twitter-tweet"><p dir="ltr" lang="en">Candy Culhane shared Scinet facts <a href="https://twitter.com/hashtag/SC22?src=hash&amp;ref_src=twsrc%5Etfw">#SC22</a> <a href="https://twitter.com/hashtag/HPC?src=hash&amp;ref_src=twsrc%5Etfw">#HPC</a><br /><br />5.01 Tb/s of WAN capacity<br />$70M in HW &amp; SW, &amp; services provided by 29 SCinet contrib.<br />175 volunteers from 80 vol. organiz.<br />&gt; 450 wireless deployed<br />29 network research exhibition proposals<br />11.7 miles of fiber <br />2384 fiber patch <a href="https://t.co/JtPhjVHZJd">https://t.co/JtPhjVHZJd</a> <a href="https://t.co/kwGl5Ydqp5">pic.twitter.com/kwGl5Ydqp5</a></p> +— Bilel Hadri (@mnoukhiya) <a href="https://twitter.com/mnoukhiya/status/1592737463617089536?ref_src=twsrc%5Etfw">November 16, 2022</a></blockquote> +<p>If you know anyone involved with SCinet, I highly recommend seeing if you can get a tour at the next SC. Even as a relative networking novice, I walked away with a much greater appreciation for the annual achievement of building SCinet. And who knows? Once I get bored of this whole storage thing, maybe I'll try getting into high-performance networking.</p> +<h2 style="text-align: left;">Composability panel</h2> +<p>This year I was invited to participate in a panel titled "Smackdown! Does HPC Need Composability Now?" moderated by Addison Snell and Dan Olds from <a href="https://www.intersect360.com">Intersect360 Research</a>. This panel was...different. Unlike the traditional SC panel where panelists take turns presenting slides and saying erudite things, this panel had two teams of panelists. And my team only had one slide to present:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>The ground rules included "personal attacks are allowed," and needless to say, the panel was about equal parts entertainment and technical discourse. That's not a bad thing, though.</p> +<p>Addison and Dan did a phenomenal job of pulling their respective teams together and leading discussion in a format that both brought forward the key pros and cons of composability in HPC while poking fun at the thinly veiled, ego-driven personalities that often make up these sorts of panels. Rather than politely dancing around issues like sacrificing memory bandwidth by putting accelerators at the far end of a PCIe bus or gaining higher utilization by allowing users to mix and match CPU, NICs, and GPUs, us panelists were free to shoot straight (or perhaps a bit hyperbolically) and call each other out on our hidden agendas.</p> +<p>I hope it goes without saying that all us panelists were in on the format and don't actually think people on the other side are dumb. 
By wrapping technical arguments in snarky comments, we could keep the level of discussion accessible to a wide audience, drive home the key points from both sides, and ensure that we weren't losing audience members who don't care about the PhD-level details as much as they want to hear what their peers are thinking about this exciting new space. I got some feedback afterwards that I didn't seem to hold back, so if anyone did take anything I said seriously, I am very sorry!</p> +<p>On a technical level, what was the outcome?</p> +<p>It turns out that <a href="https://www.hpcwire.com/off-the-wire/informal-poll-of-sc22-attendees-suggests-a-bright-future-for-composability/">there was about a 60/40 split between people who felt composability wasn't required yet and those who felt it was</a> after both sides argued their case. Even among panelists, many of us were a lot less convinced about our respective positions than we let on during the panel itself. I got a chuckle when I realized that I wasn't the only one who, when invited to be on the panel, asked "what side do you want me to argue?" I honestly could have gone either way because the dust has not yet settled. <a href="https://www.tacc.utexas.edu/about/directory/dan-stanzione">Dan Stanzione, director of TACC</a>, gave the truest answer to the question of "will composability help HPC" up front--"<a href="https://twitter.com/HPC_Guru/status/1592604467698241537?s=20&amp;t=tn3WQBUY9M0MWSfqx1XLKA">it depends</a>." Maybe this is a growth opportunity, or maybe it's a lukewarm reception.</p> +<p>Either way, composable technologies are hitting the market regardless of whether you think they'll be useful or not.  <a href="https://www.nextplatform.com/2022/11/10/amd-genoa-epyc-server-cpus-take-the-heavyweight-title/">AMD Genoa supports CXL 1.1 with extensions for memory pooling</a>, <a href="https://news.samsung.com/global/samsung-electronics-unveils-far-reaching-next-generation-memory-solutions-at-flash-memory-summit-2022">Samsung has memory-semantic SSDs</a>, and everyone and their mother is working on photonics to get higher bandwidths and lower latencies over longer distances. This makes it easier for people to dip their toes in the water to see if composability makes sense, and I think that's what a lot of people will wind up doing in the coming years.</p> +<h2 style="text-align: left;">Customer meetings</h2> +<p>Unlike in years past, my SC experience this year was dominated by customer meetings. I've been on the customer side of the table plenty of times, but I was surprised to find that it was actually more fun to be on the vendor side for a change. I'm part salesman at heart, so I found it personally gratifying to end a meeting with people nodding along rather than scratching their heads. I learned as a customer that it's very easy for vendors to go way off the rails and waste everyone's time, so I was grateful to have avoided the awkward confusion that punctuates those kinds of meetings. </p> +<p>I also went into the week worrying that I'd be sitting in the same room, hearing the same pitch and the same jokes, and answering the same questions all week. Thankfully, I work with some great field, business, and product teams who set up interesting conversations rather than rote recitations of boring roadmap slides. 
Approaching the same topics from different angles helped me figure out how all the pieces of what I'm working on fit together to make a complete picture too; there weren't nearly as many opportunities to do this in the DOE world since the end-users of the HPC systems on which I worked aren't told anything until all the design decisions have already been made.</p>
+<h2 style="text-align: left;">A few personal notes</h2>
+<p>This SC was significant to me at a variety of levels; it was the first time I'd gotten on an airplane since February 2020, the first time I'd traveled since starting a new job at a new company, and the first time I'd met any of my new coworkers outside of the structure of a Teams call. During the pandemic I realized that getting out into the world and talking to people from all corners of HPC was my favorite part of my job. Not being able to go to events like SC and maintain that sense of community involvement dramatically impacted my level of professional satisfaction for the last two years, so I'm glad I was able to finally go this year.</p>
+<p>Though customer meetings were a lot more fun than I expected them to be, I still felt bummed that I could spend so little time walking the expo, talking to folks, and attending all the BOFs normally on my <a href="https://sc22.supercomputing.org/presentation/?id=bof124&amp;sess=sess331">must</a>-<a href="https://sc22.supercomputing.org/presentation/?id=bof112&amp;sess=sess307">attend</a> <a href="https://sc22.supercomputing.org/presentation/?id=bof110&amp;sess=sess369">list</a>. Compounding this was my personal choice to not dine indoors and consequently miss out on almost all other chances to catch up with old friends and colleagues. I also decided to leave SC a day earlier than I usually do to reduce my risk of getting sick, which didn't help either. There's never enough time at SC, but this year was particularly pressed.</p>
+<p>I say all this not to complain, but to say how much I appreciated the people who went out of their way to come accost me during the precious few hours I actually had on the exhibit floor. Some I'd not seen since SC'19, and some I'd never actually met since we only started working together mid-pandemic. The conference is busy for everyone, so giving me a slice of your time was very meaningful. That sense of community membership is why I go to SC, it's why I still work in this business, and it's why I try to contribute whatever I can to whoever wants it, whether it be a student, engineer, salesperson, or marketer.</p>
+
+
+
+
+ Converged Computing
+
+ 2022-11-18T08:30:00-07:00
+ https://hpc.social/2022/converged-computing
+ <p>For many years, there has been a battle between cloud and HPC. The cloud side of the equation says “microservices, cloud native!” and the HPC side says “too expensive!” Conversations often don’t progress because both sides are up-in-arms and focused on why they cannot work together. At best, we might get access to cloud from an HPC center, or a company might present a product as branded for “HPC.” But it’s not truly collaborative in the way that I’d like.</p>
+
+<p>I’ll also step back and comment that I do not believe folks (myself included) on the HPC side have done enough to sit at the table. 
For example, we haven’t been a voice in the Open Containers Initiative (<a href="https://supercontainers.github.io/containers-wg/" target="_blank">although I’ve tried</a>), nor have we been present (historically) for conferences that are more focused on cloud native technologies. There is no finger-pointing or fault here - it’s just a matter of two different cultures, and it’s been challenging figuring out how to talk to one another, and how to work together. I’ve tried my best to be involved, in small ways, on both sides. But I’m only one person. This isn’t to say there haven’t been small collaborations, but I believe we can do more.</p>
+
+<h2 id="change-is-coming">Change is Coming</h2>
+
+<p>I think this is going to change. The reason is that both sides of the equation have started to realize we have similar goals, and it’s not about creating hybrid environments – having both pancakes and waffles for breakfast – but rather convergence – recognizing that pancakes and waffles are both kinds of breakfast cakes, and we can take features that we like of each to create a breakfast cake that will make everyone happy. The idea of “Converged Computing” comes from my amazing team (see <a href="https://www.youtube.com/watch?v=9VwAcSOtph0" target="_blank">Dan’s talk at KubeCon here</a>) and is the idea that technologies from HPC can be integrated into more traditional cloud approaches to produce a solution that solves problems on both sides. Explicitly for these projects, it means testing the Flux Framework scheduler alongside Kubernetes. Do we still want portable workflows that can move from an HPC environment to cloud? Of course. However, the niche or gradient that I’m interested in is the space that lives <em>between</em> these two worlds.</p>
+
+<p>While I won’t go into huge detail (this would be more appropriate for a talk) the lab openly works on <a href="https://github.com/flux-framework" target="_blank">Flux Framework</a>, a resource manager that (in my opinion) is one of the coolest projects coming out of our space. I started working with these teams a few months ago, and am bringing my excitement and vision for (what I hope to be) a future where we are actively developing alongside other Kubernetes projects, and our work is well-known and established in this space. What does that mean? Let me share some cool work under development. This is all being done publicly on GitHub, so there is no issue with talking about it! My first year or so at the lab I was hired under a research project, and although I learned a lot, I haven’t felt inspired and driven until starting this work. Let’s talk about some of it! 🎉️</p>
+
+<h3 id="the-flux-operator">The Flux Operator</h3>
+
+<div style="padding: 20px;">
+<img src="https://flux-framework.org/flux-operator/_images/the-operator.jpg" />
+</div>
+
+<p>If you aren’t familiar with Kubernetes Operators, let’s step back and talk about a human operator. If you are a sysadmin managing apps with associated services and databases on a cluster, you often have to do maintenance or update tasks like increasing a storage volume or modifying a service to meet a new user need. As this pattern emerged as a common thing, the Kubernetes community came up with the concept of an Operator - an actual controller you install to your cluster that can automate this. In simple terms, after you install an operator to your cluster, you can hand it a desired state (represented in a yaml configuration file) and the operator will do whatever it takes to reach that state. What does that mean in the context of Flux? The Flux Operator is interested in creating what we are calling a “Mini Cluster,” illustrated below.</p>
+
+<div style="padding: 20px;">
+<img src="https://flux-framework.org/flux-operator/_images/design-three-team1.png" />
+</div>
+
+<p>In Kubernetes object terms this is an <a href="https://kubernetes.io/docs/tasks/job/indexed-parallel-processing-static/" target="_blank">Indexed Job</a>, a few config maps, secrets, and a <a href="https://flux-framework.org/flux-restful-api/" target="_blank">RESTFul API</a> and user interface that I designed, exposed as a service. You can read more about our current design <a href="https://flux-framework.org/flux-operator/development/designs.html" target="_blank">here</a>.</p>
+
+<p>This Mini Cluster is generated from a “custom resource definition” or CRD (the yaml you provide), and it can take <a href="https://flux-framework.org/flux-operator/getting_started/custom-resource-definition.html" target="_blank">these parameters</a>. Conceptually, you as the user own the Mini Cluster and can submit jobs to it (either via the web interface or the API). When you are done, you can bring down the cluster.</p>
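+
+<p>To make the “hand it a desired state” idea concrete, here is a rough sketch of that interaction. Treat it as illustrative only - the <code class="language-plaintext highlighter-rouge">apiVersion</code>, field names, and image are my guesses at the time of writing, so check the parameter docs linked above for the real definition:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Sketch only: hand the operator a desired state, then bring it down when done
+$ cat &gt; minicluster.yaml &lt;&lt; 'EOF'
+apiVersion: flux-framework.org/v1alpha1
+kind: MiniCluster
+metadata:
+  name: flux-sample
+spec:
+  size: 4                            # pods in the Indexed Job
+  containers:
+    - image: ghcr.io/rse-ops/lammps  # hypothetical application container
+      command: lmp -in in.lammps     # what Flux should run
+EOF
+$ kubectl apply -f minicluster.yaml
+# ...submit jobs via the web interface or API, and when finished:
+$ kubectl delete -f minicluster.yaml
+</code></pre></div>
+</div>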
+
+<p>We are excited for this work because in the next months (to a bit longer) we are going to be testing different kinds of workloads running under Flux in this Mini Cluster, but on Kubernetes! I’ve started a small repository of dummy examples that I’m extending quickly at <a href="https://github.com/rse-ops/flux-hpc" target="_blank">rse-ops/flux-hpc</a> - please open an issue there if you have a suggestion.</p>
+
+<h3 id="stay-tuned">Stay Tuned!</h3>
+
+<p>Stay tuned for more work in this space! I’ve been doing a ton of programming in Go and Python, working with a wide range of technologies (and fairly quickly), and I am very much in my happy place. Please come and join us! ❤️</p>
+
+
+
+
+ Ceph OSD CPU Scaling - Part 1
+
+ 2022-11-08T00:00:00-07:00
+ https://hpc.social/2022/ceph-osd-cpu-scaling-part-1
+ <p>Last summer we had a user who hit some performance issues based on a recommendation to use 2 cores per OSD in their systems. I wanted to provide some data for the community and wrote up a blog <a href="https://ceph.io/en/news/blog/2022/ceph-osd-cpu-scaling/">post</a> on the ceph.io website. Please take a look!</p>
+
+
+
+
+ Containerize It, Baby!
+
+ 2022-11-03T09:30:00-06:00
+ https://hpc.social/2022/containerize-it-baby-
+ <p>I’ve just submitted my <a href="https://twitter.com/vsoch/status/1588215058009464832" target="_blank">entry</a> to the HPC Guru Elevator Pitch Contest for the Supercomputing 2022 conference!</p>
+
+<p>I’m fairly sure (like many of these contests) it will be a politically correct winner - someone that best appeals to the conference - but I’ll take a stand right now that I think my submission is tops in terms of creativity and excited energy! I mean, there is just no alternative when it comes to technologies I’m excited about.</p>
+
+<blockquote>
+  <p>Containerize it, baby!</p>
+
+</blockquote>
+
+<p><em>Mic Drop!</em> 🎙️</p>
+
+<p>Regardless of the outcome of this contest, I feel like I’ve already won - I’ve had so much fun making this and sharing with the community! 
🎉️</p>
+
+
+
+
+ happy living close (-ish) to the metal
+
+ 2022-11-02T00:18:17-06:00
+ https://hpc.social/2022/happy-living-close-ish-to-the-metal
+ <p>For various reasons, I’ve been doing a little bit of career introspection lately. One of the interesting realizations to come out of this is that, despite in practice doing mostly software work, I’ve been happiest when my work involved a strong awareness of the hardware I was running on.</p>
+
+<p><span id="more-247"></span></p>
+
+<p>I suppose it shouldn’t be a surprise, but I hadn’t exactly thought about it in those terms before! Before I got into computing, I got a bachelor’s degree in physics, and got through much of a PhD in materials science. While I wasn’t building computers directly, I was definitely working regularly on hardware, building experimental apparatus involving various combinations of vacuum chambers, lasers, exotic microscopes, custom electronics, and microfluidics.</p>
+
+<p>In terms of my computing career, I’ve generally worked in the area of “high-performance computing”, a buzzword that means I’ve focused on building fast parallel systems aimed at researchers.</p>
+
+<p>It’s a sub-field that lends itself to awareness of hardware: even as a new baby sysadmin, I was staring at motherboard block diagrams and thinking about the performance differences between different PCIe topologies.</p>
+
+<p>And because HPC is one of the areas that took the longest to embrace cloud computing, I spent a lot of years doing work in datacenters. Most of my work would usually involve writing code, doing configuration management, and managing Linux systems… but on a regular basis I’d head into a big loud room full of air conditioners and server racks, carrying a screwdriver.</p>
+
+<p>Amusingly, my relatively recent stint at a hyperscaler was the first time I had worked on computers but didn’t have my office in the same building as the computers I was running! Even there I was at least somewhat cognizant of hardware specifics, and one of my early projects was performance testing on the <a href="https://www.opencompute.org/documents/facebook-bryce-canyon-storage-system-specification">Bryce Canyon</a> storage node, to see if it was ready for use in a large-scale distributed filesystem.</p>
+
+<p>And these days, at NVIDIA, I’m enjoying being even closer to the metal. (At least conceptually; I still work remote…) I spend my days thinking about datacenter requirements, cable lengths, firmware upgrades, hardware health checks, and application performance tests on large clusters. And I love getting to play with these shiny toys.</p>
+
+<p>Anyway, this is just a ramble. But a useful one. While I’d be the first to admit that cloud has its place, and I use it for some personal projects, I really enjoy understanding the hardware I run on. I have trouble thinking of computers as remote abstractions with no underlying detail. They are pleasingly physical in my mind, even if they’re thousands of miles away.</p>
+
+
+
+
+ The web services I self-host
+
+ 2022-10-30T21:59:55-06:00
+ https://hpc.social/2022/the-web-services-i-self-host
+ <h2>Why self-host anything?</h2>
+
+<p>In a lot of ways, self-hosting web services is signing up for extra pain. 
Most useful web services are available in SaaS format these days, and most people don&#8217;t want to be a sysadmin just to use chat, email, or read the news.</p>
+
+<p>In general, I decide to self-host a service if one of two things is true:</p>
+
+<p><span id="more-235"></span></p>
+
+<ul><li>Self-hosting is going to add a capability that&#8217;s difficult to find in a SaaS alternative. That might be privacy, or extra compute, or just an extra degree of customization that I want.<br /></li><li>I find it interesting or amusing to self-host it! I <em>have been</em> a professional sysadmin, and ran production web services for over a decade. So I enjoy messing around with servers, and can have a fair amount of fun with this.</li></ul>
+
+<h2>Infrastructure and general tooling</h2>
+
+<p>Right now my self-hosted services are hosted on <a href="https://www.oracle.com/cloud/">Oracle Cloud Infrastructure</a>, for a very simple reason: OCI includes a <em>very</em> generous <a href="https://www.oracle.com/cloud/free/">Always Free tier</a>, which doesn&#8217;t even ask for a credit card! So I&#8217;m confident I&#8217;m not going to accidentally spend any money. I use ARM Ampere A1 Compute instances for service hosting.</p>
+
+<p>The individual services are mostly managed using <a href="https://docs.docker.com/compose/">Docker Compose files</a>, though a few are just running bare-metal. I have so far managed to resist the urge to put everything in Kubernetes.</p>
+
+<p>Everything is backed up on a regular basis using <a href="https://www.tarsnap.com/">Tarsnap</a>.</p>
+
+<p>I also use <a href="https://tailscale.com/">Tailscale</a> to provide a VPN between my cloud servers and my various client devices (phone, laptop, tablet). If a service needs to be exposed to the public Internet to function, I do that&#8230; but otherwise, everything is only exposed within the Tailscale VPN, so that only my own devices can access them. This is both a lovely convenience (not having to manage as many DNS records) and an extra degree of security, since services that no one else needs to access stay hidden.</p>
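+
+<p>As a small illustration of the Compose-based setup, here is roughly what one service definition looks like. This is a hedged sketch rather than my actual config - the image tag, volume path, and port are placeholders:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Hypothetical docker-compose.yml for one self-hosted service
+$ cat &gt; docker-compose.yml &lt;&lt; 'EOF'
+services:
+  freshrss:
+    image: freshrss/freshrss:latest    # the RSS aggregator discussed below
+    restart: unless-stopped
+    volumes:
+      - ./data:/var/www/FreshRSS/data  # keep state outside the container
+    ports:
+      - "8080:80"                      # in practice, reachable only over the Tailscale VPN
+EOF
+$ docker compose up -d
+</code></pre></div>
+</div>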
+
+<h2>Services that I self-host</h2>
+
+<ul><li><strong>RSS reader: </strong>Despite the demise of Google Reader back in the mists of time, I&#8217;ve been a consistently heavy user of RSS feeds since at least 2008. At times I&#8217;ve used commercial products such as <a href="https://feedly.com/">Feedly</a>, but these days I self-host the aggregator using <a href="https://freshrss.org/">FreshRSS</a>. I use FreshRSS partly because it&#8217;s pretty easy to spin up and administer, and partly because it&#8217;s compatible with <a href="https://reederapp.com/">Reeder</a>, a Mac and iOS app that I generally use to actually read my feeds.<br /></li><li><strong>Fediverse instance: </strong>I run a <a href="https://calico.social/">self-hosted instance</a> on the <a href="https://en.wikipedia.org/wiki/Fediverse">Fediverse</a> ensemble of social networking sites. The best-known tool for this is <a href="https://joinmastodon.org/">Mastodon</a>, but I currently use the <a href="https://pleroma.social/">Pleroma server</a>, mostly because it seemed less painful to set up and configure. I run my own instance partly out of curiosity, and partly because I didn&#8217;t strongly resonate with any particular topic-specific server that&#8217;s already out there.<br /></li><li><strong>IRC bouncer: </strong>I&#8217;m not on IRC very much these days, but I do like to avoid losing messages, and sometimes want to be logged into the same channels on different physical clients. So I run a <a href="https://wiki.znc.in/ZNC">ZNC</a> server to maintain persistence.<br /></li><li><strong>Matrix server: </strong><a href="https://matrix.org/">Matrix</a> is a decentralized messaging platform that supports end-to-end encryption. Think of it as being a little like the Fediverse, but for chat rather than microblogging. This falls pretty squarely in the category of &#8220;I find this amusing to run&#8221;, because I mostly chat with less-nerdy folks on other, commercial platforms.<br /></li><li><strong>Git server: </strong>I run a <a href="https://gitea.io/en-us/">Gitea</a> server which I use to mirror my own repos, as well as a variety of other open source repos. This is mostly to ensure that I have an up-to-date backup of repos I care about, independent of Github or whatever provider.<br /></li><li><strong>Jupyter notebooks: </strong>I keep a persistent <a href="https://jupyter.org/">Jupyter</a> notebook instance running for random code experiments and as a tiny development playground. This runs on its own VM where I also do other random software development, and it&#8217;s separate from the other services mostly so I don&#8217;t take down all my personal infra with an accidental OOM from a big build.<br /></li><li><strong>Software package repository: </strong>I run an instance of <a href="https://www.sonatype.com/products/repository-oss-download">Nexus Repository OSS</a>, mostly to cache Docker images and other content used to run the rest of the services above!</li></ul>
+
+<h2>Services where I use managed hosting but don&#8217;t own the server</h2>
+
+<ul><li><strong>This website!</strong> My <a href="https://www.ajdecon.org">regular website</a> and this blog run on a shared hosting provider, mostly through inertia. (I&#8217;ve used the same hosting provider for web hosting since around 2008.)<br /></li><li><strong>Email: </strong>In theory it&#8217;s an open, federated system similar to the Fediverse. In practice, the combination of spam and the growth of large providers makes it increasingly painful to run a server yourself. This <a href="https://cfenollosa.com/blog/after-self-hosting-my-email-for-twenty-three-years-i-have-thrown-in-the-towel-the-oligopoly-has-won.html">post from Carlos Fenollosa</a> does a good job of describing the difficulties.<br /><br />I do, however, run all my email through my own domain, though it&#8217;s hosted via <s>Google Apps</s> <s>GSuite</s> Google Workspace. I also back up my inbox locally on a regular basis. That means that if Google ever decides to remove my account, charge obnoxious costs, or otherwise misbehave, my email address is at least portable to other providers.</li></ul>
+
+
+
+
+ QEMU/KVM + Ceph Librbd Performance
+
+ 2022-10-24T01:00:00-06:00
+ https://hpc.social/2022/qemu-kvm-ceph-librbd-performance
+ <p>Check out my blog <a href="https://ceph.io/en/news/blog/2022/qemu-kvm-tuning/">post</a> at the ceph.io website about tuning QEMU/KVM for high performance with librbd. 
We got over 123K random read IOPS with 16K IOs from a single VM!</p>
+
+
+
+
+ Dashboards for Learning Data Visualizations
+
+ 2022-09-14T06:00:00-06:00
+ https://hpc.social/2022/dashboards-for-learning-data-visualizations
+ <p>Creating dashboards and data visualizations is a favorite pastime of mine. Also, I jump at any chance to learn a new technology. That is why I have spent the last couple of months building dashboards and data visualizations for various projects while learning several web technologies.</p>
+
+<p>Through these dashboards, I have learned many new technologies:</p>
+
+<ul>
+  <li><a href="https://reactjs.org/">React</a> and <a href="https://nextjs.org/">NextJS</a></li>
+  <li>Mapping libraries such as <a href="https://leafletjs.com/">Leaflet</a> and <a href="https://www.mapbox.com/">Mapbox</a></li>
+  <li>CSS libraries such as <a href="https://derekweitzel.com/2022/09/14/dashboards/TailwindCSS">TailwindCSS</a></li>
+  <li>Data access JS clients for <a href="https://derekweitzel.com/2022/09/14/dashboards/Elasticsearch">Elasticsearch</a> and <a href="https://derekweitzel.com/2022/09/14/dashboards/Prometheus">Prometheus</a></li>
+  <li>Website hosting service <a href="https://derekweitzel.com/2022/09/14/dashboards/Vercel">Vercel</a></li>
+  <li>Data Visualization library <a href="https://derekweitzel.com/2022/09/14/dashboards/D3.js">D3.js</a></li>
+</ul>
+
+<h2 id="gp-argo-dashboard"><a href="https://gp-argo.greatplains.net/">GP-ARGO Dashboard</a></h2>
+
+<p><a href="https://gp-argo.greatplains.net/">The Great Plains Augmented Regional Gateway to the Open Science Grid</a> (GP-ARGO) is a regional collaboration of 16 campuses hosting computing that is made available to the OSG. My goal with the GP-ARGO dashboard was to show who is using the resources, as well as give a high-level overview of the region and sites hosting GP-ARGO resources.</p>
+
+<p>The metrics are gathered from OSG’s <a href="https://gracc.opensciencegrid.org/">GRACC Elasticsearch</a>. The list of projects is also from GRACC, and the bar graph in the bottom right is simply an iframe of a Grafana panel from GRACC.</p>
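+
+<p>To give a flavor of what “gathered from GRACC Elasticsearch” means, a metrics fetch is essentially one aggregation query. The endpoint, index, and field names below are invented for illustration and do not reflect the actual GRACC schema:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Hypothetical endpoint/index/fields, just to show the shape of the query
+$ curl -s -H 'Content-Type: application/json' \
+    'https://gracc.example.edu/gracc.osg.summary/_search' \
+    -d '{
+      "size": 0,
+      "query": { "term": { "VOName": "osg" } },
+      "aggs": { "core_hours": { "sum": { "field": "CoreHours" } } }
+    }'
+</code></pre></div>
+</div>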
+
+<p>Technologies used: <a href="https://reactjs.org/">React</a>, <a href="https://nextjs.org/">NextJS</a>, <a href="https://leafletjs.com/">Leaflet</a>, <a href="https://github.com/elastic/elasticsearch-js">Elasticsearch</a></p>
+
+<p><strong>Repo:</strong> <a href="https://github.com/djw8605/gp-argo-map">GP-ARGO Map</a></p>
+
+<p><a href="https://gp-argo.greatplains.net/"><img alt="GP-ARGO" src="https://derekweitzel.com/images/posts/Dashboards/gp-argo-screenshot.png" /></a></p>
+
+<h2 id="osdf-website"><a href="https://osdf.osg-htc.org/">OSDF Website</a></h2>
+
+<p>My next website was the <a href="https://osdf.osg-htc.org/">Open Science Data Federation</a> landing page. I was bolder in the design of the OSDF page. I took heavy inspiration from other technology websites such as the <a href="https://www.mapbox.com/">Mapbox</a> website and the <a href="https://k8slens.dev/">Lens</a> website. The theme is darker, and it was also my first experience with the TailwindCSS library. Additionally, I learned the CSS <a href="https://en.wikipedia.org/wiki/CSS_Flexible_Box_Layout">flexbox</a> layout techniques.</p>
+
+<p>The spinning globe is using the <a href="https://globe.gl/">Globe.gl</a> library. The library is great for creating visualizations that show distribution throughout the world. On the globe I added “transfers” between the OSDF origins and caches. Each origin sends transfers to every cache in the visualization, though it’s all just animation. There is no data behind the transfers; it’s only for visual effect. Also, on the globe, each cache location is labeled. The globe can be rotated and zoomed with your mouse.</p>
+
+<p>The number of bytes read and files read is gathered using the Elasticsearch client querying GRACC, the OSG’s accounting service. The OSG gathers statistics on every transfer a cache or origin performs. Additionally, we calculate the rate of data transfers and the rate of files being read using GRACC.</p>
+
+<p>One unique feature of the OSDF website is the resiliency of the bytes read and files read metrics. We wanted to make sure that the metrics would be shown even if a data component fails. The metrics are gathered in 3 different ways for resiliency, as sketched after this list:</p>
+
+<ol>
+  <li>If all components are working correctly, the metrics are downloaded from the OSG’s Elasticsearch instance.</li>
+  <li>If OSG Elasticsearch has failed, the dashboard pulls saved metrics from NRP’s S3 storage. The metrics are saved every time they are successfully gathered from Elasticsearch, so they should be fairly recent.</li>
+  <li>The metrics are gathered and saved on each website build. The metrics are static and immediately available upon website load. If all else fails, these saved static metrics are always available, even if they may be old.</li>
+</ol>
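+
+<p>That fallback order is simple enough to sketch in a few lines of shell. The URLs here are made up, and the real site implements this in the NextJS data-fetching layer, but the logic is just a chain of “try this, else that”:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Illustrative fallback chain: live Elasticsearch, then the S3 snapshot,
+# then the metrics baked into the site at build time
+fetch_metrics() {
+  curl -sf 'https://gracc.example.edu/osdf/metrics.json' \
+    || curl -sf 'https://s3.nrp.example.edu/osdf/metrics.json' \
+    || cat ./public/metrics-at-build.json
+}
+</code></pre></div>
+</div>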
+
+<p>Technologies used: <a href="https://reactjs.org/">React</a>, <a href="https://nextjs.org/">NextJS</a>, <a href="https://globe.gl/">Globe.gl</a></p>
+
+<p><strong>Repo:</strong> <a href="https://github.com/djw8605/osdf-website">OSDF Website</a></p>
+
+<p><a href="https://osdf.osg-htc.org/"><img alt="OSDF" src="https://derekweitzel.com/images/posts/Dashboards/osdf-screenshot.png" /></a></p>
+
+<h2 id="nrp-dashboard"><a href="https://dash.nrp-nautilus.io/">NRP Dashboard</a></h2>
+
+<p>The National Research Platform dashboard is largely similar to the <a href="https://derekweitzel.com/2022/09/14/dashboards/#gp-argo-dashboard">GP-ARGO</a> dashboard. It uses the same basic framework and technologies, but the data acquisition is different.</p>
+
+<p>The metrics shown are the number of GPUs allocated, the number of pods running, and the number of active research groups. The metrics are gathered from the NRP’s <a href="https://prometheus.io/">Prometheus</a> server on demand. The graph in the background of the metric is generated with <a href="https://d3js.org/">D3.js</a>.</p>
+
+<p>Technologies used: <a href="https://reactjs.org/">React</a>, <a href="https://nextjs.org/">NextJS</a>, <a href="https://d3js.org/">D3.js</a>, <a href="https://github.com/siimon/prom-client">Prometheus</a>, <a href="https://tailwindcss.com/">TailwindCSS</a></p>
+
+<p><strong>Repo:</strong> <a href="https://github.com/djw8605/nrp-map-app">NRP Map App</a></p>
+
+<p><a href="https://dash.nrp-nautilus.io/"><img alt="NRP Dashboard" src="https://derekweitzel.com/images/posts/Dashboards/nrp-dashboard-screenshot.png" /></a></p>
+
+<h2 id="pnrp-website"><a href="https://nrp-website.vercel.app/">PNRP Website</a></h2>
+
+<p>The <a href="https://www.nsf.gov/awardsearch/showAward?AWD_ID=2112167&amp;HistoricalAwards=false">Prototype National Research Platform</a> is an NSF research platform. The dashboard is also in the prototype stage, as the PNRP hardware is not fully delivered and operational yet.</p>
+
+<p>The dashboard is my first experience with a large map from <a href="https://www.mapbox.com/">Mapbox</a>. I used a <a href="https://visgl.github.io/react-map-gl/">React binding</a> to interface with the <a href="https://www.mapbox.com/">Mapbox</a> service. Also, when you click on a site, it zooms into the building where the PNRP hardware will be hosted.</p>
+
+<p>The transfer metrics come from the NRP’s Prometheus, which shows the bytes moving into and out of the node. The transfer metrics are for cache nodes near the sites, but once PNRP hardware becomes operational the transfer metrics will show the site’s cache.</p>
+
+<p>Technologies Used: <a href="https://reactjs.org/">React</a>, <a href="https://nextjs.org/">NextJS</a>, <a href="https://www.mapbox.com/">Mapbox</a>, <a href="https://tailwindcss.com/">TailwindCSS</a>, <a href="https://github.com/siimon/prom-client">Prometheus</a></p>
+
+<p><strong>Repo:</strong> <a href="https://github.com/djw8605/nrp-website">NRP Website</a></p>
+
+<p><a href="https://nrp-website.vercel.app/"><img alt="PNRP Website" src="https://derekweitzel.com/images/posts/Dashboards/nrp-website-screenshot.png" /></a></p>
+
+
+
+
+ Tunel- Apps for HPC
+
+ 2022-08-04T13:30:00-06:00
+ https://hpc.social/2022/tunel-apps-for-hpc
+ <p>A few months ago I was talking about <a href="https://vsoch.github.io/2022/ssh-tunnels/" target="_blank">ssh tunnels</a>. The reason was that I was looking for a solution to deploy apps (like a Jupyter notebook) onto HPC. After an adventure I got it working, and it came down to a relatively simple set of commands that I needed to just <a href="https://github.com/tunel-apps/tunel/blob/main/tunel/ssh/commands.py">write into my app logic</a> and forget about. The reason for this was my new personal project, <a href="https://tunel-apps.github.io/tunel/" target="_blank">tunel</a>.</p>
+
+<blockquote>
+  <p>Tunel is named for what it does. “Tunel” is an elegant derivation of “tunnel” and will do exactly that - create a tunnel between your local workstation and an HPC cluster.</p>
+
+</blockquote>
+
+<div style="padding: 20px;">
+ <img src="https://vsoch.github.io/assets/images/posts/tunel/tunel-docs.png" />
+</div>
+
+<p>In short, tunel will provide a collection of “apps” that are easy to deploy to HPC. There are concepts called launchers, and examples are singularity, slurm, and htcondor. And we can add more! It’s the job of a launcher to take an app recipe (a definition in yaml plus helper scripts that can be customized on the fly by the user) and get it running, whatever that means (run a job? a container? monitor something? something else?). For the most part, most apps that I’ve been developing have web interfaces, as they have historically been the most challenging thing to get easily working on HPC. As a quick example, to run a jupyter notebook via Singularity on my login node, after I install tunel and have my ssh connection defined as “osg” I can do:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>tunel run-app osg singularity/socket/jupyter <span class="nt">--jupyterlab</span><span class="o">=</span><span class="nb">true</span>
+</code></pre></div>
+</div>
+
+<p>The name “singularity/socket/jupyter” is the unique identifier (and path) to the recipe and config, and I can provide custom arguments as shown above. And although this is the “singularity” launcher, we can do the same kind of interaction with a slurm launcher, going one level deeper to run the notebook on a node after we submit a job!</p>
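+
+<p>For instance, the slurm flavor might look like the following - note that this recipe path is hypothetical, and the generated app docs linked below are the source of truth for real identifiers:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Hypothetical recipe path: the same app, but launched via a slurm job
+$ tunel run-app osg slurm/socket/jupyter --jupyterlab=true
+</code></pre></div>
+</div>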
+
+<p>And in my typical way of doing things, I have automation that generates a table and documentation for each of these apps. <a href="https://tunel-apps.github.io/tunel/_static/apps/" target="_blank">Check them out here!</a>.</p>
+
+<div style="padding: 20px;">
+ <img src="https://vsoch.github.io/assets/images/posts/tunel/table.png" />
+</div>
+
+<p>I’m mostly working on singularity and HTCondor apps at the moment because I use the open science grid (OSG) for development, as this is a personal project. Thanks to <a href="https://twitter.com/westbynoreaster" target="_blank">Matthew West</a> for showing me OSG - I was pretty handicapped for development before finding it!</p>
+
+<h2 id="django-template-with-a-socket">Django template with a socket?</h2>
+
+<p>This kind of framework can be powerful if I develop a bunch of custom apps, but it’s much more powerful if I can enable YOU to easily do that too! Thus, I knew one of the first tasks I wanted to do was to create a template, likely in each of Flask, Django, and FastAPI, that would plug immediately into Tunel. And while I have much work left to do, last night and this evening I figured out a technical issue that is going to empower us to make so many cool things and I wanted to share! Let’s talk about the problem, what I tried, and what ultimately worked.</p>
+
+<h3 id="traditional-setup-with-uwsgi-and-nginx">Traditional Setup with uwsgi and nginx</h3>
+
+<p>If you look at a family of Python + web interface apps, you’ll find this <a href="https://uwsgi-docs.readthedocs.io/en/latest/" target="_blank">uwsgi</a> guy in the middle (I don’t know the correct pronunciation but I say YOU-SKI). It’s a fairly rich tool, but in layman’s terms I think of it as a middleman between Python and a traditional web server. But actually, you don’t technically need the web server - and this is where things start to get interesting. 
For a traditional setup, you might find an nginx (a web server) configuration file that <a href="https://github.com/tunel-apps/tunel-django/blob/main/scripts/nginx/nginx.conf" target="_blank">looks like this</a>.</p>
+
+<div class="language-yaml highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="c1"># the upstream component nginx needs to connect to</span>
+<span class="s">upstream django {</span>
+  <span class="s">server unix:///tmp/tunel-django.sock;</span>
+<span class="err">}</span>
+
+<span class="c1"># configuration of the server</span>
+<span class="s">server {</span>
+  <span class="s"># the port your site will be served on</span>
+  <span class="s">listen 8000;</span>
+  <span class="s">charset utf-8;</span>
+  <span class="s">server_name localhost;</span>
+
+  <span class="s">client_max_body_size 10024M;</span>
+  <span class="s">client_body_buffer_size 10024M;</span>
+  <span class="s">client_body_timeout 120;</span>
+
+  <span class="s">...</span>
+  <span class="s">location ~* \.(php|aspx|myadmin|asp)$ {</span>
+    <span class="s">deny all;</span>
+  <span class="s">}</span>
+
+  <span class="s">location /static/ {</span>
+    <span class="s">autoindex on;</span>
+    <span class="s">alias /var/www/static/;</span>
+  <span class="s">}</span>
+
+  <span class="s"># Finally, send all non-media requests to the Django server.</span>
+  <span class="s">location / {</span>
+    <span class="s">uwsgi_pass django;</span>
+    <span class="s">uwsgi_max_temp_file_size 10024m;</span>
+    <span class="s">include /code/scripts/nginx/uwsgi_params.par;</span>
+  <span class="s">}</span>
+<span class="err">}</span>
+</code></pre></div>
+</div>
+
+<p>I’ve made a lot of web apps, and whether I use docker-compose with separate containers or a single one, I usually have to write an nginx configuration. The above gets started in the container entrypoint with my app calling uwsgi, and defining that same socket:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>uwsgi <span class="nt">--socket</span><span class="o">=</span><span class="k">${</span><span class="nv">socket</span><span class="k">}</span> /code/scripts/uwsgi.ini
+</code></pre></div>
+</div>
+
+<p>And of course things happen before that, but that’s the main last line. The uwsgi.ini is a configuration file that makes it easier to define settings.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>[uwsgi]
+master = true
+processes = 4
+threads = 4
+py-autoreload = 1
+#socket = :3031
+chdir = /code/
+post-buffering = true
+log-date = true
+max-requests = 5000
+http-timeout = 3600000
+socket-timeout = 120
+chmod-socket = 666
+wsgi-file = tuneldjango/wsgi.py
+ignore-sigpipe = true
+ignore-write-errors = true
+disable-write-exception = true
+buffer-size=32768
+</code></pre></div>
+</div>
+
+<p>Without going into huge detail, the above says that the app that I wrote (in Python) is listening on that socket, so requests to the web server will either be directed to some static file, filtered out, or sent to our application. And we typically want to use nginx because it’s really good at serving static files and handling traffic.</p>
+
+<p>But now let’s step back. If you look under the server in the config above, you’ll notice we are serving content on port 8000. This is why I can open the browser to localhost and that port and see my application. But as we know with headless HPC, there are no ports. 
I can’t use this. So this was my first predicament, last night. I had created this application and it ran locally, but I needed to somehow get the entire thing routed through a tunneled socket to take a next step.</p>
+
+<h3 id="uwsgi-only">Uwsgi Only?</h3>
+
+<p>I’ll skip over the many hours of things I tried that failed. I really liked having nginx, so I first wanted to somehow send it to the user via a socket, but that never worked. I had an idea to just map the original socket and then have a second container on the host for nginx, but I decided that was too complex. What wound up working was realizing that uwsgi can serve http directly, and that came down to a single addition to its config:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>listen=200
+protocol=http
+</code></pre></div>
+</div>
+
+<p>Once I did that, I tried the same technique to map the socket being written to directly to a port via the ssh tunnel, and <em>boum</em> I saw a page! But it was really ugly, because it had no style. This is where I was like OHNO I need nginx for static. But then I found <a href="https://uwsgi-docs.readthedocs.io/en/latest/StaticFiles.html" target="_blank">this page</a> and it was a message from the heavens - I could define the same static and media URLs using uwsgi directly! That looked like this:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>uwsgi <span class="nt">--socket</span><span class="o">=</span><span class="k">${</span><span class="nv">socket</span><span class="k">}</span> <span class="nt">--static-map</span> /static<span class="o">=</span>/code/static /code/scripts/uwsgi-standalone.ini
+</code></pre></div>
+</div>
+
+<p>At this point I held my breath, re-ran my app, and wow!</p>
+
+<div style="padding: 20px;">
+ <img src="https://vsoch.github.io/assets/images/posts/tunel/home.png" />
+</div>
+
+<p>There it was - my entire app being served by a container running on a remote machine, only accessible to me through a physical socket. And guess what? I added a file browser, and it even worked to upload a dinosaur picture!</p>
+
+<div style="padding: 20px;">
+ <img src="https://vsoch.github.io/assets/images/posts/tunel/browser.png" />
+</div>
+
+<p>Here is the entire page for the app - you can see there are many flags you can add and customize to interact.</p>
+
+<div style="padding: 20px;">
+ <img src="https://vsoch.github.io/assets/images/posts/tunel/app.png" />
+</div>
+
+<p>While it’s only accessible to you and there isn’t a need for any kind of login, I did add the default username/password login to Django, and require it for logging in to the file browser. Of course I will eventually need this to be more formally security audited, but at least I don’t have anything interesting on my OSG home to be worried about. And is using just uwsgi a performance issue? I think probably not, since the expected use case is only one person.</p>
+
+<h3 id="a-future-for-apps">A Future for Apps</h3>
+
+<p>This is just the beginning - my plan is to put together a list of use cases for a GUI on a cluster, and then just package them into the core template apps for the developer user to easily customize. I have big plans for working on this, and honestly I’m so excited that I find I’m staying up way too late and just itching for the work day to end so I can continue. 
This idea is so powerful, because it’s using existing technologies to deploy containerized apps on HPC, where you don’t need any special permission. Just to show y’all, here is what it looks like to launch my app template:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>tunel run-app osg singularity/socket/tunel-django <span class="nt">--tag</span><span class="o">=</span>dev <span class="nt">--pull</span>
+</code></pre></div>
+</div>
+
+<p>I added the pull flag and a custom tag because I am actively developing, and my workflow is to quickly rebuild, push, and then run that command. That then shows me the ssh tunnel command that will immediately connect me to my app on a port in my browser.</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh <span class="nt">-NT</span> <span class="nt">-L</span> 7789:/../tunel/singularity/singularity/socket/tunel-django/singularity-socket-tunel-django.sock sochat1@osg
+</code></pre></div>
+</div>
+
+<p>And that’s seriously it. You as the developer user are empowered to make and deploy apps, and they have interfaces, and you don’t need to do something silly like open a port or actually deploy a web server. It’s so stupidly easy - I’m looking around at all these complex web app setups that people have made for HPC over the years and I wonder why they aren’t doing something simpler. Maybe it’s just a space of development that people gave up on, or there are some security things I’m missing. Either way, I’m going to charge forward working on this! It’s too simple, and the idea is too beautiful to do anything else by this point.</p>
+
+
+
+
+ Ceph RocksDB Tuning Deep-Dive
+
+ 2022-07-25T01:00:00-06:00
+ https://hpc.social/2022/ceph-rocksdb-tuning-deep-dive
+ <p>See my <a href="https://ceph.io/en/news/blog/2022/rocksdb-tuning-deep-dive/">post</a> on the Ceph.io blog about tuning RocksDB in Ceph!</p>
+
+
+
+
+ The Utility vs the Professional Services Firm
+
+ 2022-07-03T01:00:00-06:00
+ https://hpc.social/2022/the-utility-vs-the-professional-services-firm
+ <h2 id="as-research-computing-and-data-becomes-more-complex-and-diverse-we-need-more-professional-services-firms-and-fewer-utilties">As research computing and data becomes more complex and diverse, we need more professional services firms and fewer utilities</h2>
+
+<p>(Note: This post is adapted from <a href="https://www.researchcomputingteams.org/newsletter_issues/0127">#127</a> of the <a href="https://www.researchcomputingteams.org">Research Computing Teams Newsletter</a>)</p>
+
+<p>I get to talk with a lot of research computing and data teams - software, data, and systems. Sometimes in these conversations it’s pretty clear that some teams, or the team and their funder, or a team and I, are talking a bit past each other. And that’s usually because they or we are (currently) operating with very different mental models of how they operate.</p>
+
+<p>Some research computing and data teams are operating as Utilities, and see the world through that lens; a growing number are operating as Professional Services Firms. Others are moving from one to the other, and are at different places along that very abrupt transition. Some kinds of groups (like bioinformatics cores) are much more likely to already be operating in service mode, while others (like research compute infrastructure teams) are more likely to still think of themselves as utilities. 
It varies from place to place, though, depending on local conditions. But they’re very different models!</p>
+
+<figure style="width: 45%; float: right;">
+  <img alt="Utility vs professional services. Image Credit: left, John Moore (@thejmoore) at Unsplash.com; right, Jason Goodman @jasongoodman_youxventures at Unsplash.com" src="https://www.dursi.ca/assets/imgs/utility-vs-professional-svc.png" />
+  <figcaption><i>Utility service and professional services delivery are very different, and require different funding, management, and career development models. Image credit: <a href="https://unsplash.com/photos/0MKzwPmehRE">left</a> and <a href="https://unsplash.com/photos/X8H8vPcelPk">right</a>.</i></figcaption>
+</figure>
+
+<p>Utilities, like power companies or garbage collection or municipal potable water, were really the only sensible role models for the first decades of research computing and data teams. Those teams were entirely about operating large equipment purchased from vendors. Costs were mostly a big capital expense. Everyone who needed the utility needed the same thing - undifferentiated flops and bytes, or 60Hz 120VAC. Because everyone needed the same thing, economies of scale led to <a href="https://en.wikipedia.org/wiki/Natural_monopoly">natural monopolies</a>; the most reasonable provision model was for the local jurisdiction/institution to own or control a single operator. Differentiation or strategy, or gaining new customers, weren’t meaningful discussion topics. The only thing that really makes a difference is scale, which leads to mergers. Innovation happens slowly, top-down, at the industry-wide scale and usually from the vendors (“hey, did you hear about those new gas compressors Dyneco announced?”), and diffuses outwards. Employees take pride in and the organization values operational skill and things ticking along smoothly. Customers value reliability. The only thing that matters for any individual operator is to operate effectively and to provide the standard service with the right amount of cost: high enough to absorb the available subsidy, low enough to not go broke. If a customer needs something other than what the utility provides, rather than that being a market opportunity, it’s either an inconvenience or an irrelevance. The power company or the water utility or the <a href="https://vimeo.com/355556831">old phone monopoly</a> just doesn’t serve that need.</p>
+
+<p>Professional Services Firms — say engineering firms, or architects, or consultancies — are very different beasts. They might very well have significant capital investment in specialized equipment, but their main selling point and their biggest cost is expertise. Competing for and retaining that expertise, and developing that expertise in house and amongst their clients, are principal concerns. As part of a “full-service” offering they likely have some fairly standard services they offer at the low end, where operating cost and efficiency is vital. But what the organization values, and the employees enjoy, is at the high-touch end — getting deeply involved with the client work, and being as much a collaborator or partner or “trusted advisor” as a service provider. Different clients want very different things, and that high-touch high-expertise work is specialized and labour intensive, so the firms themselves need a clear focus; they <em>can’t</em> meet all needs. Clients can go elsewhere, so there is redundancy and competition, but less than you’d think at a distance. 
In civil engineering a geotechnical firm is complementary, not competing, with one that specializes in water resource engineering.</p>
+
+<p>As in the rest of our lives, in research computing we need to have utilities. As research data management matures, institutional or regional data repositories become mature and “enterprise” enough to become utilities, likely run by IT or the Library. Teaching or CI/CD or MLOps resources for data science or software development are likely best served by this model. The closer the operations are to standard, something that can be run by IT, the more likely it is to be a utility. But one has to be careful. Utilities are commodities: they tend to get merged together wherever feasible, since scale matters and it’s all undifferentiated commodity provision.</p>
+
+<p>As research computing becomes broader and faster changing and more diverse, we need more and more professional services firms, too; nimble groups specialized to particular needs and ready to adapt as those needs change. As even infrastructure is becoming less one-size-fits-all, and methods for making use of computing and data for diverse fields grow more complex and expertise intensive, the preconditions for the utility model are met in fewer situations than they used to be.</p>
+
+<p>A lot of research computing and data teams are interested in providing something more like professional services, but were created in the Utility model, and are stuck there by their funders. The institutional or external funders still have this very specific (and to their mind time tested and successful) operating model in their plans. Utilities are funded very differently than professional services firms. At utility scale, it doesn’t make sense to outsource things, or develop non-standard services (who wants non-standard power coming into their house!) Funders’ requirements on eligible expenses may focus almost entirely on the capital spend, and not on operating funding that’s needed to make effective use of the capital, or to be more agile in how services are delivered.</p>
+
+<p>Even those teams who aren’t being held back by funders and who want to make the switch to professional services from their original utility model find it a hard transition. There’s no obvious, incremental path to go from providing a standard, stable commodity to providing changing, specialized bundles of expertise. Utilities operate very differently from professional services firms. They value different things. The models for staff growth are different. So they have to be managed quite differently, and there’s no clear path internally from A to B.</p>
+
+<p>Besides funding, and internal considerations, utilities and professional services firms are also perceived and valued by their clients very differently. Utilities’ existing customers don’t want change, and new customers aren’t yet interested in getting advanced app software development suggestions from what they perceive to still be the mobile telephony provider.</p>
+
+<p>But research computing and data is changing, increasingly quickly, and the utility approach only meets a piece of these growing needs. Navigating the transition isn’t going to be easy, for RCD teams, leaders, or funders; but expressing it clearly and talking about it more will maybe mean we’re not talking past each other so often.</p>
+
+
+
+
+ SSH Tunnels
+
+ 2022-06-26T13:30:00-06:00
+ https://hpc.social/2022/ssh-tunnels
+ <p>Today I want to talk about ssh tunnels. Very abstractly, we would want to use an ssh tunnel to securely send information. In the case of HPC, you are probably familiar with ssh (Secure Shell or Secure Socket Shell) from when you log in to your node. You might do something like this:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh dinosaur@server.address.edu
+</code></pre></div>
+</div>
+
+<p>Or if you have a proper setup in your <code class="language-plaintext highlighter-rouge">~/.ssh/config</code> (with a named server) you might just do:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh dinosaur
+</code></pre></div>
+</div>
+
+<p>I like to use <a href="https://en.wikibooks.org/wiki/OpenSSH/Cookbook/Multiplexing" target="_blank">ssh connection multiplexing</a> so the connection is kept alive for a bit, but I won’t go into detail because this post isn’t specifically about the details of ssh. The use case I’m interested in (and the thing that HPC is very bad at) is how to deploy something interactive on an HPC cluster.</p>
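+
+<p>(As an aside, multiplexing is just a few lines in <code class="language-plaintext highlighter-rouge">~/.ssh/config</code>. This is a minimal sketch - the host alias and the ten minute persistence window are arbitrary choices:)</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>Host dinosaur
+    HostName server.address.edu
+    User dinosaur
+    ControlMaster auto
+    ControlPath ~/.ssh/control-%r@%h:%p
+    ControlPersist 10m
+</code></pre></div>
+</div>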
+
+<h2 id="ssh-tunnel-with-ports">SSH Tunnel with Ports</h2>
+
+<p>Given that a cluster has exposed ports (either the login node, or both the login node and compute nodes) creating a tunnel is fairly straightforward! In the past I created a tool called <a href="https://github.com/vsoch/forward" target="_blank">forward</a> to handle all the manual steps to get this working, meaning:</p>
+
+<ol class="custom-counter">
+  <li>Show the user <a href="https://github.com/vsoch/forward#ssh-config" target="_blank">how to set up their ~/.ssh/config</a> (once)</li>
+  <li>Define (once) parameters like a port, memory, GPUs, and if the cluster has isolated nodes</li>
+  <li>Start any number of provided apps that come with forward (e.g., jupyter, singularity, etc.)</li>
+</ol>
+
+<p>An interaction using forward might look like any of the following:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c"># Run a Singularity container that already exists on your resource (recommended)</span>
+bash start-node.sh singularity-run /scratch/users/vsochat/share/pytorch-dev.simg
+
+<span class="c"># Execute a custom command to the same Singularity container</span>
+bash start-node.sh singularity-exec /scratch/users/vsochat/share/pytorch-dev.simg <span class="nb">echo</span> <span class="s2">"Hello World"</span>
+
+<span class="c"># Run a Singularity container from a url, `docker://ubuntu`</span>
+bash start-node.sh singularity-run docker://ubuntu
+
+<span class="c"># Execute a custom command to the same container</span>
+bash start-node.sh singularity-exec docker://ubuntu <span class="nb">echo</span> <span class="s2">"Hello World"</span>
+
+<span class="c"># To start a jupyter notebook in a specific directory ON the cluster resource</span>
+bash start.sh jupyter &lt;cluster-dir&gt;
+
+<span class="c"># To start a jupyter notebook with tensorflow in a specific directory</span>
+bash start.sh py3-tensorflow &lt;cluster-dir&gt;
+</code></pre></div>
+</div>
+
+<p>Note that the last set of commands pertains to notebooks, which is where these tunnels come into play! 
+A notebook is going to be run on a compute node with a command something like the following:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>jupyter notebook <span class="nt">--no-browser</span> <span class="nt">--port</span><span class="o">=</span><span class="nv">$PORT</span>
+</code></pre></div>
+</div>
+
+<p>And if you ran this with a Singularity container, you’d also want to bind jovyan’s home to be the user’s, along with the jupyter config directory:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>singularity <span class="nb">exec</span> <span class="nt">--home</span> <span class="k">${</span><span class="nv">HOME</span><span class="k">}</span> <span class="se">\</span>
+  <span class="nt">--bind</span> <span class="k">${</span><span class="nv">HOME</span><span class="k">}</span>/.local:/home/jovyan/.local <span class="se">\</span>
+  <span class="nt">--bind</span> <span class="k">${</span><span class="nv">HOME</span><span class="k">}</span>/.jupyter:/home/jovyan/.jupyter <span class="se">\</span>
+  datascience_notebook.sif jupyter notebook <span class="nt">--no-browser</span> <span class="nt">--port</span><span class="o">=</span><span class="nv">$PORT</span> <span class="nt">--ip</span> 0.0.0.0
+</code></pre></div>
+</div>
+
+<p>As we described earlier <a href="https://github.com/vsoch/forward#ssh-port-forwarding-considerations" target="_blank">here</a>, there are subtle differences in making a tunnel (with a port) depending on whether you have isolated nodes (or not). You can determine this based on your ability to ssh into a non-login node (meaning where your job is running) from “the outside world” that is your computer. If you cannot, your nodes are isolated, which we will discuss next.</p>
+
+<h3 id="isolated-nodes">Isolated Nodes</h3>
+
+<p>Let’s say that we need to create a tunnel (using ports) to an isolated node. This means that we are basically going to establish a tunnel to the login node, and then from the login node another one to the compute node. We might use a command that looks like this:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh <span class="nt">-L</span> <span class="nv">$PORT</span>:localhost:<span class="nv">$PORT</span> <span class="k">${</span><span class="nv">RESOURCE</span><span class="k">}</span> ssh <span class="nt">-L</span> <span class="nv">$PORT</span>:localhost:<span class="nv">$PORT</span> <span class="nt">-N</span> <span class="s2">"</span><span class="nv">$MACHINE</span><span class="s2">"</span> &amp;
+</code></pre></div>
+</div>
+
+<p>In the command above, the first half (<code class="language-plaintext highlighter-rouge">ssh -L $PORT:localhost:$PORT ${RESOURCE}</code>) is executed on the local machine, which establishes a port forwarding to the login node. The “-L” in the above (from the <a href="https://linuxcommand.org/lc3_man_pages/ssh1.html" target="_blank">man pages</a>):</p>
+
+<blockquote>
+  <p>Specifies that connections to the given TCP port or Unix socket on the local (client) host are to be forwarded to the given host and port, or Unix socket, on the remote side. This works by allocating a socket to listen to either a TCP port on the local side, optionally bound to the specified bind_address, or to a Unix socket. 
Whenever a connection is made to the local port or socket, the connection is forwarded over the secure channel, and a connection is made to either host port hostport, or the Unix socket remote_socket, from the remote machine.</p>
+
+</blockquote>
+
+<p>Or in layman’s terms:</p>
+
+<blockquote>
+  <p>Forward whatever is running on the second port on my resource to my local machine.</p>
+
+</blockquote>
+
+<p>Since we are forwarding ports, this would minimally require the login node to expose ports. The next line <code class="language-plaintext highlighter-rouge">ssh -L $PORT:localhost:$PORT -N "$MACHINE" &amp;</code> is a second command run from the login node, and forwards the port on to the compute node, since you can only access the compute node from the login nodes. You’ll notice it looks just like the first, and this works because ssh commands can be chained. The <span></span><code class="language-plaintext highlighter-rouge">-N</code> says “don’t execute a remote command (and just forward the port).” Finally, the last <code class="language-plaintext highlighter-rouge">$MACHINE</code> is the node that the jupyter notebook is running on.</p>
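+
+<p>To make that concrete, here is the same chained command with the variables filled in with made-up values - port 8888, a login node alias of “login”, and a compute node named “compute-03”:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Local machine -&gt; login node -&gt; isolated compute node, all on port 8888
+$ ssh -L 8888:localhost:8888 login ssh -L 8888:localhost:8888 -N compute-03 &amp;
+# Now http://localhost:8888 on the local machine reaches the notebook on compute-03
+</code></pre></div>
+</div>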
+
+<h3 id="not-isolated">Not Isolated</h3>
+
+<p>For HPCs where the compute node is not isolated from the outside world, the ssh command for port forwarding first establishes a connection to the login node, but then continues to pass on the login credentials to the compute node to establish a tunnel between the localhost and the port on the compute node. The ssh command in this case utilizes the flag <code class="language-plaintext highlighter-rouge">-K</code> that forwards the login credentials to the compute node:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh <span class="s2">"</span><span class="nv">$DOMAINNAME</span><span class="s2">"</span> <span class="nt">-l</span> <span class="nv">$FORWARD_USERNAME</span> <span class="nt">-K</span> <span class="nt">-L</span> <span class="nv">$PORT</span>:<span class="nv">$MACHINE</span>:<span class="nv">$PORT</span> <span class="nt">-N</span> &amp;
+</code></pre></div>
+</div>
+
+<p>I’m not sure in practice how common this is anymore. At least at my current employer it’s not even the case
+that ports are exposed on the login node! It’s probably better that way, because in cases where you do get ports it’s sort of a
+“pick a port above this range and hope that no other user picks the same one!” It’s messy.
+So let’s talk about the case of not having ports exposed next, since this was the entire reason I wanted to write this post!</p>
+
+<h2 id="ssh-tunnel-with-socket">SSH Tunnel with Socket</h2>
+
+<p>More than a year ago, I had this realization that a lot of people at Stanford used the “forward” tool, and just for notebooks (and this
+was before they were available via Open OnDemand, which is what I’d recommend to a Stanford user at this point). I decided I wanted to make a new
+open source tool, “tunel” (an elegant derivation of “tunnel”) <a href="https://github.com/vsoch/tunel" target="_blank">vsoch/tunel</a> to make it easy
+to run what I call “apps” on an HPC cluster. Are there better ways of exposing user interfaces on HPC? Yes, indeed. But not everyone
+has easy access. It was also a stubborn “I want this to work” proof of concept. This new tool would be like forward, but a little nicer.
+Because I, along with every other HPC developer and user, wish we could have nice things 😭️.</p>
+
+<p>At this time I had just started a new role at a national lab, and I realized that none of my old techniques for launching
+the job worked because of the lack of exposed ports. Thinking this was impossible, I abandoned it for a year. But then this last week I found
+<a href="https://github.com/jupyter/notebook/pull/4835" target="_blank">this</a>! I was motivated! I was excited! The basic launch command of the notebook looks like this:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>jupyter notebook <span class="nt">--sock</span> /tmp/test.sock <span class="nt">--no-browser</span>
+</code></pre></div>
+</div>
+
+<p>And then with a different looking tunnel, we could forward this socket to the host, and map it to a port! My excitement was then brought down
+by what led to two days of struggling. I first tried my entire tunel workflow, meaning launching a job on a node,
+and then running that command, and providing the instruction to the user to create the tunnel as follows:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh <span class="nt">-L</span> 8888:/tmp/test.sock <span class="nt">-N</span> user@this_host
+</code></pre></div>
+</div>
+
+<p>That didn’t work (and remember, this socket was created on the isolated node - that’s important for later). So I started looking at the socket with “nc” - “arbitrary TCP and UDP connections and listens” from the login node. The “-U” below is for UNIX sockets:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>nc <span class="nt">-U</span> /tmp/test.sock
+</code></pre></div>
+</div>
+
+<p>And from the head node I saw:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>Ncat: Connection refused.
+</code></pre></div>
+</div>
+
+<p>So then I knew I needed a simpler, dummier example. I got rid of tunel and just ran the notebook command on the head node.
+Dear reader, it still did not work. I <a href="https://github.com/jupyter/notebook/issues/6459" target="_blank">opened an issue</a> and asked <a href="https://twitter.com/vsoch/status/1540546526044250112" target="_blank">Twitter for help</a>. Someone else on Twitter reported that <a href="https://twitter.com/al3x609/status/1540846694262243328" target="_blank">it worked for them</a>, and that (in my opinion) is the challenge and story of HPC - given the huge differences in setups, it’s hard to reproduce what another person does unless you scope to a very specific
+environment or technology and hugely go out of your way to do it. I’m always grateful when someone tries to help, but when the ultimate answer is just
+“But it works on my machine!” I (and I think all of us) are like:</p>
+
+<p><span style="font-size: 50px; color: darkorchid;">(╯°□°)╯︵ ┻━┻</span></p>
+
+<p>🤣️</p>
+
+<p>Please know that this is intended to be funny, and I really am grateful for the attempt to help! Anyway, the first night I was devastated because I was so excited about the possibility of this working! But of course (as it usually does) my quasi-sadness turned again into relentless stubbornness, and for my Saturday
+I embarked on trying everything.
I call this the stubborn brute force approach, and it actually leads to some pretty good outcomes?</p>
+
+<h3 id="socket-from-login-node">Socket from Login Node</h3>
+
+<p>First from the login node, I started reading about flags in detail, again from the <a href="https://linuxcommand.org/lc3_man_pages/ssh1.html" target="_blank">man pages</a>. It occurred to me that the suggested command included “-L” (discussed earlier) but there were a ton of other flags to try, and maybe I need them for my setup? The command that wound up working (after much trial and error) was just:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c"># Running on login node</span>
+<span class="nv">$ </span>ssh <span class="nt">-NT</span> <span class="nt">-L</span> 8888:/tmp/test.sock user@server
+</code></pre></div>
+</div>
+
+<p>And here again was the suggested command:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh <span class="nt">-L</span> 8888:/tmp/test.sock <span class="nt">-N</span> user@this_host
+</code></pre></div>
+</div>
+
+<p>So they are very similar - and the main difference is the <code class="language-plaintext highlighter-rouge">-T</code>, which is to “Disable pseudo-terminal allocation.”
+So I suspect (also based on the version of ssh I’m using) that without the flag, you might be making a request for a pty to the server
+(<a href="https://stackoverflow.com/questions/10330678/gitolite-pty-allocation-request-failed-on-channel-0/10346575#10346575" target="_blank">more details here</a>) and then it could abort. Adding the flag just skips this, because we don’t need that - we just need the simple forward. And yes, this indeed feels very specific to your ssh setup, version of ssh, and server configuration. Of course, this was only the beginning of figuring things out, because I had no idea how to get this working from one level deeper - an isolated compute node.</p>
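+
+<p>As an aside, one way to sanity check a forward like this (an assumption on my part - the post used nc, but stock curl can speak HTTP over both a forwarded port and a UNIX socket directly) would be:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code># From the local machine: is the forwarded port answering?
+$ curl -s http://localhost:8888 | head -n 3
+
+# On the machine that owns the socket: talk HTTP over the UNIX socket itself
+$ curl -s --unix-socket /tmp/test.sock http://localhost | head -n 3
+</code></pre></div>
+</div>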
+
+<h3 id="socket-with-isolated-nodes">Socket with Isolated Nodes</h3>
+
+<p>Remember when we created the socket on the isolated node and tried this out from the login node:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>nc <span class="nt">-U</span> /tmp/test.sock
+</code></pre></div>
+</div>
+
+<p>And the result was this:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>Ncat: Connection refused.
+</code></pre></div>
+</div>
+
+<p>My spidey senses were telling me that this should work. Indeed, when I ssh into the isolated node from the login node,
+that same command allowed me to connect (meaning it hung / there was no error output). So my first task, I decided, was to try
+and “forward” this socket to the login node. Again, back to the man pages! I wound up with something like this (run from the login node):</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh isolated-node <span class="nt">-NT</span> <span class="nt">-L</span> /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock
+</code></pre></div>
+</div>
+
+<p>The above is again using <code class="language-plaintext highlighter-rouge">-L</code> but instead of ports (which aren’t exposed) we are using a socket! It’s kind of neat you can switch out those two.
+When I tried the same nc command from the login
+node, we had progress (no connection refused message!) 🎉️ And then I moved this up one level to see if I could make this same request from my local machine, sort of combining the first command that worked for the login node notebook with this one. That looked like this (and yes this took more trial and error):</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh <span class="nt">-NT</span> user@server ssh isolated-node <span class="nt">-NT</span> <span class="nt">-L</span> /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock
+</code></pre></div>
+</div>
+
+<p>And to confirm it was working, I’d ssh into the server and again run that nc command to ensure that the newly forwarded socket was readable from
+the login node. After this, again with more trial and error, I tried running a second command to just forward that (now working socket) to my host.
+That eventually looked like this:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c"># And another for the local socket</span>
+<span class="nv">$ </span>ssh <span class="nt">-NT</span> <span class="nt">-L</span> 8899:/home/dinosaur/login-node.sock user@server
+</code></pre></div>
+</div>
+
+<p>And then (all together now!) I tried putting them together.</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>ssh <span class="nt">-NT</span> <span class="nt">-L</span> 8899:/home/dinosaur/login-node.sock user@server ssh isolated-node <span class="se">\</span>
+    <span class="nt">-NT</span> <span class="nt">-L</span> /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock
+</code></pre></div>
+</div>
+
+<p>And then I spent some time integrating it into tunel, and <em>surprise!</em> the first implementation didn’t work. The first bug was that I needed to clean up old sockets each time the “same” app was run (determined by the job name and organizational namespace so the user can only run one of a particular interactive app at once, and not forget about previous runs). The second issue was about opening the tunnel - it didn’t seem to work if the process exited and/or it was run in a subshell (that also probably exits). I realized that (for the time being) running this connection step on behalf of the user, since it’s something the user should have more control over, probably wasn’t the right way to go. If the user hasn’t added something like an rsa key to <code class="language-plaintext highlighter-rouge">~/.ssh/authorized_keys</code> on their clusters, it would also ask for a password interactively, making it harder for me to manage. So for simplicity’s sake, and assuming that we really should put the user in control of deciding when to start/stop the tunnel, I simply print the full ssh command in the terminal and let them copy-paste it. A successful connection might then prompt them for their password for that second ssh, which (by default) I don’t think is carrying forward auth from the first.</p>
+
+<p>So that was my adventure! Mind you, this entire adventure was only about two days, and that included time to write this post, so I still have lots in front of me to work on.
However, with these updated commands (and some nice tweaks from Python’s <a href="https://github.com/Textualize/rich" target="_blank">rich</a> library) I quickly had a nice set of commands to run and stop an app with an interactive jupyter notebook, using sockets on isolated nodes!</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>tunel run-app server slurm/socket/jupyter
+<span class="nv">$ </span>tunel stop-app server slurm/socket/jupyter
+</code></pre></div>
+</div>
+
+<p>As a sidenote, one thing I like about rich is that it treats aesthetics as a first-class citizen.
+So many tools just don’t consider this, and I love that with rich I can think about colors, presentation,
+and even animations like spinners!</p>
+
+<p>Getting a socket working means I’ll be able to continue working on this library (hooray!) so if you have ideas or requests for apps
+you’d like to run on HPC, assuming just this basic technology, please give me a ping and I’d love to chat and support them.
+I’m also going to be requesting an allocation on the Open Science Grid, which hopefully will give me other kinds of clusters
+to test on. I hope this was interesting to read, thanks for doing that!</p>
+
+
+
+
+    Research Software Registries
+
+    2022-06-19T13:15:00-06:00
+    https://hpc.social/2022/research-software-registries
+    <p>This post was spurred by some original thinking about <a href="https://rse-ops.github.io/proposals/proposals/drafts/research-software-registry/" target="_blank">research software registries</a>, and my recent discovery of the <a href="https://scicodes.net/" target="_blank">SciCodes Consortium</a>, which I’m excited to find (and a bit surprised I didn’t earlier given my experience with research software and registries)! Since I’ve developed registries and been involved extensively in communities that develop standards and tooling for them, I’ve naturally been ruminating over ideas for several months, and hoping to find others that are motivated to think about similar things. This is the motivation of this post - to ruminate, share my thinking, and think together about ideas. You can read the content, or listen to the ideas below.</p>
+
+<div style="font-size: 10px; color: #cccccc; overflow: hidden; white-space: nowrap; font-family: Interstate,Lucida Grande,Lucida Sans Unicode,Lucida Sans,Garuda,Verdana,Tahoma,sans-serif; font-weight: 100;"><a href="https://soundcloud.com/vsoch" style="color: #cccccc; text-decoration: none;" target="_blank" title="vsoch">vsoch</a> · <a href="https://soundcloud.com/vsoch/research-software-registries" style="color: #cccccc; text-decoration: none;" target="_blank" title="Research Software Registries">Research Software Registries</a></div>
+
+<h2 id="why-do-we-want-research-software-registries">Why do we want research software registries?</h2>
+
+<p>Research software registries have value when they are deployed for a specific context. However,
+I’m not convinced that a research software registry, in its most basic form providing archives with DOIs and metadata, is a useful thing in and of itself. It’s adding complexity and redundancy to an already cluttered ecosystem. The reason is that the source of truth of software is usually the source code in version control, e.g., the GitHub repository, which often already has support for features we need to enable easy citation (CITATION.cff), tagged releases, and programmatically accessible metadata.
In this context, any kind of registry that provides another identifier and points to the first is providing redundant information. The only potential benefit is grouping and curation, which I would then argue should still point to the version control and/or a specific release as a source of truth.</p>
+
+<p>I’m also not convinced that we have established an actual use case of “searching a registry for software.” What happens in labs and communities is that you establish communities around the software, and then there are established workflows or slack communities or GitHub organizations to join around that. Most labs already have chosen languages, and even software pipelines that new members extend or work on. I would even go as far as to say that for some (myself included) I don’t find research software, but it finds me. It appears as a link in some social media or chat channel, and I click the link and then there are about 15 seconds during which I make a determination of whether the software can help me to solve a problem that I have, or if it looks easy, professional, and/or fun and I simply want to try it out. If the answer is “yes” then I add it to a list in a Google Document with other things to try out when I have time. If not, I close the tab and life moves on. But I want to point out that nowhere in this workflow do I explicitly go looking for software. The software often finds me, and then I keep a mental cache of “tools that I’ve seen” and go back to it when the use case arises.</p>
+
+<p>So being able to answer this question about wanting research software registries is especially challenging because I’m not sure I’ve ever wanted one.
+Unless there is a specific kind of context around a registry (e.g., search for a specific name in a package manager to use, or look for an already assembled workflow) I haven’t been able to convince myself (yet) that I would find a use for one. I could be wrong about this, however, because as we know, people (myself included) are fairly bad at predicting the future, and perhaps there could be some future where “checking a research software registry” is a part of a daily workflow. I am skeptical because I think that a context is needed. Even if some central source of truth for software abilities was established, would it not be the case that a graduate student or researcher needs to go there with a use case or context in mind? I can’t imagine just mindlessly browsing for the sake of browsing. It’s akin to search engines - we are usually looking for something very specific. We don’t search without a purpose. The question here then is, what is the purpose?</p>
+
+<h2 id="research-software-registries-with-a-purpose">Research Software Registries with a Purpose</h2>
+
+<p>A very good example of purpose comes down to workflows. This is the “I need to perform this specific function and I want to use what many others have done before me and not re-invent the wheel.” The minimum example of a workflow registry would be a search interface that indexes pipelines that are perhaps stored in version control. An extended version of that includes being able to provide structured inputs, outputs, and arguments, so the registry can programmatically provide this information to tools. You can then also quickly see how changing this to be general inputs/outputs of software (and functions within) and entrypoints of containers can quickly become a more generalized registry for software that could be used by any workflow manager that knows how to consume its information.
However, there is a fine line here, because when we talk about I/O we are going
+squarely into workflow management territory, and again in my opinion, we have to be careful about that scope. The closest thing that comes to mind for providing workflows as a service is something like <a href="https://openneuro.org/" target="_blank">openneuro</a> that has a beautiful idea of “Get your data into this standard format and we will serve it and provide other easy ways to analyze it.” This kind of success story tells me that perhaps there is something to say for developing anything related to processing or pipelines in the context of a community. You can’t create the perfect registry for every scientific discipline, or perhaps you can do a mediocre job at trying, but perhaps if you scope to a specific one you can do a very good job. I’ve found the same to be true with software - it’s often better to do one or a few things very well than many things kind of mediocre.</p>
+
+<h3 id="a-provider-of-identifiers">A Provider of Identifiers?</h3>
+
+<p>I’m skeptical when I hear that people want to apply our traditional model of publication (e.g., having a DOI) to software. The reason isn’t because I don’t value means to support reproducibility (and knowing the exact version of something that was used) but rather that we already have means to tag specific versions of software, and means that fit into a well-established ecosystem: package managers, versions, and releases. I also disagree with the idea that a single frozen version of software is “the correct unit to provide.” Software is a living and changing entity, and when it truly does “freeze” and stops being worked on, unlike a DOI in the academic space, this is sort of its death. The correct entrypoint for a piece of software, in my opinion, is the current version on version control, from where you could decide to pin a particular release or install a particular version from a package manager. But to provide a single frozen DOI that is wrapping some other version / release of the software? It doesn’t make sense to me. It’s adding additional complexity that’s not needed. So my opinion (as I’ve shared before) is that we should be thinking more about preserving specific timepoints in package managers, and not adding on an artificially created layer of “DOI” that seems (in my opinion) more of a reflection of our need to shove things into an academic framework we are comfortable with than anything else.</p>
+
+<p>So I hope that the purpose of a research software registry would not just be to provide DOIs. That doesn’t help me get my work done at the end of the day. All that said, I don’t think there can be a singular answer for purpose. I think the purpose ultimately comes down to the institution (or community) and the specific goals of the registry. For this reason there is no one answer for what a registry should look like or provide, and it is (or will be) challenging to define attributes that “any registry should have.”</p>
+
+<h3 id="what-is-my-purpose">What is my purpose?</h3>
+
+<p><em>You cut butter</em>!</p>
+
+<p>Just kidding :_) I’ve been ruminating on this idea for quite some time, and namely because I’m motivated to build a new kind of research software registry, but first I need to convince myself of a meaningful purpose. While I don’t have my convincing answer yet (but I do have a sense of direction) the way I’ve been thinking about this is to provide a set of questions or use cases that seem plausible.
It seems like most people are asking “What kind of information should we have in a registry” but I think this isn’t exactly the question I’m interested in - I want to know:</p>
+
+<blockquote>
+  <p>What do you want to do next with the software you find?</p>
+
+</blockquote>
+
+<p>This is important because it’s going to drive the context and purpose of the registry. Here are a few examples:</p>
+
+<ol class="custom-counter">
+  <li><strong>I want to quickly try this out</strong> → a registry that can deploy a developer environment</li>
+  <li><strong>I want to find if this is in a package manager</strong> → a reproducible install</li>
+  <li><strong>I want to use this with a workflow manager</strong> → this is some kind of workflow hub</li>
+  <li><strong>I want to see inputs / outputs / entrypoints</strong> → support to workflow tools</li>
+  <li><strong>I want to install this on HPC</strong> → I want a module deployment or similar</li>
+  <li><strong>I want to cite this</strong> → use case akin to CITATION.cff (see the sketch after this list)</li>
+  <li><strong>I want to understand dependencies of an ecosystem</strong> → a registry deploying something akin to citelang</li>
+  <li><strong>I want to see all my options to do X</strong> → a domain or categorical registry</li>
+  <li><strong>I want to see new and noteworthy libraries</strong> → a registry with advanced filtering and ranking</li>
+  <li><strong>I want to see change over time</strong> → a registry with a layer of analysis tools</li>
+</ol>
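+
+<p>To make the citation case (number 6 above) concrete, here is a minimal sketch of a <code class="language-plaintext highlighter-rouge">CITATION.cff</code> - the field names come from the CFF schema, but the title, version, and author below are hypothetical placeholders:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Drop a minimal citation file into a repository (values are made up)
+$ cat &gt; CITATION.cff &lt;&lt;'EOF'
+cff-version: 1.2.0
+message: "If you use this software, please cite it as below."
+title: "my-research-tool"
+version: 0.1.0
+date-released: 2022-06-19
+authors:
+  - family-names: "Dinosaur"
+    given-names: "Sample"
+EOF
+</code></pre></div>
+</div>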
+
+<p>Indeed many of the above contexts require additional information. For example, if we want to be able to ask what software is specifically used to perform X, we need a set of functions that are common to a domain, and then to annotate specific software (or even functions) that do it. If we want to then ask “Which of these is the best?” we need to then generate benchmarks to measure this functionality. E.g., how long does it take to run? What are the inputs and outputs and are they correct? What are resource needs? It would be an incredibly cool thing to be able to ask these questions, but an enormous amount of work for any particular scientific domain. As an example of thinking about functional needs, we might look to brain imaging, which is arguably a subfield of neuroinformatics. We might define custom processing functions like thresholding, registration, normalization, or creating regions of interest, tag specific functions that can do each, and then collect and share metrics about how successfully each one does it. Arguably, if I wanted to do this I would create wrappers for workflow managers (akin to Snakemake Wrappers) that not only measure metrics, but make it easy for people to quickly use it in their work.</p>
+
+<h2 id="it-needs-to-be-easy">It needs to be easy</h2>
+
+<p>Whether I’m thinking about being a user of a research software registry or a developer, it just needs to be easy. Here are some ideas around that.</p>
+
+<h3 id="re-inventing-the-wheel">Re-inventing the wheel?</h3>
+
+<p>I come with the experience of deploying a custom container registry (Singularity Hub) years ago, and then being involved in standards committees (the Open Container Initiative) that develop generalized specifications that now drive the most common software (container) registries. I’ve also developed registry proxies that do interesting things, along with a Python OCI registry, and I’m the main developer for oras-py (ORAS == OCI Registry as Storage). So believe me when I say that in terms of storing blobs and metadata about them, I don’t think we should re-invent the wheel. Any new registry I create is going to start with these standards. You might disagree, and that’s OK. But I think people have thought long and hard about these things, and we are stronger for working together on them over always making our own new thing.</p>
+
+<p>As a supplement to that, I want to point out one of the biggest challenges in our community. The majority of research software, I would argue, doesn’t get used beyond the lab it’s created for. Said lab might submit or include it in a paper, and then they get their publication and move on. This is reflective of many things, and I’ll review them here. The first is our funding model - we maybe can fund working on a piece of software only up until the funding dries up, and then it becomes an abandoned repository, if it’s made publicly available. The second is our incentive model - the academic community is focused on writing papers, so once you get there, you don’t have reason to consider the long term impact of the software. The third is communication. It is actually much easier to throw together your own library than to have to search and then try contributing to someone else’s.
+I say this because I don’t think the way things are is necessarily the fault of anyone - we are all agents responding to incentives and resources available.</p>
+
+<p>But then on the flip side - these observations beg the question of what leads to software that is successful, on a community level? I think a few things can happen. Either someone puts time and energy into establishing community, period, meaning bringing together people that are working on common goals and explicitly asking “How can we do this together,” or what I’ve seen with more commercial open source - having enough money or power that you can create strong branding and community just by way of having the funds for it. I’ve talked about this a <a href="https://vsoch.github.io/2019/transparency/" target="_blank">few times before</a> and it’s not necessarily bad, but it’s unfair at best. Software that maybe would not be successful by its own merit rises to the top, and really great software that doesn’t have those resources does not. That said, I’ve also seen sort of mediocre software get much better and earn its reputation, so I can’t say it’s a completely wrong dynamic.</p>
+
+<h3 id="is-the-answer-mooooar-metadata">Is the answer Mooooar Metadata?</h3>
+
+<p>As we design the “perfect set of information” we want provided for any piece of software, we need to put people first.
+We have to ask ourselves what people are willing to do, and generally people don’t want to spend inordinate amounts of extra time defining metadata or inputs/outputs for their custom scripts. This was a point also brought up by <a href="https://twitter.com/orchid00" target="_blank">Paula</a> in the SciCodes meeting and I am 100% behind it. If we require extensive metadata about software, it needs to be done in an automated fashion. In practice when I think of archives for software, I’m just not that motivated to provide more than the absolute minimum to click the “submit” button.</p>
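+
+<p>As a tiny sketch of what “automated” could look like (my own illustration, not an existing registry feature - <code class="language-plaintext highlighter-rouge">OWNER/REPO</code> is a placeholder, and this assumes curl and jq are available), a registry could pull the basics from the hosting platform instead of asking the human:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Scrape basic software metadata from the GitHub API instead of typing it in
+$ curl -s https://api.github.com/repos/OWNER/REPO | jq '{name, description, license: .license.spdx_id, url: .html_url}'
+</code></pre></div>
+</div>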
+
+<h2 id="do-people-know-what-they-want">Do people know what they want?</h2>
+
+<p>One of the hardest things about this kind of problem is that people don’t often know what they want.
+And actually - I’d extend that to software in general. Think of common tools like git (version control) or containers.
+Could most people have told you in advance about the designs for these tools? I suspect likely not.
+This is often the game that software developers play - we imagine new ways of doing things that scratch an itch
+or solve a problem that we have, and then hand over our duct-tape-laden prototype to others and we’re like
+hey, is this useful to you? And often the response is radio silence, but then sometimes it’s a resounding, “WoW, yes!”
+So I’m going to throw out this idea that people generally don’t know what they want until they see it, touch it and try it.
+This is also why I want to inspire you to take some time to think about your specific needs and motivation for wanting
+(on a high level) to browse and interact with research software. What are the compelling reasons for this registry,
+for you?</p>
+
+<p>This is actually really fun to think about, because what even is a research software registry?
+Is it a place to find software to plug into workflows? Does it provide ABI or more general function signatures to help you plug into workflows? Does it provide a citation? A container? An interactive environment? Dependency graph? Something else? This is indeed why this problem is so hard - there are so many ways to think about this basic concept. And that’s kind of what makes it fun too? But also what makes it hard. Personally speaking, since I’m more interested in building things I find myself ruminating about details for a specific use case. And since I’m a developer and craving better support for developer environments, this tends to be where my brain goes. And have you noticed I haven’t given
+a direct answer for what is a research software registry? It’s 1. because I don’t know, and 2. because we are trying to define a registry for a kind of output that we don’t even have an agreed upon definition for yet! So perhaps the definition will happen on the level of the deployment or institution? Anyway, I hope you take the opportunity to discuss with your peers, pets, and even yourself, to try and answer this question.</p>
+
+<h2 id="summary">Summary</h2>
+
+<p>To summarize, I’m spending a lot of time thinking about this, and less in an “I’m an academic that wants DOIs and metadata” and more in a “I am a software engineer that wants to build something that I actually find useful.” Might I scratch itches along the way? Sure. And I do have some early ideas that I plan to hack on before sharing publicly. In the meantime, I do hope you are interested in some of these ideas and take time to write or introspect yourself.</p>
+
+<p>And on a higher level, I really like this format of writing and speaking, where the speaking isn’t formal enough to be a talk that you put together and practice for weeks (I put this all together in an afternoon) but it still is a media format that literally gives a voice.</p>
+
+
+
+
+    MNT Reform 2 - part deux
+
+    2022-06-09T01:06:51-06:00
+    https://hpc.social/2022/mnt-reform-2-part-deux
+    <p>A few days back I posted some of my <a href="https://www.gaborsamu.com/blog/neunundneunzig_reform/">initial thoughts</a> of the MNT Reform 2 laptop which just
+recently arrived. I ran the usual battery of tests on the laptop including the High Performance
+Linpack (HPL) of course just for kicks.</p>
+
+<p>At that time, I made no attempt to optimize HPL. I simply went with the OS supplied gcc and
+math libraries. My next step was to look at how I could improve my HPL result using
+the Arm compiler for Linux and the Arm performance libraries.
Here I&rsquo;ll walk through those
+steps from installing the Arm tools, to compiling and running HPL - and all of the small
+details in between.</p>
+
+<p>(1) To start, I downloaded the latest version of the Arm compiler for Linux package from <a href="https://developer.arm.com/Tools%20and%20Software/Arm%20Compiler%20for%20Linux">here</a>.
+This was the package with the filename: <em>arm-compiler-for-linux_22.0.2_Ubuntu-20.04_aarch64.tar</em>.</p>
+
+<p>(2) After uncompressing <em>arm-compiler-for-linux_22.0.2_Ubuntu-20.04_aarch64.tar</em>, I ran the
+installation command <em>./arm-compiler-for-linux_22.0.2_Ubuntu-20.04.sh -a</em> which installed the
+software to <em>/opt/arm</em> on the system. <strong>Note</strong> that the Arm compilers for Linux ship with
+module files to make setting up the environment for compiling easy. To support this
+I had to install the OS environment-modules package with <em>apt-get install environment-modules</em>.</p>
+
+<p>(3) In order to load the module for the Arm compiler for Linux, the following steps are
+necessary. This assumes that the Arm compiler for Linux is installed in <em>/opt/arm</em>.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">
+root@reform:/# module avail
+----------------------------------- /usr/share/modules/modulefiles ------------------------------------
+dot module-git module-info modules null use.own
+
+Key:
+modulepath
+root@reform:/# export MODULEPATH=/opt/arm/modulefiles:$MODULEPATH
+root@reform:/# module avail
+---------------------------------------- /opt/arm/modulefiles -----------------------------------------
+acfl/22.0.2 binutils/11.2.0 gnu/11.2.0
+
+----------------------------------- /usr/share/modules/modulefiles ------------------------------------
+dot module-git module-info modules null use.own
+
+Key:
+modulepath
+root@reform:/# module load acfl/22.0.2
+Loading acfl/22.0.2
+  Loading requirement: binutils/11.2.0
+root@reform:/# echo $PATH
+/opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/bin:/opt/arm/gcc-11.2.0_Generic-AArch64_Ubuntu-20.04_aarch64-linux/binutils_bin:/root/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+root@reform:/# armclang --version
+Arm C/C++/Fortran Compiler version 22.0.2 (build number 1776) (based on LLVM 13.0.0)
+Target: aarch64-unknown-linux-gnu
+Thread model: posix
+InstalledDir: /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/bin</code></pre></div>
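+
+<p>As a small aside (my own note, not part of the original steps): rather than exporting MODULEPATH in every new shell, it could be persisted, assuming bash is the login shell:</p>
+
+<div class="highlight"><pre><code class="language-bash"># Make the Arm modulefiles visible in future shells
+echo 'export MODULEPATH=/opt/arm/modulefiles:$MODULEPATH' &gt;&gt; ~/.bashrc</code></pre></div>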
+
+<p>(4) Now we shift our focus to Open MPI. Open MPI is an open source distribution of the message
+passing interface (MPI) library for writing parallel applications. We will compile HPL against this
+Open MPI version. For this, I downloaded the latest Open MPI version (4.1.4) from <a href="https://www.open-mpi.org/">here</a>.</p>
+
+<p>By default, Open MPI compiles with support for the SLURM workload manager. My Reform has
+IBM Spectrum LSF installed as the workload scheduler. In order to enable LSF support in Open MPI, we
+need to specify the appropriate configure flags (see below).</p>
+
+<div class="highlight"><pre><code class="language-plaintext">root@reform:/opt/HPC/openmpi-4.1.4# ./configure --prefix=/opt/HPC/openmpi-4.1.4 --with-lsf=/opt/ibm/lsf/10.1 --with-lsf-libdir=/opt/ibm/lsf/10.1/linux3.12-glibc2.17-armv8/lib
+
+root@reform:/opt/HPC/openmpi-4.1.4# make -j 4
+...
+...
+
+root@reform:/opt/HPC/openmpi-4.1.4# make install
+...
+...</code></pre></div>
+
+<p>(5) After completing the compilation of Open MPI, the <em>ompi_info</em> command is run to check if
+support for LSF has been enabled. Note that you must source the LSF environment
+(i.e. . ./profile.lsf) before running <em>ompi_info</em> or the LSF libraries won&rsquo;t be found.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">root@reform:/opt/HPC/openmpi-4.1.4# ./bin/ompi_info |grep -i lsf
+  Configure command line: '--prefix=/opt/HPC/openmpi-4.1.4' '--with-lsf=/opt/ibm/lsf/10.1' '--with-lsf-libdir=/opt/ibm/lsf/10.1/linux3.12-glibc2.17-armv8/lib'
+  MCA ess: lsf (MCA v2.1.0, API v3.0.0, Component v4.1.4)
+  MCA plm: lsf (MCA v2.1.0, API v2.0.0, Component v4.1.4)
+  MCA ras: lsf (MCA v2.1.0, API v2.0.0, Component v4.1.4)</code></pre></div>
+
+<p>(6) Next, I downloaded the latest HPL package from <a href="https://www.netlib.org/benchmark/hpl/">here</a>.
+I uncompressed the package <em>hpl-2.3.tar.gz</em> in the /opt/HPC directory. Next, I had to create
+a new Makefile for HPL which would use the Arm compiler for Linux and optimized math libraries.
+A copy of <em>Make.imx8qm</em> follows below.</p>
+
+<!-- raw HTML omitted -->
+<div class="highlight"><pre><code class="language-plaintext">#
+# -- High Performance Computing Linpack Benchmark (HPL)
+# HPL - 2.3 - December 2, 2018
+# Antoine P. Petitet
+# University of Tennessee, Knoxville
+# Innovative Computing Laboratory
+# (C) Copyright 2000-2008 All Rights Reserved
+#
+# -- Copyright notice and Licensing terms:
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions, and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# 3. All advertising materials mentioning features or use of this
+# software must display the following acknowledgement:
+# This product includes software developed at the University of
+# Tennessee, Knoxville, Innovative Computing Laboratory.
+#
+# 4. The name of the University, the name of the Laboratory, or the
+# names of its contributors may not be used to endorse or promote
+# products derived from this software without specific written
+# permission.
+#
+# -- Disclaimer:
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
+# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# ###################################################################### +# +# ---------------------------------------------------------------------- +# - shell -------------------------------------------------------------- +# ---------------------------------------------------------------------- +# +SHELL = /bin/sh +# +CD = cd +CP = cp +LN_S = ln -s +MKDIR = mkdir +RM = /bin/rm -f +TOUCH = touch +# +# ---------------------------------------------------------------------- +# - Platform identifier ------------------------------------------------ +# ---------------------------------------------------------------------- +# +ARCH = imx8qm +# +# ---------------------------------------------------------------------- +# - HPL Directory Structure / HPL library ------------------------------ +# ---------------------------------------------------------------------- +# +TOPdir = /opt/HPC/hpl-2.3 +INCdir = /opt/HPC/hpl-2.3/include +BINdir = /opt/HPC/hpl-2.3/bin/$(ARCH) +LIBdir = /opt/HPC/hpl-2.3/lib/$(ARCH) +# +HPLlib = /opt/HPC/hpl-2.3/lib/libhpl.a +# +# ---------------------------------------------------------------------- +# - Message Passing library (MPI) -------------------------------------- +# ---------------------------------------------------------------------- +# MPinc tells the C compiler where to find the Message Passing library +# header files, MPlib is defined to be the name of the library to be +# used. The variable MPdir is only used for defining MPinc and MPlib. +# +MPdir = /opt/HPC/openmpi-4.1.4 +MPinc = /opt/HPC/openmpi-4.1.4/include +MPlib = /opt/HPC/openmpi-4.1.4/lib/libmpi.so +# +# ---------------------------------------------------------------------- +# - Linear Algebra library (BLAS or VSIPL) ----------------------------- +# ---------------------------------------------------------------------- +# LAinc tells the C compiler where to find the Linear Algebra library +# header files, LAlib is defined to be the name of the library to be +# used. The variable LAdir is only used for defining LAinc and LAlib. +# +LAdir = +LAinc = +# LAlib = -lamath -lm -mcpu=native +LAlib = +# +# ---------------------------------------------------------------------- +# - F77 / C interface -------------------------------------------------- +# ---------------------------------------------------------------------- +# You can skip this section if and only if you are not planning to use +# a BLAS library featuring a Fortran 77 interface. Otherwise, it is +# necessary to fill out the F2CDEFS variable with the appropriate +# options. **One and only one** option should be chosen in **each** of +# the 3 following categories: +# +# 1) name space (How C calls a Fortran 77 routine) +# +# -DAdd_ : all lower case and a suffixed underscore (Suns, +# Intel, ...), [default] +# -DNoChange : all lower case (IBM RS6000), +# -DUpCase : all upper case (Cray), +# -DAdd__ : the FORTRAN compiler in use is f2c. +# +# 2) C and Fortran 77 integer mapping +# +# -DF77_INTEGER=int : Fortran 77 INTEGER is a C int, [default] +# -DF77_INTEGER=long : Fortran 77 INTEGER is a C long, +# -DF77_INTEGER=short : Fortran 77 INTEGER is a C short. 
+#
+# 3) Fortran 77 string handling
+#
+# -DStringSunStyle : The string address is passed at the string loca-
+# tion on the stack, and the string length is then
+# passed as an F77_INTEGER after all explicit
+# stack arguments, [default]
+# -DStringStructPtr : The address of a structure is passed by a
+# Fortran 77 string, and the structure is of the
+# form: struct {char *cp; F77_INTEGER len;},
+# -DStringStructVal : A structure is passed by value for each Fortran
+# 77 string, and the structure is of the form:
+# struct {char *cp; F77_INTEGER len;},
+# -DStringCrayStyle : Special option for Cray machines, which uses
+# Cray fcd (fortran character descriptor) for
+# interoperation.
+#
+F2CDEFS =
+#
+# ----------------------------------------------------------------------
+# - HPL includes / libraries / specifics -------------------------------
+# ----------------------------------------------------------------------
+#
+HPL_INCLUDES = -I$(INCdir) -I$(INCdir)/$(ARCH) $(LAinc) -I$(MPinc) -I/opt/arm/armpl-22.0.2_AArch64_Ubuntu-20.04_gcc_aarch64-linux/include/
+HPL_LIBS = $(HPLlib) $(LAlib) $(MPlib)
+#
+# - Compile time options -----------------------------------------------
+#
+# -DHPL_COPY_L force the copy of the panel L before bcast;
+# -DHPL_CALL_CBLAS call the cblas interface;
+# -DHPL_CALL_VSIPL call the vsip library;
+# -DHPL_DETAILED_TIMING enable detailed timers;
+#
+# By default HPL will:
+# *) not copy L before broadcast,
+# *) call the BLAS Fortran 77 interface,
+# *) not display detailed timing information.
+#
+HPL_OPTS =
+#
+# ----------------------------------------------------------------------
+#
+HPL_DEFS = $(F2CDEFS) $(HPL_OPTS) $(HPL_INCLUDES)
+#
+# ----------------------------------------------------------------------
+# - Compilers / linkers - Optimization flags ---------------------------
+# ----------------------------------------------------------------------
+#
+CC = armclang
+CCNOOPT = $(HPL_DEFS)
+CCFLAGS = $(HPL_DEFS) -O3 -larmpl_lp64 -lamath -lm
+#
+LINKER = armclang -O3 -armpl -lamath -lm
+LINKFLAGS = $(CCFLAGS)
+#
+ARCHIVER = ar
+ARFLAGS = r
+RANLIB = echo
+#
+# ----------------------------------------------------------------------</code></pre></div>
+
+<!-- raw HTML omitted -->
+<p>(7) Compiling HPL with the above Makefile is as simple as running the appropriate <em>make</em> command and
+specifying the architecture <em>imx8qm</em>.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">root@reform:/opt/HPC/hpl-2.3# make arch=imx8qm
+...
+...</code></pre></div>
+
+<p>(8) Barring any errors, we should now have an <em>xhpl</em> binary under the /opt/HPC/hpl-2.3/bin/imx8qm
+directory.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">root@reform:/opt/HPC/hpl-2.3/bin/imx8qm# pwd
+/opt/HPC/hpl-2.3/bin/imx8qm
+root@reform:/opt/HPC/hpl-2.3/bin/imx8qm# ls -la
+total 156
+drwxr-xr-x 2 root root 4096 Jun 8 13:30 .
+drwxr-xr-x 3 root root 4096 Jun 8 13:20 ..
+-rw-r--r-- 1 root root 1454 Jun 8 13:30 HPL.dat
+-rwxr-xr-x 1 root root 146960 Jun 8 13:24 xhpl
+root@reform:/opt/HPC/hpl-2.3/bin/imx8qm# ldd ./xhpl
+ linux-vdso.so.1 (0x0000007faa7b1000)
+ libamath_aarch64.so =&gt; /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libamath_aarch64.so (0x0000007faa5ef000)
+ libm.so.6 =&gt; /lib/aarch64-linux-gnu/libm.so.6 (0x0000007faa520000)
+ libarmpl_lp64.so =&gt; /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/lib/clang/13.0.0/armpl_links/lib/libarmpl_lp64.so (0x0000007fa3cd5000)
+ libmpi.so.40 =&gt; /usr/lib/aarch64-linux-gnu/libmpi.so.40 (0x0000007fa3b8f000)
+ libarmflang.so =&gt; /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libarmflang.so (0x0000007fa3728000)
+ libomp.so =&gt; /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libomp.so (0x0000007fa3649000)
+ librt.so.1 =&gt; /lib/aarch64-linux-gnu/librt.so.1 (0x0000007fa3631000)
+ libdl.so.2 =&gt; /lib/aarch64-linux-gnu/libdl.so.2 (0x0000007fa361d000)
+ libpthread.so.0 =&gt; /lib/aarch64-linux-gnu/libpthread.so.0 (0x0000007fa35ed000)
+ libastring_aarch64.so =&gt; /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libastring_aarch64.so (0x0000007fa35da000)
+ libc.so.6 =&gt; /lib/aarch64-linux-gnu/libc.so.6 (0x0000007fa345f000)
+ /lib/ld-linux-aarch64.so.1 (0x0000007faa77e000)
+ libgcc_s.so.1 =&gt; /opt/arm/gcc-11.2.0_Generic-AArch64_Ubuntu-20.04_aarch64-linux/lib64/libgcc_s.so.1 (0x0000007fa343a000)
+ libopen-rte.so.40 =&gt; /usr/lib/aarch64-linux-gnu/libopen-rte.so.40 (0x0000007fa336c000)
+ libopen-pal.so.40 =&gt; /usr/lib/aarch64-linux-gnu/libopen-pal.so.40 (0x0000007fa32aa000)
+ libhwloc.so.15 =&gt; /usr/lib/aarch64-linux-gnu/libhwloc.so.15 (0x0000007fa3245000)
+ libstdc++.so.6 =&gt; /opt/arm/gcc-11.2.0_Generic-AArch64_Ubuntu-20.04_aarch64-linux/lib64/libstdc++.so.6 (0x0000007fa3030000)
+ libz.so.1 =&gt; /lib/aarch64-linux-gnu/libz.so.1 (0x0000007fa3006000)
+ libevent_core-2.1.so.7 =&gt; /usr/lib/aarch64-linux-gnu/libevent_core-2.1.so.7 (0x0000007fa2fbf000)
+ libutil.so.1 =&gt; /lib/aarch64-linux-gnu/libutil.so.1 (0x0000007fa2fab000)
+ libevent_pthreads-2.1.so.7 =&gt; /usr/lib/aarch64-linux-gnu/libevent_pthreads-2.1.so.7 (0x0000007fa2f98000)
+ libudev.so.1 =&gt; /usr/lib/aarch64-linux-gnu/libudev.so.1 (0x0000007fa2f5e000)</code></pre></div>
+
+<p>(9) A default HPL.dat file should be present in the directory /opt/HPC/hpl-2.3/bin/imx8qm. The file
+HPL.dat is used to tune the benchmark problem size according to the system. A copy of the
+HPL.dat file I created follows below. This is suitable for the 4 GB memory configuration of
+Reform with 4 processor cores.</p>
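+
+<p>As a quick back-of-the-envelope check on that problem size (my own arithmetic, not from the original post): the HPL matrix is N x N double precision values, and the usual guidance is to size N so the matrix fills most of available memory.</p>
+
+<div class="highlight"><pre><code class="language-bash"># Memory held by the N=19000 matrix: N^2 * 8 bytes, in GiB
+echo "scale=2; 19000^2 * 8 / 1024^3" | bc      # roughly 2.7 GiB, about two thirds of 4 GiB
+
+# Rule-of-thumb ceiling for N at ~80% of 4 GiB of RAM
+echo "sqrt(0.80 * 4 * 1024^3 / 8)" | bc -l     # roughly 20700, so N=19000 leaves headroom</code></pre></div>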
+
+<!-- raw HTML omitted -->
+<div class="highlight"><pre><code class="language-plaintext">HPLinpack benchmark input file
+Innovative Computing Laboratory, University of Tennessee
+HPL.out output file name (if any)
+6 device out (6=stdout,7=stderr,file)
+1 # of problems sizes (N)
+19000 Ns
+1 # of NBs
+192 NBs
+0 PMAP process mapping (0=Row-,1=Column-major)
+1 # of process grids (P x Q)
+2 Ps
+2 Qs
+16.0 threshold
+1 # of panel fact
+2 PFACTs (0=left, 1=Crout, 2=Right)
+1 # of recursive stopping criterium
+4 NBMINs (&gt;= 1)
+1 # of panels in recursion
+2 NDIVs
+1 # of recursive panel fact.
+1 RFACTs (0=left, 1=Crout, 2=Right)
+1 # of broadcast
+1 BCASTs (0=1rg,1=1rM,2=2rg,3=2rM,4=Lng,5=LnM)
+1 # of lookahead depth
+1 DEPTHs (&gt;=0)
+2 SWAP (0=bin-exch,1=long,2=mix)
+64 swapping threshold
+0 L1 in (0=transposed,1=no-transposed) form
+0 U in (0=transposed,1=no-transposed) form
+1 Equilibration (0=no,1=yes)
+8 memory alignment in double (&gt; 0)
+##### This line (no. 32) is ignored (it serves as a separator). ######
+0 Number of additional problem sizes for PTRANS
+1200 10000 30000 values of N
+0 number of additional blocking sizes for PTRANS
+40 9 8 13 13 20 16 32 64 values of NB</code></pre></div>
+
+<!-- raw HTML omitted -->
+<p>(10) Now we&rsquo;re ready to execute the appropriate <em>mpirun</em> command to run the <em>xhpl</em> executable.
+We specify -np 4 to run across the 4 cores of the processor. With this better optimized run we&rsquo;re
+seeing ~8.9 GFLOPS performance compared with ~4 GFLOPS for my previous runs where HPL was compiled
+with the OS supplied GCC and Math libraries (ATLAS). Note that as this is roughly double the GFLOPS
+from my previous runs, it <strong>appears</strong> that there is an issue with double precision or perhaps
+vectorization with the non-optimized runs.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">gsamu@reform:/opt/HPC/hpl-2.3/bin/imx8qm$ mpirun -np 4 ./xhpl
+================================================================================
+HPLinpack 2.3 -- High-Performance Linpack benchmark -- December 2, 2018
+Written by A. Petitet and R. Clint Whaley, Innovative Computing Laboratory, UTK
+Modified by Piotr Luszczek, Innovative Computing Laboratory, UTK
+Modified by Julien Langou, University of Colorado Denver
+================================================================================
+
+An explanation of the input/output parameters follows:
+T/V : Wall time / encoded variant.
+N : The order of the coefficient matrix A.
+NB : The partitioning blocking factor.
+P : The number of process rows.
+Q : The number of process columns.
+Time : Time in seconds to solve the linear system.
+Gflops : Rate of execution for solving the linear system.
+
+The following parameter values will be used:
+
+N : 19000
+NB : 192
+PMAP : Row-major process mapping
+P : 2
+Q : 2
+PFACT : Right
+NBMIN : 4
+NDIV : 2
+RFACT : Crout
+BCAST : 1ringM
+DEPTH : 1
+SWAP : Mix (threshold = 64)
+L1 : transposed form
+U : transposed form
+EQUIL : yes
+ALIGN : 8 double precision words
+
+--------------------------------------------------------------------------------
+
+- The matrix A is randomly generated for each test.
+- The following scaled residual check will be computed:
+ ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N )
+- The relative machine precision (eps) is taken to be 1.110223e-16
+- Computational tests pass if scaled residuals are less than 16.0
+
+================================================================================
+T/V N NB P Q Time Gflops
+--------------------------------------------------------------------------------
+WR11C2R4 19000 192 2 2 513.92 8.8987e+00
+HPL_pdgesv() start time Wed Jun 8 21:28:07 2022
+
+HPL_pdgesv() end time Wed Jun 8 21:36:41 2022
+
+--------------------------------------------------------------------------------
+||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)= 4.89711678e-03 ......
PASSED +================================================================================ + +Finished 1 tests with the following results: + 1 tests completed and passed residual checks, + 0 tests completed and failed residual checks, + 0 tests skipped because of illegal input values. +-------------------------------------------------------------------------------- + +End of Tests. +================================================================================</code></pre></div> + +<p>(11) Finally, we submit the same run of Linpack but through Spectrum LSF. The LSF <em>bsub</em> command +invocation is shown below and the resulting output.</p> + +<div class="highlight"><pre><code class="language-plaintext">gsamu@reform:~$ bsub -n 4 -I -m reform "cd /opt/HPC/hpl-2.3/bin/imx8qm ; mpirun ./xhpl" +Job &lt;35301&gt; is submitted to default queue &lt;interactive&gt;. +&lt;&lt;Waiting for dispatch ...&gt;&gt; +&lt;&lt;Starting on reform&gt;&gt; +================================================================================ +HPLinpack 2.3 -- High-Performance Linpack benchmark -- December 2, 2018 +Written by A. Petitet and R. Clint Whaley, Innovative Computing Laboratory, UTK +Modified by Piotr Luszczek, Innovative Computing Laboratory, UTK +Modified by Julien Langou, University of Colorado Denver +================================================================================ + +An explanation of the input/output parameters follows: +T/V : Wall time / encoded variant. +N : The order of the coefficient matrix A. +NB : The partitioning blocking factor. +P : The number of process rows. +Q : The number of process columns. +Time : Time in seconds to solve the linear system. +Gflops : Rate of execution for solving the linear system. + +The following parameter values will be used: + +N : 19000 +NB : 192 +PMAP : Row-major process mapping +P : 2 +Q : 2 +PFACT : Right +NBMIN : 4 +NDIV : 2 +RFACT : Crout +BCAST : 1ringM +DEPTH : 1 +SWAP : Mix (threshold = 64) +L1 : transposed form +U : transposed form +EQUIL : yes +ALIGN : 8 double precision words + +-------------------------------------------------------------------------------- + +- The matrix A is randomly generated for each test. +- The following scaled residual check will be computed: + ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N ) +- The relative machine precision (eps) is taken to be 1.110223e-16 +- Computational tests pass if scaled residuals are less than 16.0 + +================================================================================ +T/V N NB P Q Time Gflops +-------------------------------------------------------------------------------- +WR11C2R4 19000 192 2 2 518.02 8.8283e+00 +HPL_pdgesv() start time Thu Jun 9 09:33:35 2022 + +HPL_pdgesv() end time Thu Jun 9 09:42:13 2022 + +-------------------------------------------------------------------------------- +||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)= 4.89711678e-03 ...... PASSED +================================================================================ + +Finished 1 tests with the following results: + 1 tests completed and passed residual checks, + 0 tests completed and failed residual checks, + 0 tests skipped because of illegal input values. +-------------------------------------------------------------------------------- + +End of Tests. 
+================================================================================</code></pre></div>
+
+
+
+
+    Neunundneunzig MNT Reform(s)
+
+    2022-06-06T18:54:07-06:00
+    https://hpc.social/2022/neunundneunzig-mnt-reform-s-
+    <p>I&rsquo;ll admit it. I sat on the fence for a long time before placing an order
+for the MNT Reform 2 laptop. At the time, I was in the market for a laptop
+as my 2 Macbook Pro retina laptops were repurposed for online schooling
+for my children during the pandemic (and as it turns out were never
+returned to me).</p>
+
+<p>I have fairly extensive experience with Arm-based systems and was aware of potential
+angst with custom distros when specific system support is not in the Linux mainline.
+Yes, this has been pretty much addressed - for servers with the Arm SBSA
+specifications. However the MNT Reform 2 was never marketed as SBSA.</p>
+
+<p>With eyes wide open, I ultimately decided to go ahead and order an MNT Reform 2.
+My laptop needs were really for light coding/scripting, occasional browsing,
+writing (blogs, etc), tinkering and as a terminal to my other systems. Sure,
+these requirements could have been met by some less expensive x86 laptops or
+even Chromebooks. But those are distinctly lacking a cool factor. What really helped
+to reach this decision was the following:</p>
+
+<ul>
+<li>Put together by a small, enthusiastic team</li>
+<li>A proper keyboard and cool trackball in a laptop</li>
+<li>Intel outside</li>
+<li>Swappable CPUs (there are some drop-in replacements in the works)</li>
+<li>User-replaceable 18650 batteries</li>
+<li>Antithesis of paper-thin laptops</li>
+</ul>
+<p>Of course, knowing that the Reform is based on the NXP/Freescale i.MX8MQ with
+4 x Arm Cortex-A53 cores (1.5 GHz), I knew it was not going to be a barn burner
+in terms of performance.</p>
+
+<p><strong>Late to the party</strong></p>
+
+<p>Because of my procrastination, I only received my Reform this past week.
+Given that they&rsquo;ve been shipping for some time, I definitely had that
+<em>late to the party feeling</em>. In a way this was good though as all of the
+write-ups and videos that have been posted over time gave me a good
+idea of what to expect. So in this blog I don&rsquo;t expect to cover
+anything groundbreaking - just my experience so far.</p>
+
+<p>Much has been written about the packaging of the system. And in this sense
+it didn&rsquo;t disappoint. You could tell that it was packaged with great care
+by the team at MNT and it was frankly a very enjoyable experience to
+unwrap the components. I&rsquo;ve included a collage of photos of the Reform
+at the end of this blog.</p>
+
+<p>And that wasn&rsquo;t the only fun. Right away I had to remove the transparent bottom
+cover of the Reform in order to connect up the batteries and install the Wifi and
+NVMe SSD. At this time I also replaced the plastic port covers with the optional metal
+versions that I ordered earlier this year. Once this was done, the system sprang to
+life and I was able to very quickly get it booting from the encrypted NVMe thanks
+to the detailed handbook that was also included in the bundle I purchased and
+tips on the <a href="https://community.mnt.re/">MNT Community site</a>.</p>
+
+<p>As for the keyboard, I really enjoy the tactile feel of it. It&rsquo;s quite a refreshing
+change from the mushy keyboard on the MacBook Air M1 that I use for work.
And
+although I ordered both the trackball and trackpad for the Reform, I&rsquo;ll likely
+stick with the trackball for now as it&rsquo;s just a pleasure to use. Note that
+my Reform appears to have the updated trackball, which has ball bearings for a
+smoother action.</p>
+
+<p><strong>Fanless bitte</strong></p>
+
+<p>Of course, one of the first things an #HPC-minded person like myself will do with
+a system is run <a href="https://www.netlib.org/benchmark/hpl/">High-performance Linpack</a> (HPL) on it. This is a force of habit for me,
+and I thought it might also prove to be a good way to burn in the system.</p>
+
+<p>So I started with <a href="https://www.open-mpi.org/">Open MPI</a>. I downloaded and compiled Open MPI v4.1.4. This completed
+without a hitch. Note that I didn&rsquo;t specify any flags when configuring Open MPI
+other than a prefix for the installation location (under $HOME).</p>
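+<p>For completeness, the build itself boiled down to the usual <em>configure</em> and <em>make</em> steps.
+A minimal sketch (the tarball name and install prefix here are just illustrative):</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ tar xf openmpi-4.1.4.tar.bz2
+$ cd openmpi-4.1.4
+$ ./configure --prefix=$HOME/openmpi-4.1.4
+$ make -j4 all
+$ make install
+$ export PATH=$HOME/openmpi-4.1.4/bin:$PATH</code></pre></div>
+
+<p>HPL was easy to compile as well. Note that I simply used the OS ATLAS and BLAS
+libraries and the OS-supplied compiler(s). So we can say that this is not an
+optimized build of HPL.</p>
+
+<p>And below we see the results of the run of xhpl, which achieved a result of
+4.2 GFLOPS.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ mpirun -np 4 ./xhpl
+================================================================================
+HPLinpack 2.3 -- High-Performance Linpack benchmark -- December 2, 2018
+Written by A. Petitet and R. Clint Whaley, Innovative Computing Laboratory, UTK
+Modified by Piotr Luszczek, Innovative Computing Laboratory, UTK
+Modified by Julien Langou, University of Colorado Denver
+================================================================================
+
+An explanation of the input/output parameters follows:
+T/V : Wall time / encoded variant.
+N : The order of the coefficient matrix A.
+NB : The partitioning blocking factor.
+P : The number of process rows.
+Q : The number of process columns.
+Time : Time in seconds to solve the linear system.
+Gflops : Rate of execution for solving the linear system.
+
+The following parameter values will be used:
+
+N : 19000
+NB : 192
+PMAP : Row-major process mapping
+P : 2
+Q : 2
+PFACT : Right
+NBMIN : 4
+NDIV : 2
+RFACT : Crout
+BCAST : 1ringM
+DEPTH : 1
+SWAP : Mix (threshold = 64)
+L1 : transposed form
+U : transposed form
+EQUIL : yes
+ALIGN : 8 double precision words
+
+--------------------------------------------------------------------------------
+
+- The matrix A is randomly generated for each test.
+- The following scaled residual check will be computed:
+ ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N )
+- The relative machine precision (eps) is taken to be 1.110223e-16
+- Computational tests pass if scaled residuals are less than 16.0
+
+================================================================================
+T/V N NB P Q Time Gflops
+--------------------------------------------------------------------------------
+WR11C2R4 19000 192 2 2 1073.27 4.2610e+00
+HPL_pdgesv() start time Mon Jun 6 12:08:36 2022
+
+HPL_pdgesv() end time Mon Jun 6 12:26:30 2022
+
+--------------------------------------------------------------------------------
+||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)= 1.18409443e-03 ...... PASSED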
+================================================================================
+
+Finished 1 tests with the following results:
+ 1 tests completed and passed residual checks,
+ 0 tests completed and failed residual checks,
+ 0 tests skipped because of illegal input values.
+--------------------------------------------------------------------------------
+
+End of Tests.
+================================================================================</code></pre></div>
+
+<p>Just for kicks, I&rsquo;ve also included a screenshot of <em>lstopo</em>, which is part of the
+<a href="https://www.open-mpi.org/projects/hwloc/">Portable Hardware Locality (hwloc)</a> project. I am a bit confused as to why the
+L1 and L2 cache sizes are shown as zero in the output, though.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/reform_lstopo.jpg" />
+</figure>
+
+<p>I&rsquo;ve included the output from some system commands below, including <em>lscpu</em>, <em>lspci</em> and <em>lsusb</em>.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ lscpu
+Architecture: aarch64
+CPU op-mode(s): 32-bit, 64-bit
+Byte Order: Little Endian
+CPU(s): 4
+On-line CPU(s) list: 0-3
+Thread(s) per core: 1
+Core(s) per socket: 4
+Socket(s): 1
+NUMA node(s): 1
+Vendor ID: ARM
+Model: 4
+Model name: Cortex-A53
+Stepping: r0p4
+CPU max MHz: 1500.0000
+CPU min MHz: 1000.0000
+BogoMIPS: 16.66
+NUMA node0 CPU(s): 0-3
+Vulnerability Itlb multihit: Not affected
+Vulnerability L1tf: Not affected
+Vulnerability Mds: Not affected
+Vulnerability Meltdown: Not affected
+Vulnerability Spec store bypass: Not affected
+Vulnerability Spectre v1: Mitigation; \__user pointer sanitization
+Vulnerability Spectre v2: Not affected
+Vulnerability Srbds: Not affected
+Vulnerability Tsx async abort: Not affected
+Flags: fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid</code></pre></div>
+
+<div class="highlight"><pre><code class="language-plaintext">$ lspci
+0000:00:00.0 PCI bridge: Synopsys, Inc. DWC_usb3 / PCIe bridge (rev 01)
+0000:01:00.0 Network controller: Qualcomm Atheros AR928X Wireless Network Adapter (PCI-Express) (rev 01)
+0001:00:00.0 PCI bridge: Synopsys, Inc. DWC_usb3 / PCIe bridge (rev 01)
+0001:01:00.0 Non-Volatile memory controller: Silicon Motion, Inc. SM2262/SM2262EN SSD Controller (rev 03)</code></pre></div>
+
+<div class="highlight"><pre><code class="language-plaintext">$ lsusb
+Bus 004 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
+Bus 003 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
+Bus 002 Device 002: ID 0451:8140 Texas Instruments, Inc. TUSB8041 4-Port Hub
+Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
+Bus 001 Device 004: ID 03eb:2041 Atmel Corp. LUFA Mouse Demo Application
+Bus 001 Device 003: ID 03eb:2042 Atmel Corp. LUFA Keyboard Demo Application
+Bus 001 Device 002: ID 0451:8142 Texas Instruments, Inc. TUSB8041 4-Port Hub
+Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub</code></pre></div>
+
+<p>So that&rsquo;s a very brief look at my initial experiences with the Reform laptop. I&rsquo;ve
+only scratched the surface here, but so far I&rsquo;m liking what I&rsquo;m seeing.
As for the
+<em>neunundneunzig</em> title reference, well, I suppose that&rsquo;s part of the vibe I got with
+the laptop.</p>
+
+<p>A few photos for your viewing pleasure!</p>
+
+<p><figure><img src="https://www.gaborsamu.com/images/reform_bottom.jpg" />
+</figure>
+
+<figure><img src="https://www.gaborsamu.com/images/reform_firstboot.jpg" />
+</figure>
+</p>
+
+
+
+
+ Life and leaving NERSC
+
+ 2022-05-27T06:42:00-06:00
+ https://hpc.social/2022/life-and-leaving-nersc
+ <p>When word started to spread that I was leaving my job at NERSC for Microsoft, a lot of people either directly or indirectly attributed my decision to being one motivated by money.  Rationalizing my decision to leave is certainly a lot easier with this "Glenn was lured away with bags of cash" narrative, but that wasn't really a factor when I chose to move on.  Rather, my decision is a reflection of where I see the world of HPC going in the coming decade and where I personally wanted to position myself.  For my own therapeutic reasons (and perhaps the benefit of anyone interested in what it's like to work within, and subsequently leave, the DOE HPC complex), I'll try to write it all out here.</p>
+<h2 style="text-align: left;">Working at NERSC</h2>
+<p>First things first: NERSC has been a wonderful place to work.</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<div style="text-align: center;"><b><span style="font-size: x-small;">A typical view from outside NERSC’s facility in Berkeley after work during the winter months.  Yes, it really does look like this.</span></b></div>
+<p>When I started in mid-2015, I came in with about three years of prior work experience (two at SDSC doing user support and one at a biotech startup) and knew a little bit about a lot of things in HPC.  But I didn’t really know the basics of I/O or storage–I couldn’t tell you what “POSIX I/O” really meant or how GPFS worked.  The fact that I got to help author <a href="https://www.nersc.gov/news-publications/nersc-news/nersc-center-news/2017/new-storage-2020-report-outlines-future-hpc-storage-vision/">NERSC’s ten-year strategy around storage</a> in just two years, was invited to present <a href="https://insidehpc.com/2019/08/designing-future-flash-storage-systems-for-hpc-and-beyond/">my view on how to bridge the gap between HPC and enterprise storage</a> at Samsung’s North American headquarters a year later, and was trusted to oversee <a href="https://www.nextplatform.com/2021/06/07/a-35-petabyte-all-flash-balancing-act/">the design and execution of the world’s first 35 petabyte all-flash Lustre file system</a> through my first four years is a testament to how much opportunity is available to learn and grow at NERSC.</p>
+<p>There are a couple of reasons for this.</p>
+<h3 style="text-align: left;">Stable funding</h3>
+<p>Perhaps foremost, NERSC (and DOE's Leadership Computing Facilities, ALCF and OLCF) enjoy healthy budgets and financial stability since worldwide leadership in scientific advancement is generally a national priority for both major political parties in the US.  This means that, regardless of who is president and which party holds majorities in Congress, the DOE HPC facilities can pay their employees and deploy new supercomputers.
This solid funding makes it much easier to invest in staff development and long-term planning; I was able to become a resident I/O expert at NERSC because I was never forced to chase after the funding du jour to make ends meet.  Congress trusts NERSC to allocate its funding responsibly, and NERSC prioritized letting me learn as much as I could without distraction.</p>
+<h3 style="text-align: left;">Instant credibility and access</h3>
+<p>Second, <a href="https://twitter.com/hpcprogrammer/status/1061278775353196544?s=20&amp;t=_YGQXWvykuCElqltJ-x09Q">having a NERSC affiliation gives you instant credibility and access</a> in many cases.  It's not necessarily fair, but it's definitely true.  Within my first year at NERSC, I was invited to give <a href="https://archive.siam.org/meetings/pp16/pp16_program.pdf">a presentation about I/O performance monitoring in Paris</a> because the organizer wanted a lineup of speakers from all the big players in HPC.  I had never been to Europe at that point in my life, but being the I/O guy from NERSC (and being able to present well!) was enough to get me there.  And it was during that trip to Paris that I got to meet--and literally have conversations over dinner with--<a href="https://www.linkedin.com/in/larry-kaplan-b101936">more</a> <a href="https://people.llnl.gov/tgamblin">industry</a> <a href="https://en.wikipedia.org/wiki/David_E._Keyes">bigshots</a> than I can remember.  And that trip to Paris was not an outlier; pandemic aside, NERSC let me go to Europe at least once or twice every year I've worked there.</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<div style="text-align: center;"><b><span style="font-size: x-small;">The first photo I ever took of Notre Dame on the first day I’d ever set foot in Europe.  NERSC sent me there less than a year after I started.</span></b></div>
+<p>Of course, this is not to say that every employee at a DOE HPC facility is wining and dining in Paris every summer.  Many of these opportunities are earned by showing the value of the work you’re doing, just like at any job.  But owing to healthy budgets, travel expenses are rarely the limiting factor in chasing after these opportunities.  In addition, going out into the world and talking about what you do is part of the job at a DOE facility; being a leader in the field of HPC is part of the mission of NERSC, ALCF, and OLCF, so doing high-risk, first-of-a-kind work <i>and telling the world about it</i> is uniquely valued within DOE in a way that it is not in industry.</p>
+<h3 style="text-align: left;">Smart people</h3>
+<p>A product of these two factors (stable budget and instant credibility) is coworkers and colleagues who are generally very experienced and capable.  There's an interesting mix of laissez-faire management and rigorous process-driven management as a result.</p>
+<p>Staff are generally given the freedom to choose their own destiny and focus on work that they enjoy much like in any academic environment; it's not hard to pick up passion projects or even move between groups if things get stale on a day-to-day basis.  Since everyone is working on their own slices of HPC, there's also easy access to world experts in different areas of technology if you need one.  For example, I recall once reviewing a storage system that appeared to rely on multiplexing two 12G SAS links over a single 24G SAS link.
After one email and a few hours, a coworker confirmed, complete with a citation to the SCSI standards, that this was totally possible.  Even if someone in-house didn't know the answer, I had direct access to an engineering manager at a leading storage vendor who owed me a favor and definitely would've known the answer.  It's really, really hard to find as many smart people within arm's reach in most other HPC centers. </p>
+<p>At the same time, there is rigorous federal oversight on major projects and procurements to ensure that taxpayer dollars are responsibly spent.  This is a double-edged sword because all of the reporting and reviews that go into <a href="https://www.energy.gov/articles/doe-build-next-generation-supercomputer-lawrence-berkeley-national-laboratory">massive</a> <a href="https://www.ornl.gov/news/us-department-energy-and-cray-deliver-record-setting-frontier-supercomputer-ornl">capital</a> <a href="https://www.energy.gov/articles/us-department-energy-and-intel-build-first-exascale-supercomputer">projects</a> make forward progress very slow at times.  All DOE HPC facilities review and re-review everything about these giant supercomputers before making a decision, so by the time the public sees a press release about a new supercomputer, lab staff have spent literal years going over every detail and risk.  It sometimes may not seem that way (how many problems has Aurora had?), but rest assured that every schedule slip or technology change the public hears about was preceded by countless hours of meetings about risk and cost minimization.  On the flip-side though, you have the opportunity to learn every gory detail about the system directly from the people who designed it.</p>
+<h3 style="text-align: left;">Pay</h3>
+<p>In <a href="https://www.bankrate.com/banking/federal-reserve/younger-workers-sharing-salaries/">true millennial fashion</a>, I think it's important to have an open discussion about the pay.  DOE labs pay more than any other HPC facility in the world as far as I am aware, and even in the San Francisco Bay Area, salary at NERSC is comparable to the base salaries offered by all the big tech companies.  You can get an idea of what entry-level salaries (think: first job after postdoc or a few years out of undergrad) look like by searching <a href="https://h1bdata.info/">H1B Visa postings</a>, and anecdotally, I'd wager that a typical HPC job at NERSC pays about 2x that of the same job at a typical US university and 3x-4x that of the same job at a British or European university.  All the labs pay about the same to boot, so an HPC job at somewhere like Oak Ridge can afford you a relatively luxurious lifestyle.</p>
+<p>Don't get me wrong though; affording to buy a Bay Area house on a single NERSC salary alone would be tough in the same way that buying a Bay Area house on any single salary would be.  And while NERSC's compensation is comparable to the <i>base</i> salary of the big tech companies, that base is about all you can get since DOE labs cannot offer equity or substantial bonuses.  This is less of a gap if you're just starting out, but anyone who's <a href="https://www.levels.fyi/">looked at compensation structures in tech</a> knows that stock-based compensation, not base salary, dominates total compensation as you move up.</p>
+<p>So, if money wasn't an issue for me and NERSC is such a great place to work, why would I ever leave?</p>
+<h2 style="text-align: left;">The road ahead for HPC</h2>
+<p>On one hand, HPC's future has never been brighter thanks to how much life (and money!)
the AI industry is bringing to the development of HPC technologies.  We have new <a href="https://vastdata.com/">all-flash</a> <a href="https://www.weka.io/">file systems</a>, <a href="https://developer.nvidia.com/blog/nvidia-hopper-architecture-in-depth/">gigantic GPUs</a>, awesome <a href="https://www.tomshardware.com/news/intels-sapphire-rapids-to-have-64-gigabytes-of-hbm2e-memory">CPU memory technologies</a>, and <a href="https://arxiv.org/abs/2205.12182">mixed-precision techniques</a> in the HPC space that were all directly driven by developments primarily intended for AI workloads.  On the other hand, leadership HPC appears to be engaging in unsustainable brinkmanship while midrange HPC is having its value completely undercut by cloud vendors.  I've <a href="https://glennklockwood.blogspot.com/2020/05/exascales-long-shadow-and-hpc-being.html">not been shy about my overall anxiety about where HPC is going</a> because of this, but I'll elaborate now that the exascale race has been won.</p> +<h3 style="text-align: left;">The future of leadership HPC</h3> +<p>Without some monumental breakthrough in transistor technology, there is only one path forward in continuing to build faster and faster supercomputers in the next decade: pour more and more energy (and dissipate more and more heat) into larger and larger (and more and more) GPUs.</p> +<p>The goal post for exascale power keeps moving because that's been the easiest way to hit the mythical exaflop milestone; while the original goal was 20 MW, <a href="https://www.nextplatform.com/2021/10/04/first-look-at-oak-ridges-frontier-exascaler-contrasted-to-argonnes-aurora/">Frontier is coming in at 29 MW</a> and <a href="https://www.tomshardware.com/news/nvidia-amd-polaris-supercomputer-department-of-energy">Aurora at "under 60 MW."</a>  Not only is this just a lot of power to feed into a single room, but the <a href="https://www.olcf.ornl.gov/2020/09/23/powering-frontier/">cost and effort</a> of actually <a href="https://www.llnl.gov/news/powering-llnl-prepares-exascale-massive-energy-and-water-upgrade">building this infrastructure</a> is <a href="https://www.lanl.gov/asc/fous/sixty-megawatts-power-available-2025.php">newsworthy</a> in and of itself these days.  At the current trajectory, the cost of building a new data center and extensive power and cooling infrastructure for every new leadership supercomputer is going to become prohibitive very soon.</p> +<p>HPC data centers situated in places where the cost of electricity and real estate (stacked atop the risk of earthquake or wildfire) further skew the economics of just adding more power are going to run up against this first.  It used to be easy to dismiss these practicality concerns by arguing that colocating scientists with supercomputers created immeasurable synergy and exchange of ideas, but the fact that science never stopped during the work-from-home days of the pandemic have taken a lot of air out of that argument.</p> +<p>My guess is that all the 50-60 MW data centers being built for the exascale supercomputers will be the last of their kind, and that there will be no public appetite to keep doubling down.</p> +<p>Given this, DOE's leadership computing facilities are facing an existential threat: how do you define leadership computing after exascale if you can't just add another 50% more power into your facility?  How do you justify spending another $600 million for a supercomputer that uses the same power but only delivers 15% more performance?  
You can pour similarly huge amounts of money into application modernization to accelerate science, but at the end of the day, you'd still be buying a lot of hardware that's not a lot faster.</p>
+<h3 style="text-align: left;">The future of places like NERSC</h3>
+<p>NERSC is probably a little better off since its lack of an exascale machine today gives it at least one more turn of the crank before it hits a hard power limit in its data center.  That gives it the ability to deploy at least one more system after Perlmutter that is significantly (at least 2x) more capable but draws significantly more power.  However, compared to Frontier and Aurora, such a system may still look rather silly when it lands in the same way that Perlmutter looks a bit silly compared to Summit, which was funded by the same agency but deployed years earlier.</p>
+<p>And therein lies the dilemma of centers like NERSC--how do you position yourself now so that by the time you deploy an HPC system that is close to maxing out on power, it is sufficiently different from a pure-FLOPS leadership system that it can solve problems that the leadership systems cannot?</p>
+<p>The easy go-to solution is to craft a story around "data-centric" supercomputing.  We did this when I was at the San Diego Supercomputer Center when we were budget-limited and had to differentiate our $12 million Comet supercomputer from TACC's $30 million Stampede.  You invest more in the file system than you would for a pure-FLOPS play, you provide low-cost but high-value onramps like Jupyter and science gateways to enable new science communities that have modest computing needs, and you fiddle with policies like allocations and queue priority to better suit interactive and urgent computing workloads.  From a productivity standpoint, this can be a great story since users will always respond well to lower queue wait times and fewer frustrations with the file system.  From a system architect's standpoint, though, this is really boring.  The innovation happens in policies and software, not clever hardware or design, so there's very little that's new for a system designer to think about in this case.</p>
+<p>A more innovative approach is to start thinking about how to build a system that does more than just run batch jobs.  Perhaps it gives you a private, fast file system where you can store all your data in a way indistinguishable from your personal laptop.  Perhaps it gives you a convenient place to run a Jupyter notebook that has immediate access to a powerful GPU.  Or perhaps it gives you all the tools to set up an automated process where all you have to do is upload a file to trigger an automatic data analysis and reduction pipeline that returns its output to a shiny HTTP interface.  Such a system may not be able to crank out an exaflop using HPL, but does that matter if it's the only system in the country that supports such automation?</p>
+<p>There <i>are</i> interesting system architecture questions in the latter case, so as a system designer, I much prefer it over the "data-centric" angle to non-exaflop supercomputing strategies.  But there remains a problem.</p>
+<h3 style="text-align: left;">The problem: cloud</h3>
+<p>Such a "more than just batch jobs" supercomputer actually already exists.  It's called the cloud, and it's far, far ahead of where state-of-the-art large-scale HPC is today--it pioneered the idea of providing an integrated platform where you can twist the infrastructure and its services to exactly fit what you want to get done.
Triggering data analysis based on the arrival of new data has been around for the better part of a decade in the form of serverless computing frameworks like <a href="https://docs.microsoft.com/en-us/learn/modules/execute-azure-function-with-triggers/2-determine-best-trigger">Azure Functions</a>.  If you need to run a Jupyter notebook on a server that has a beefy GPU on it, just pop a few quarters into your favorite cloud provider.  And if you don't even want to worry about what infrastructure you need to make your Jupyter-based machine learning workload go fast, the cloud providers all have <a href="https://docs.microsoft.com/en-us/azure/machine-learning/overview-what-is-machine-learning-studio">integrated machine learning development environments</a> that hide all of the underlying infrastructure.</p> +<p>And therein lies the problem: the definition of "innovation" as non-exaflop HPC runs up against this power wall might actually mean "catching up to the cloud."</p> +<p>This is not to say that NERSC-like HPC centers are entirely behind the cloud; all the DOE HPC facilities have bigger, faster, and more convenient parallel file systems that are generally always on and where data is always somewhere "fast."  They also provide familiar, managed software environments and more egalitarian support to small- to mid-scale science projects.  DOE HPC also takes the most risk in deploying unproven technologies to shake them out before they become available to the wide market.</p> +<p>However, those gaps are beginning to close.  You can stick <a href="https://azure.microsoft.com/en-us/solutions/high-performance-computing/cray/">a full Cray EX system, identical to what you might find at NERSC or OLCF, inside Azure</a> nowadays and avoid that whole burdensome mess of building out a 50 MW data center.  You can also integrate such a system with all the rich infrastructure features the cloud has to offer like triggered functions.  And when it comes to being first to market for risky HPC hardware, the cloud has already caught up in many ways--<a href="https://azure.microsoft.com/en-us/blog/azure-hbv3-virtual-machines-for-hpc-now-up-to-80-percent-faster-with-amd-milanx-cpus/">Microsoft deployed AMD Milan-X CPUs in their data centers</a> before any HPC shop did, and more recently, <a href="https://www.theregister.com/2022/05/26/amd_azure_microsoft/">Microsoft invested in AMD MI-200 GPUs</a> before Frontier had a chance to shake them out.</p> +<p>Given this steep trajectory, I see only two scenarios for large-scale, non-exaflop HPC facilities in the 10+ year horizon:</p> +<p></p> +<ol style="text-align: left;"><li>They develop, adopt, steal, or squish cloud technologies into their supercomputers to make them functionally equivalent to cloud HPC deployments.  
They may be a little friendlier to scientific users since cloud functionality wasn't designed for scientific computing alone, but they also may not be as stable, mature, or feature-rich as their cloud cousins.</li><li>They find better overall economics in eventually moving to <a href="https://www.hpcwire.com/2021/05/13/behind-the-met-offices-procurement-of-a-billion-dollar-microsoft-system/">massive, long-term, billion-dollar deals</a> where flagship HPC systems and their "more than just batch jobs" features are colocated inside cloud datacenters sited at economically advantageous (that is, cheap power, cooling, and labor) locations in the country.</li></ol> +<p>There's also grey area in between where national HPC facilities consolidate their physical infrastructure in cheap areas to manage costs but still self-manage their infrastructure rather than fully outsource to a commercial cloud.  <a href="https://ethz.ch/en/news-and-events/eth-news/news/2021/03/we-dont-just-procure-a-new-computer.html">CSCS has hinted at this model as their future plan</a> since they cannot build 100 MW datacenters in Switzerland, and this is proof that leading HPC facilities around the world see the writing on the wall and need to maneuver now to ensure they remain relevant beyond the next decade.  Unfortunately, the politics of consolidating the physical infrastructure across the DOE HPC sites would likely be mired in Congressional politics and take at least a decade to work out.  Since serious work towards this hasn't started yet, I don't envision such a grey-area solution emerging before all the DOE facilities hit their power limit.</p> +<p>Hopefully I've painted a picture of how I perceive the road ahead for large-scale HPC facilities and you can guess which one I think will win out.</p> +<h2 style="text-align: left;">Final thoughts</h2> +<p>I have every confidence that there will still be DOE HPC facilities in ten years and that they will still be staffed by some of the brightest minds in HPC.  And even if a cloud-based HPC facility ultimately consumes centers like NERSC, I don't think many people would be out of work.  The vast majority of what DOE's HPC people do is think carefully about technology trends, maintain a deep understanding of user requirements, provide excellent support to its thousands of users, and keep complex supercomputers running well.  Those jobs don't go away if the supercomputer is in the cloud; it's just the physical location, the hands doing physical hardware swaps, and the breadth of vendor interactions that may change.</p> +<p>For me as a system architect though, it's become too hard for me to catch up to all the new technologies and techniques HPC needs for the future while also building up other staff to be masters of today's I/O challenges.  I found myself at a fork in the road.  One path would mean catching up on a technical level and then getting in front of where the future of HPC lies before it gets there.  The other path would mean trying to steer the entire DOE HPC ship in the right direction, as long as that may take, and have faith that the people I bring along can race far enough ahead to tell me if we're still going where we need to go.  Perhaps a bit selfishly, I chose the former.  I'm just not ready to give up on racing ahead myself yet, and the only way I could hope to catch up was to make it a full-time job.</p> +<p>I don't claim to know the future, and a lot of what I've laid out is all speculative at best.  
NERSC, ALCF, or OLCF very well may build another round of data centers to keep the DOE HPC party going for another decade.  However, there's no denying that the stakes keep getting higher with every passing year.</p>
+<p>That all said, DOE has pulled off stranger things in the past, and it still has a bunch of talented people to make the best of whatever the future holds.</p>
+
+
+
+
+ Experimenting with Igor’s Bluestore WAL
+
+ 2022-05-26T01:00:00-06:00
+ https://hpc.social/2022/experimenting-with-igor-s-bluestore-wal
+ <p>Igor Fedotov is one of the most knowledgeable developers working on Ceph. He’s started working on replacing our use of RocksDB’s write-ahead log with a BlueStore-native implementation. After tuning, we can <a href="https://docs.google.com/spreadsheets/d/1zETd1Nq_CbLNSh3R2II-z8efQizUjDYfHDBIcMwGNdg/edit?usp=sharing">achieve</a> up to 122K random write IOPS on a single OSD! That’s nearly a 50% improvement over the current main branch and over twice as fast as Pacific!</p>
+
+
+
+
+ Interesting links I clicked this week
+
+ 2022-05-14T19:35:32-06:00
+ https://hpc.social/2022/interesting-links-i-clicked-this-week
+ <p>I watched several really interesting talks from <a href="https://www.usenix.org/conference/srecon22americas/program">SRECon22 Americas</a> this week, and in particular I&#8217;d like to highlight:</p>
+
+<ul><li><a href="https://www.usenix.org/conference/srecon22americas/presentation/desai">Principled Performance Analytics</a>, Narayan Desai and Brent Bryan from Google. Some interesting thoughts on quantitative analysis of live performance data for monitoring and observability purposes, moving past simple percentile analysis.</li><li><a href="https://www.usenix.org/conference/srecon22americas/presentation/rosenthal">The &#8216;Success&#8217; in SRE is Silent</a>, Casey Rosenthal from Verica.io. Interesting thoughts here on the visibility of reliability, qualitative analysis of systems, and why regulation and certification might not be the right thing for web systems.</li><li><a href="https://www.usenix.org/conference/srecon22americas/presentation/ryan">Building and Running a Diversity-focused Pre-internship program for SRE</a>, from Andrew Ryan at <s>Facebook</s> Meta. Some good lessons learned here from an early-career internship-like program, in its first year.</li><li><a href="https://www.usenix.org/conference/srecon22americas/presentation/means">Taking the 737 to the Max</a>, Nickolas Means from Sym. A really interesting analysis of the Boeing 737 Max failures from both a technical and cultural perspective, complete with some graph tracing to understand failure modes.</li></ul>
+
+<p>I also ran across some other articles that I&#8217;ve been actively recommending and sharing with friends and colleagues, including:</p>
+
+<ul><li><a href="https://ferd.ca/plato-s-dashboards.html">Plato&#8217;s Dashboards</a>, Fred Hebert at Honeycomb. This article has some great analysis of how easily-measurable metrics are often poor proxies for the information we&#8217;re actually interested in, and discusses qualitative research methods as a way to gain more insight.</li><li><a href="https://cyberlaw.stanford.edu/blog/2022/05/end-roe-will-bring-about-sea-change-encryption-debate">The End of Roe Will Bring About A Sea Change In The Encryption Debate</a>, Rianna Pfefferkorn from the Stanford Internet Observatory.
You should absolutely go read this article, but to sum up: Law enforcement in states that ban abortion is now <em>absolutely</em> part of the threat model that encrypted messaging defends against. No one claiming to be a progressive should be arguing in favor of &#8220;exceptional access&#8221; or other law enforcement access to encryption.</li></ul>
+
+
+
+
+
+ Customizing command output in IBM Spectrum LSF
+
+ 2022-05-12T13:16:02-06:00
+ https://hpc.social/2022/customizing-command-output-in-ibm-spectrum-lsf
+ <p><a href="https://www.ibm.com/products/hpc-workload-management">IBM Spectrum LSF</a> provides many ways to query the LSF cluster for information about workloads. As a user, once you’ve submitted a job to LSF, it’s logical to want to understand what has happened to your job. Has the job started yet? Is the job pending? If so, why is it pending? And the all-important, “Is my job done yet?”. Of course, LSF provides a very rich CLI, which has been developed and refined over the past three decades. It’s also possible to get JSON-formatted output from various LSF query commands. This is useful for users and administrators alike, as JSON-formatted output is easy to parse, and scripting can be used to extract values from the JSON output.</p>
+
+<p>This is not meant to be a definitive guide on how to query information in LSF, but rather provides some examples of the various ways that users can query job-related information using the LSF CLI. This will include a look at the <em>-json</em> and <em>-o</em> options, which have been introduced during the lifecycle of the LSF v10.1.0 family. The <em>-json</em> option can be used to provide JSON-formatted output from various LSF query commands, and the <em>-o</em> option can be used to customize the fields in the output to only those desired.</p>
+
+<p>We’ll start with a simple job submission. Here we submit a test workload as a non-root user in the LSF cluster.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bsub -o $HOME/output.%J -e $HOME/error.%J ./testjob.sh
+Job &lt;24520&gt; is submitted to default queue &lt;normal&gt;.</code></pre></div>
+
+<p>With the unique jobID number 24520, we can now query LSF for information about the job:</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bjobs 24520
+JOBID USER STAT QUEUE FROM_HOST EXEC_HOST JOB_NAME SUBMIT_TIME
+24520 gsamu RUN normal kilenc kilenc *estjob.sh May 10 21:09</code></pre></div>
+
+<p>Adding the <em>-l</em> option to <em>bjobs</em> provides the long output (more details).</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bjobs -l 24520
+
+Job &lt;24520&gt;, User &lt;gsamu&gt;, Project &lt;default&gt;, Status &lt;RUN&gt;, Queue &lt;normal&gt;, Com
+ mand &lt;./testjob.sh&gt;, Share group charged &lt;/gsamu&gt;
+Tue May 10 21:09:22: Submitted from host &lt;kilenc&gt;, CWD &lt;$HOME&gt;, Output File &lt;/h
+ ome/gsamu/output.24520&gt;, Error File &lt;/home/gsamu/error.245
+ 20&gt;;
+Tue May 10 21:09:23: Started 1 Task(s) on Host(s) &lt;kilenc&gt;, Allocated 1 Slot(s)
+ on Host(s) &lt;kilenc&gt;, Execution Home &lt;/home/gsamu&gt;, Execut
+ ion CWD &lt;/home/gsamu&gt;;
+Tue May 10 21:10:01: Resource usage collected.
+ MEM: 12 Mbytes; SWAP: 0 Mbytes; NTHREAD: 5
+ PGID: 313588; PIDs: 313588 313589 313591 313592
+
+
+ MEMORY USAGE:
+ MAX MEM: 12 Mbytes; AVG MEM: 10 Mbytes
+
+ SCHEDULING PARAMETERS:
+ r15s r1m r15m ut pg io ls it tmp swp mem
+ loadSched - - - - - - - - - - -
+ loadStop - - - - - - - - - - -
+
+ RESOURCE REQUIREMENT DETAILS:
+ Combined: select[type == local] order[r15s:pg]
+ Effective: select[type == local] order[r15s:pg] </code></pre></div>
+
+<p>It is possible to customize the output format of the <em>bjobs</em> command using the <em>-o</em> option. In this case, we want to show only some specific details about the job in the output of bjobs. We’ve selected to view: jobID, job status, project name, memory consumed, output and error files. A full list of the available fields for the custom format can be found <a href="https://www.ibm.com/docs/en/spectrum-lsf/10.1.0?topic=options-o">here</a>.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bjobs -o "jobid stat: queue:- project:10 mem:12:G output_file error_file" 24520
+JOBID STAT QUEUE PROJ_NAME MEM OUTPUT_FILE ERROR_FILE
+24520 RUN normal default 0.01 G /home/gsamu/output.24520 /home/gsamu/error.24520</code></pre></div>
+
+<p>Adding the <em>-json</em> option, it’s possible to get this customized job output in JSON format.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bjobs -o "jobid stat: queue:- project:10 mem:12:G output_file error_file" -json 24520
+{
+ "COMMAND":"bjobs",
+ "JOBS":1,
+ "RECORDS":[
+ {
+ "JOBID":"24520",
+ "STAT":"RUN",
+ "QUEUE":"normal",
+ "PROJ_NAME":"default",
+ "MEM":"0.01 G",
+ "OUTPUT_FILE":"\/home\/gsamu\/output.24520",
+ "ERROR_FILE":"\/home\/gsamu\/error.24520"
+ }
+ ]
+}</code></pre></div>
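+<p>Because the output has a fixed shape (a top-level <em>RECORDS</em> array of objects), it is straightforward to consume from scripts. As a quick illustration (assuming the <a href="https://stedolan.github.io/jq/">jq</a> utility is installed), something like the following would pull out just the jobID and state:</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bjobs -o "jobid stat" -json 24520 | jq -r '.RECORDS[] | .JOBID + " " + .STAT'
+24520 RUN</code></pre></div>
+
+<p>Next, let’s look at the <em>bhist</em> command. This can be used to view historical data about jobs.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bhist 24520
+Summary of time in seconds spent in various states:
+JOBID USER JOB_NAME PEND PSUSP RUN USUSP SSUSP UNKWN TOTAL
+24520 gsamu *tjob.sh 1 0 457 0 0 0 458 </code></pre></div>
+
+<p>We see that the job command has been truncated.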
Let’s now run <em>bhist</em> again with the <em>-w</em> option to produce a wide output.</p> + +<div class="highlight"><pre><code class="language-plaintext">$ bhist -w 24520 +Summary of time in seconds spent in various states: +JOBID USER JOB_NAME PEND PSUSP RUN USUSP SSUSP UNKWN TOTAL +24520 gsamu ./testjob.sh 1 0 462 0 0 0 463 </code></pre></div> + +<p>And finally, with the <em>-l</em> option to produce a long, detailed output.</p> + +<div class="highlight"><pre><code class="language-plaintext">$ bhist -l 24520 + +Job &lt;24520&gt;, User &lt;gsamu&gt;, Project &lt;default&gt;, Command &lt;./testjob.sh&gt; +Tue May 10 21:09:22: Submitted from host &lt;kilenc&gt;, to Queue &lt;normal&gt;, CWD &lt;$HOM + E&gt;, Output File &lt;/home/gsamu/output.%J&gt;, Error File &lt;/home + /gsamu/error.%J&gt;; +Tue May 10 21:09:23: Dispatched 1 Task(s) on Host(s) &lt;kilenc&gt;, Allocated 1 Slot + (s) on Host(s) &lt;kilenc&gt;, Effective RES_REQ &lt;select[type == + local] order[r15s:pg] &gt;; +Tue May 10 21:09:25: Starting (Pid 313588); +Tue May 10 21:09:25: Running with execution home &lt;/home/gsamu&gt;, Execution CWD &lt; + /home/gsamu&gt;, Execution Pid &lt;313588&gt;; + + +Summary of time in seconds spent in various states by Tue May 10 21:17:26 + PEND PSUSP RUN USUSP SSUSP UNKWN TOTAL + 1 0 483 0 0 0 484 </code></pre></div> + +<p>When the job is done, the <em>bacct</em> command can be used to get detailed accounting information for jobs.</p> + +<div class="highlight"><pre><code class="language-plaintext">$ bacct 24520 + +Accounting information about jobs that are: + - submitted by all users. + - accounted on all projects. + - completed normally or exited + - executed on all hosts. + - submitted to all queues. + - accounted on all service classes. + - accounted to all RC accounts. +------------------------------------------------------------------------------ + +SUMMARY: ( time unit: second ) + Total number of done jobs: 1 Total number of exited jobs: 0 + Total CPU time consumed: 3.4 Average CPU time consumed: 3.4 + Maximum CPU time of a job: 3.4 Minimum CPU time of a job: 3.4 + Total wait time in queues: 1.0 + Average wait time in queue: 1.0 + Maximum wait time in queue: 1.0 Minimum wait time in queue: 1.0 + Average turnaround time: 669 (seconds/job) + Maximum turnaround time: 669 Minimum turnaround time: 669 + Average hog factor of a job: 0.01 ( cpu time / turnaround time ) + Maximum hog factor of a job: 0.01 Minimum hog factor of a job: 0.01 + Average expansion factor of a job: 1.00 ( turnaround time / run time ) + Maximum expansion factor of a job: 1.00 + Minimum expansion factor of a job: 1.00 + Total Run time consumed: 668 Average Run time consumed: 668 + Maximum Run time of a job: 668 Minimum Run time of a job: 668 + Scheduler Efficiency for 1 jobs + Slot Utilization: 100.00% Memory Utilization: 100.00% </code></pre></div> + +<p>And now the long, detailed output from bacct using the <em>-l</em> parameter.</p> + +<div class="highlight"><pre><code class="language-plaintext">$ bacct -l 24520 + +Accounting information about jobs that are: + - submitted by all users. + - accounted on all projects. + - completed normally or exited + - executed on all hosts. + - submitted to all queues. + - accounted on all service classes. + - accounted to all RC accounts. 
+------------------------------------------------------------------------------
+
+Job &lt;24520&gt;, User &lt;gsamu&gt;, Project &lt;default&gt;, Status &lt;DONE&gt;, Queue &lt;normal&gt;, Co
+ mmand &lt;./testjob.sh&gt;, Share group charged &lt;/gsamu&gt;
+Tue May 10 21:09:22: Submitted from host &lt;kilenc&gt;, CWD &lt;$HOME&gt;, Output File &lt;/h
+ ome/gsamu/output.%J&gt;, Error File &lt;/home/gsamu/error.%J&gt;;
+Tue May 10 21:09:23: Dispatched 1 Task(s) on Host(s) &lt;kilenc&gt;, Allocated 1 Slot
+ (s) on Host(s) &lt;kilenc&gt;, Effective RES_REQ &lt;select[type ==
+ local] order[r15s:pg] &gt;;
+Tue May 10 21:20:31: Completed &lt;done&gt;.
+
+Accounting information about this job:
+ Share group charged &lt;/gsamu&gt;
+ CPU_T WAIT TURNAROUND STATUS HOG_FACTOR MEM SWAP
+ 3.37 1 669 done 0.0050 12M 0M
+------------------------------------------------------------------------------
+
+SUMMARY: ( time unit: second )
+ Total number of done jobs: 1 Total number of exited jobs: 0
+ Total CPU time consumed: 3.4 Average CPU time consumed: 3.4
+ Maximum CPU time of a job: 3.4 Minimum CPU time of a job: 3.4
+ Total wait time in queues: 1.0
+ Average wait time in queue: 1.0
+ Maximum wait time in queue: 1.0 Minimum wait time in queue: 1.0
+ Average turnaround time: 669 (seconds/job)
+ Maximum turnaround time: 669 Minimum turnaround time: 669
+ Average hog factor of a job: 0.01 ( cpu time / turnaround time )
+ Maximum hog factor of a job: 0.01 Minimum hog factor of a job: 0.01
+ Average expansion factor of a job: 1.00 ( turnaround time / run time )
+ Maximum expansion factor of a job: 1.00
+ Minimum expansion factor of a job: 1.00
+ Total Run time consumed: 668 Average Run time consumed: 668
+ Maximum Run time of a job: 668 Minimum Run time of a job: 668
+ Scheduler Efficiency for 1 jobs
+ Slot Utilization: 100.00% Memory Utilization: 100.00% </code></pre></div>
+
+<p><strong>From jobs to queues</strong></p>
+
+<p>We’ve looked briefly at querying LSF for job-related information. Let’s now take a closer look at querying LSF for information regarding the queue configuration. Batch queues are where users submit jobs. Queues can have a very wide array of attributes and settings. Below we see a listing of the default queues configured in LSF Suite for HPC. The <em>bqueues</em> command is used to query LSF for the queue configuration.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bqueues
+QUEUE_NAME PRIO STATUS MAX JL/U JL/P JL/H NJOBS PEND RUN SUSP
+admin 50 Open:Active - - - - 0 0 0 0
+owners 43 Open:Active - - - - 0 0 0 0
+priority 43 Open:Active - - - - 0 0 0 0
+night 40 Open:Active - - - - 0 0 0 0
+short 35 Open:Active - - - - 0 0 0 0
+dataq 33 Open:Active - - - - 0 0 0 0
+normal 30 Open:Active - - - - 0 0 0 0
+interactive 30 Open:Active - - - - 0 0 0 0
+idle 20 Open:Active - - - - 0 0 0 0</code></pre></div>
+
+<p>The <em>-l</em> option of <em>bqueues</em> can be used to get a more detailed view of the queues. Here, we look at the long output for the queue <em>normal</em>.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bqueues -l normal
+
+QUEUE: normal
+ -- For normal low priority jobs, running only if hosts are lightly loaded. This is the default queue.
+
+PARAMETERS/STATISTICS
+PRIO NICE STATUS MAX JL/U JL/P JL/H NJOBS PEND RUN SSUSP USUSP RSV PJOBS
+ 30 0 Open:Active - - - - 0 0 0 0 0 0 0
+Interval for a host to accept two jobs is 0 seconds
+
+SCHEDULING PARAMETERS
+ r15s r1m r15m ut pg io ls it tmp swp mem
+ loadSched - - - - - - - - - - -
+ loadStop - - - - - - - - - - -
+
+SCHEDULING POLICIES: FAIRSHARE NO_INTERACTIVE
+USER_SHARES: [default, 1]
+
+SHARE_INFO_FOR: normal/
+ USER/GROUP SHARES PRIORITY STARTED RESERVED CPU_TIME RUN_TIME ADJUST GPU_RUN_TIME
+gsamu 1 0.333 0 0 0.0 0 0.000 0
+elasticsearch 1 0.333 0 0 0.0 0 0.000 0
+
+USERS: all
+HOSTS: all </code></pre></div>
+
+<p>Custom output formatting can also be used for the <em>bqueues</em> command. Below is an example of the use of custom output formatting using the <em>-o</em> parameter. For this example, we display queue name, status and the number of jobs (all states). More details about the <em>bqueues -o</em> parameter can be found <a href="https://www.ibm.com/docs/en/spectrum-lsf/10.1.0?topic=reference-bqueues">here</a>.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bqueues -o "queue_name:12 status:12 njobs"
+QUEUE_NAME STATUS NJOBS
+admin Open:Active 0
+owners Open:Active 0
+priority Open:Active 0
+night Open:Active 0
+short Open:Active 0
+dataq Open:Active 0
+normal Open:Active 0
+interactive Open:Active 0
+idle Open:Active 0</code></pre></div>
+
+<p>And for JSON-formatted output, we add the <em>-json</em> parameter.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bqueues -json -o "queue_name:12 status:12 njobs"
+{
+ "COMMAND":"bqueues",
+ "QUEUES":9,
+ "RECORDS":[
+ {
+ "QUEUE_NAME":"admin",
+ "STATUS":"Open:Active",
+ "NJOBS":"0"
+ },
+ {
+ "QUEUE_NAME":"owners",
+ "STATUS":"Open:Active",
+ "NJOBS":"0"
+ },
+ {
+ "QUEUE_NAME":"priority",
+ "STATUS":"Open:Active",
+ "NJOBS":"0"
+ },
+ {
+ "QUEUE_NAME":"night",
+ "STATUS":"Open:Active",
+ "NJOBS":"0"
+ },
+ {
+ "QUEUE_NAME":"short",
+ "STATUS":"Open:Active",
+ "NJOBS":"0"
+ },
+ {
+ "QUEUE_NAME":"dataq",
+ "STATUS":"Open:Active",
+ "NJOBS":"0"
+ },
+ {
+ "QUEUE_NAME":"normal",
+ "STATUS":"Open:Active",
+ "NJOBS":"0"
+ },
+ {
+ "QUEUE_NAME":"interactive",
+ "STATUS":"Open:Active",
+ "NJOBS":"0"
+ },
+ {
+ "QUEUE_NAME":"idle",
+ "STATUS":"Open:Active",
+ "NJOBS":"0"
+ }
+ ]
+}</code></pre></div>
+
+<p><strong>From queues to servers</strong></p>
+
+<p>Finally, we’ll look at the LSF <em>bhosts</em> command, which is used to display information about the batch hosts in the LSF cluster.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bhosts
+HOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV
+archie ok - 2 0 0 0 0 0
+kilenc ok - 32 0 0 0 0 0</code></pre></div>
+
+<p>To view detailed information about a batch host, the <em>-l</em> parameter can be specified for <em>bhosts</em>.
Here we query for information on host <em>archie</em>.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bhosts -l archie
+HOST archie
+STATUS CPUF JL/U MAX NJOBS RUN SSUSP USUSP RSV DISPATCH_WINDOW
+ok 6.00 - 2 0 0 0 0 0 -
+
+ CURRENT LOAD USED FOR SCHEDULING:
+ r15s r1m r15m ut pg io ls it tmp swp mem slots ngpus
+ Total 0.0 0.0 0.0 0% 0.0 1 1 437 3456M 0M 1.7G 2 0.0
+ Reserved 0.0 0.0 0.0 0% 0.0 0 0 0 0M 0M 0M - -
+
+ ngpus_physical gpu_shared_avg_ut gpu_shared_avg_mut gpu_mode0
+ Total 0.0 0.0 0.0 0.0
+ Reserved - - - -
+
+ gpu_mode1 gpu_mode2 gpu_mode3 gpu_mode4 gpu_mode5 gpu_mode6
+ Total 0.0 0.0 0.0 0.0 0.0 0.0
+ Reserved - - - - - -
+
+ gpu_mode7 gpu_temp0 gpu_temp1 gpu_temp2 gpu_temp3 gpu_temp4
+ Total 0.0 0.0 0.0 0.0 0.0 0.0
+ Reserved - - - - - -
+
+ gpu_temp5 gpu_temp6 gpu_temp7 gpu_ecc0 gpu_ecc1 gpu_ecc2 gpu_ecc3
+ Total 0.0 0.0 0.0 0.0 0.0 0.0 0.0
+ Reserved - - - - - - -
+
+ gpu_ecc4 gpu_ecc5 gpu_ecc6 gpu_ecc7 gpu_ut0 gpu_ut1 gpu_ut2 gpu_ut3
+ Total 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
+ Reserved - - - - - - - -
+
+ gpu_ut4 gpu_ut5 gpu_ut6 gpu_ut7 gpu_mut0 gpu_mut1 gpu_mut2 gpu_mut3
+ Total 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
+ Reserved - - - - - - - -
+
+ gpu_mut4 gpu_mut5 gpu_mut6 gpu_mut7 gpu_mtotal0 gpu_mtotal1
+ Total 0.0 0.0 0.0 0.0 0.0 0.0
+ Reserved - - - - - -
+
+ gpu_mtotal2 gpu_mtotal3 gpu_mtotal4 gpu_mtotal5 gpu_mtotal6
+ Total 0.0 0.0 0.0 0.0 0.0
+ Reserved - - - - -
+
+ gpu_mtotal7 gpu_mused0 gpu_mused1 gpu_mused2 gpu_mused3 gpu_mused4
+ Total 0.0 0.0 0.0 0.0 0.0 0.0
+ Reserved - - - - - -
+
+ gpu_mused5 gpu_mused6 gpu_mused7 gpu_maxfactor
+ Total 0.0 0.0 0.0 0.0
+ Reserved - - - -
+
+
+ LOAD THRESHOLD USED FOR SCHEDULING:
+ r15s r1m r15m ut pg io ls it tmp swp mem
+ loadSched - - - - - - - - - - -
+ loadStop - - - - - - - - - - -
+
+
+ CONFIGURED AFFINITY CPU LIST: all</code></pre></div>
+
+<p>Similar to <em>bjobs</em> and <em>bqueues</em>, the <em>-o</em> parameter can be used for custom formatting of the output of <em>bhosts</em>. Below is an example of the use of custom output formatting using the <em>-o</em> parameter. For this example, we display host name, status and the number of jobs (all states). More details about the <em>bhosts -o</em> parameter can be found <a href="https://www.ibm.com/docs/en/spectrum-lsf/10.1.0?topic=reference-bhosts">here</a>.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ bhosts -o "host_name:12 status:12 njobs"
+HOST_NAME STATUS NJOBS
+archie ok 0
+kilenc ok 0</code></pre></div>
+
+<p>And adding the <em>-json</em> parameter for JSON-formatted output.</p>
+
+<div class="highlight"><pre><code class="language-text">$ bhosts -json -o "host_name:12 status:12 njobs"
+{
+ "COMMAND":"bhosts",
+ "HOSTS":2,
+ "RECORDS":[
+ {
+ "HOST_NAME":"archie",
+ "STATUS":"ok",
+ "NJOBS":"0"
+ },
+ {
+ "HOST_NAME":"kilenc",
+ "STATUS":"ok",
+ "NJOBS":"0"
+ }
+ ]
+}</code></pre></div>
+
+<p>That concludes our brief look at LSF query commands. We’ve only scratched the surface here in terms of capabilities and query commands for LSF. The LSF command line interface is powerful and flexible, including ways to customize command output and to produce JSON-formatted output.
For more details, the complete set of IBM Spectrum LSF documentation can be found online at IBM Documentation <a href="https://www.ibm.com/docs/en/spectrum-lsf/10.1.0">here</a>.</p>
+
+
+
+
+ Pipelib- Simple Library to Parse, Filter, and Sort Things
+
+ 2022-05-07T13:30:00-06:00
+ https://hpc.social/2022/pipelib-simple-library-to-parse-filter-and-sort-things
+ <p>In early April I added an “update” command to Singularity Registry HPC (<a href="https://github.com/singularityhub/singularity-hpc/pull/538" target="_blank">see the pull request here</a>) and needed to start with a list of docker tags and
+parse them into version strings to sort, and still return the original tag for later use.
+I wound up creating a <a href="https://github.com/singularityhub/singularity-hpc/blob/main/shpc/main/container/update/versions.py" target="_blank">custom class and set of functions</a> that use
+<a href="https://github.com/python/cpython/blob/bd030b633f98ea5d9f93ef0105a51d2faf67070d/Lib/distutils/version.py#L269" target="_blank">distutils.LooseVersion</a> to support that, but in creating this
+“hard-coded thing” I stepped back and had a question.</p>
+
+<blockquote>
+ <p>Can we more intelligently compose custom parsing pipelines?</p>
+
+</blockquote>
+
+<p>Specifically I wanted to:</p>
+
+<ol class="custom-counter">
+<li>Start with a list of container tags for an image from a registry</li>
+<li>Filter out anything that looks like a commit, but isn't a string (e.g., latest)</li>
+<li>Derive a major, minor, and patch version for each, and filter to newest</li>
+<li>Sort!</li>
+</ol>
+
+<p>For step 3, as an example, if there were a <code class="language-plaintext highlighter-rouge">1.2.3-commitA</code> and <code class="language-plaintext highlighter-rouge">1.2.3-commitB</code>, I’d only want to keep one, and the newer one of the two,
+so I could ask for “unique by patch” and filter the older one out.
+Ultimately, of course, I <a href="https://twitter.com/vsoch/status/1516197732708282369" target="_blank">dove right in</a>,
+and this led to the creation of <a href="https://vsoch.github.io/pipelib" target="_blank">Pipelib</a>, which was an itch I terribly wanted to scratch!
In this quick post, I want to share the overall design, because it was really fun to make.</p> + +<div style="padding: 20px;"> +<img src="https://raw.githubusercontent.com/vsoch/pipelib/main/docs/assets/pipelib-small.png" /> +</div> + +<h2 id="design">Design</h2> + +<p>Before we talk about the design, let me show it to you.</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code> +<span class="kn">import</span> <span class="nn">pipelib.steps</span> <span class="k">as</span> <span class="n">step</span> +<span class="kn">import</span> <span class="nn">pipelib.pipeline</span> <span class="k">as</span> <span class="n">pipeline</span> + +<span class="c1"># A pipeline to process a list of strings +</span><span class="n">steps</span> <span class="o">=</span> <span class="p">(</span> + + <span class="c1"># convert everything to lowercase +</span> <span class="n">step</span><span class="p">.</span><span class="n">transform</span><span class="p">.</span><span class="n">ToLowercase</span><span class="p">(),</span> + + <span class="c1"># don't include anything with "two" +</span> <span class="o">~</span><span class="n">step</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasPatterns</span><span class="p">(</span><span class="n">filters</span><span class="o">=</span><span class="p">[</span><span class="s">"two"</span><span class="p">])</span> +<span class="p">)</span> + +<span class="c1"># Strings to process +</span><span class="n">items</span> <span class="o">=</span> <span class="p">[</span><span class="s">'item-ONE'</span><span class="p">,</span> <span class="s">'item-TWO'</span><span class="p">,</span> <span class="s">'item-two-THREE'</span><span class="p">]</span> + +<span class="n">p</span> <span class="o">=</span> <span class="n">pipeline</span><span class="p">.</span><span class="n">Pipeline</span><span class="p">(</span><span class="n">steps</span><span class="p">)</span> + +<span class="c1"># The updated and transformed items +</span><span class="n">updated</span> <span class="o">=</span> <span class="n">p</span><span class="p">.</span><span class="n">run</span><span class="p">(</span><span class="n">items</span><span class="p">)</span> +<span class="c1"># ['item-one'] +</span> +</code></pre></div> +</div> + +<p>In the above, we take a pipeline object and add steps to it. That design is fairly simple, +as the Pipeline class takes an optional iterable of things to process. I say “things” because +we can give it steps, composed steps, or even entire other pipelines. 
Here is an example +of adding an entire other Pipeline!</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code> +<span class="kn">import</span> <span class="nn">pipelib.steps</span> <span class="k">as</span> <span class="n">step</span> +<span class="kn">import</span> <span class="nn">pipelib.pipeline</span> <span class="k">as</span> <span class="n">pipeline</span> + +<span class="n">fruits</span> <span class="o">=</span> <span class="p">[</span><span class="s">"Orange"</span><span class="p">,</span> <span class="s">"Melon"</span><span class="p">,</span> <span class="s">"Watermelon"</span><span class="p">,</span> <span class="s">"Fruit23"</span><span class="p">]</span> +<span class="n">preprocess</span> <span class="o">=</span> <span class="n">pipeline</span><span class="p">.</span><span class="n">Pipeline</span><span class="p">(</span> + <span class="n">steps</span> <span class="o">=</span> <span class="p">(</span> + <span class="c1"># Example of chaining steps together +</span> <span class="n">step</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasMaxLength</span><span class="p">(</span><span class="n">length</span><span class="o">=</span><span class="mi">8</span><span class="p">)</span> <span class="o">&amp;</span> <span class="n">step</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasAllLetters</span><span class="p">(),</span> + <span class="p">)</span> +<span class="p">)</span> + +<span class="c1"># Add this preprocess step alongside other steps (make lowercase) +</span><span class="n">steps</span> <span class="o">=</span> <span class="p">(</span> + <span class="n">step</span><span class="p">.</span><span class="n">transform</span><span class="p">.</span><span class="n">ToLowercase</span><span class="p">(),</span> + <span class="n">preprocess</span><span class="p">,</span> +<span class="p">)</span> + +<span class="c1"># Create a new pipeline and run +</span><span class="n">p</span> <span class="o">=</span> <span class="n">pipeline</span><span class="p">.</span><span class="n">Pipeline</span><span class="p">(</span><span class="n">steps</span><span class="p">)</span> + +<span class="c1"># We should expect orange and melon! +</span><span class="n">updated</span> <span class="o">=</span> <span class="n">p</span><span class="p">.</span><span class="n">run</span><span class="p">(</span><span class="n">fruits</span><span class="p">)</span> +<span class="p">[</span><span class="s">'orange'</span><span class="p">,</span> <span class="s">'melon'</span><span class="p">]</span> + +</code></pre></div> +</div> + +<p>Implementation-wise, this is also fairly simple. We can check the underlying class of the provided object +and either add a single step, or insert a set of steps given another pipeline. In fact, pipelib comes with a +small set of “pipelines” that are ready for you to use. 
For example, here is one to
+filter out “things that look like complete or partial git commits”</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="kn">import</span> <span class="nn">pipelib.steps</span> <span class="k">as</span> <span class="n">step</span>
+<span class="kn">import</span> <span class="nn">pipelib.pipeline</span> <span class="k">as</span> <span class="n">pipeline</span>
+
+<span class="c1"># Pre-generated sets of steps we can use
+</span><span class="kn">import</span> <span class="nn">pipelib.pipelines</span> <span class="k">as</span> <span class="n">pipelines</span>
+
+<span class="n">pipeline</span><span class="p">.</span><span class="n">Pipeline</span><span class="p">(</span>
+    <span class="n">pipelines</span><span class="p">.</span><span class="n">git</span><span class="p">.</span><span class="n">RemoveCommits</span>
+<span class="p">).</span><span class="n">run</span><span class="p">([</span><span class="s">"832b1c"</span><span class="p">,</span> <span class="s">"832b1c645e562d5cc6e376e5a3e058c02a40d92a"</span><span class="p">,</span> <span class="s">"123-abcd"</span><span class="p">])</span>
+<span class="p">[</span><span class="s">"123-abcd"</span><span class="p">]</span>
+
+</code></pre></div>
+</div>
+
+<p>This is something I found useful because people sometimes use commits as Docker tags, and I don’t find these
+incredibly meaningful as versions to compare against (so I want to remove them). Under the hood, it looks like this:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="n">RemoveCommits</span> <span class="o">=</span> <span class="n">pipeline</span><span class="p">.</span><span class="n">Pipeline</span><span class="p">(</span>
+    <span class="n">steps</span><span class="o">=</span><span class="p">(</span>
+        <span class="n">step</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasMinLength</span><span class="p">(</span><span class="n">length</span><span class="o">=</span><span class="mi">8</span><span class="p">)</span> <span class="o">&amp;</span> <span class="o">~</span><span class="n">step</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasAllLowerLettersNumbers</span><span class="p">(),</span>
+    <span class="p">)</span>
+<span class="p">)</span>
+
+</code></pre></div>
+</div>
+
+<p>Do you also notice something interesting in the above? We are actually combining steps akin to logical operations.
+The above “pipeline” is actually just one step that combined other steps!</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="n">pipelines</span><span class="p">.</span><span class="n">git</span><span class="p">.</span><span class="n">RemoveCommits</span><span class="p">.</span><span class="n">steps</span>
+<span class="p">[</span><span class="n">HasMinLength_AND_NotHasAllLowerLettersNumbers</span><span class="p">]</span>
+
+</code></pre></div>
+</div>
+
+<p>Let’s step back and talk about some concepts that allow this.</p>
+
+<h2 id="concepts">Concepts</h2>
+
+<h3 id="pipeline">Pipeline</h3>
+
+<p>As we’ve seen above, a pipeline is a collection of steps that take, as input, a listing of items and return a parsed and filtered list.</p>
+
+<h3 id="step">Step</h3>
+
+<p>A step is some action in a pipeline.
The way this works is that we have different kinds of steps, and this makes them easy
+to implement and even test. A <em>boolean</em> step is akin to a filter, and is expected to return True or False to indicate if the item passes, e.g., False means it’s filtered out. Boolean steps are neat because they afford different kinds of logic and combination.</p>
+
+<h4 id="logical-operations">Logical Operations</h4>
+
+<p>Let’s say that we have a step that checks that an input is all letters:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="n">step</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasAllLetters</span><span class="p">()</span>
+</code></pre></div>
+</div>
+
+<p>For the above, anything that had a number (e.g., orange123) would be filtered out. But what if we wanted to invert that, and allow passing of inputs that don’t have all letters (meaning we want numbers or special characters)? We can simply do that:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="o">~</span><span class="n">step</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasAllLetters</span><span class="p">()</span>
+</code></pre></div>
+</div>
+
+<p>Implementation-wise, this was really fun to do! For Python to respect the logical operator <code class="language-plaintext highlighter-rouge">~</code> I simply define the <code class="language-plaintext highlighter-rouge">__invert__</code> function for the BooleanStep class.</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">def</span> <span class="nf">__invert__</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
+    <span class="s">"""
+    We can say "~step" and reverse the logic.
+    """</span>
+    <span class="bp">self</span><span class="p">.</span><span class="n">reverse</span> <span class="o">=</span> <span class="bp">True</span>
+    <span class="k">return</span> <span class="bp">self</span>
+</code></pre></div>
+</div>
+
+<p>It sets an attribute “reverse” to True, and returns itself; that way we use the same step, but with this variable set to true.
+What does that do? In the “run” <a href="https://github.com/vsoch/pipelib/blob/69d7d4ac677a24a31ffa9322f03090cf074442c8/pipelib/steps/step.py#L217-L238" target="_blank">function</a> of the BooleanStep we retrieve an outcome from the underlying step (True or False) and simply reverse it when that boolean is True!</p>
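+
+<p>To make that mechanic concrete, here is a minimal, self-contained sketch (a condensed illustration only, not the actual pipelib source - see the linked function for the real implementation):</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>class BooleanStep:
+    """Sketch of a filter step that can be inverted with ~step."""
+
+    def __init__(self):
+        self.reverse = False
+
+    def __invert__(self):
+        self.reverse = True
+        return self
+
+    def check(self, item):
+        raise NotImplementedError
+
+    def run(self, item):
+        outcome = self.check(item)
+        # If the step was inverted with ~, flip the outcome
+        return not outcome if self.reverse else outcome
+
+class HasAllLetters(BooleanStep):
+    def check(self, item):
+        return item.isalpha()
+
+print((~HasAllLetters()).run("abc123"))  # True: the inverted filter keeps it
+</code></pre></div>
+</div>
+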
+<p>Again, it’s very simple, and allows for doing things like this:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="kn">from</span> <span class="nn">pipelib.pipeline</span> <span class="kn">import</span> <span class="n">Pipeline</span>
+<span class="kn">import</span> <span class="nn">pipelib.steps</span> <span class="k">as</span> <span class="n">steps</span>
+
+<span class="n">Pipeline</span><span class="p">(</span><span class="o">~</span><span class="n">steps</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasAllLetters</span><span class="p">()).</span><span class="n">run</span><span class="p">([</span><span class="s">"I-have-special-characters"</span><span class="p">,</span> <span class="s">"Idonot"</span><span class="p">])</span>
+<span class="p">[</span><span class="s">'I-have-special-characters'</span><span class="p">]</span>
+
+<span class="n">Pipeline</span><span class="p">(</span><span class="n">steps</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasAllLetters</span><span class="p">()).</span><span class="n">run</span><span class="p">([</span><span class="s">"I-have-special-characters"</span><span class="p">,</span> <span class="s">"Idonot"</span><span class="p">])</span>
+<span class="p">[</span><span class="s">'Idonot'</span><span class="p">]</span>
+
+</code></pre></div>
+</div>
+
+<p>What if we wanted to combine steps? E.g., what if I want to say “has all letters” OR “has minimum length 10?” If we put the steps
+side by side we would only be able to support an AND - allowing passing through of entries that have all letters and the minimum length of 10.
+Pipelib supports both operators - AND and OR - as follows:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="o">&gt;</span> <span class="n">step</span> <span class="o">=</span> <span class="n">steps</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasAllLetters</span><span class="p">()</span> <span class="o">&amp;</span> <span class="n">steps</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasMinLength</span><span class="p">(</span><span class="n">length</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span>
+<span class="o">&gt;</span> <span class="n">step</span>
+<span class="n">HasAllLetters_AND_HasMinLength</span>
+
+<span class="n">Pipeline</span><span class="p">(</span><span class="n">step</span><span class="p">).</span><span class="n">run</span><span class="p">([</span><span class="s">"thisonewillpass"</span><span class="p">,</span> <span class="s">"thisoneno"</span><span class="p">,</span> <span class="s">"notthisone2"</span><span class="p">])</span>
+<span class="p">[</span><span class="s">'thisonewillpass'</span><span class="p">]</span>
+
+</code></pre></div>
+</div>
+
+<p>For both cases above, we are using the <code class="language-plaintext highlighter-rouge">__and__</code> and <code class="language-plaintext highlighter-rouge">__or__</code> functions, respectively, and:</p>
+
+<ol class="custom-counter">
+<li>Checking for class compatibility (both must be BooleanStep)</li>
+<li>Creating a list of composed steps to be added to a class attribute "composed"</li>
+<li>Adding the previous run functions too, naming based on the step class name</li>
+<li>Defining a new run function that loops through the composed set, runs, updates and returns
a shared result</li>
+<li>Naming the class based on the combined names of the composed classes</li>
+</ol>
+
+<p>For step 4 above, the operation (AND or OR) will vary depending on whether the initial call was to <code class="language-plaintext highlighter-rouge">__and__</code> or <code class="language-plaintext highlighter-rouge">__or__</code>.
+The main difference between the two is that “OR” starts with a default of False (otherwise it would always return True)
+and AND starts with a default of True (otherwise it would always return False).
+And since we are always taking the first class “composed” attribute, this means that you can compose
+steps with other steps as many times as you like - a new check is simply added to the front or back of
+the list. The result (returned) is the new class that is ready to run. Here is what an OR looks like:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="o">&gt;</span> <span class="n">step</span> <span class="o">=</span> <span class="n">steps</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasAllLetters</span><span class="p">()</span> <span class="o">|</span> <span class="n">steps</span><span class="p">.</span><span class="n">filters</span><span class="p">.</span><span class="n">HasMinLength</span><span class="p">(</span><span class="n">length</span><span class="o">=</span><span class="mi">10</span><span class="p">)</span>
+<span class="o">&gt;</span> <span class="n">step</span>
+<span class="n">HasAllLetters_OR_HasMinLength</span>
+
+<span class="n">Pipeline</span><span class="p">(</span><span class="n">step</span><span class="p">).</span><span class="n">run</span><span class="p">([</span><span class="s">"thisonewillpass"</span><span class="p">,</span> <span class="s">"veryshort"</span><span class="p">,</span> <span class="s">"12345"</span><span class="p">])</span>
+<span class="p">[</span><span class="s">'thisonewillpass'</span><span class="p">,</span> <span class="s">'veryshort'</span><span class="p">]</span>
+
+</code></pre></div>
+</div>
+
+<p>If you are interested in this function, you can see the entire thing <a href="https://github.com/vsoch/pipelib/blob/832b1c645e562d5cc6e376e5a3e058c02a40d92a/pipelib/steps/step.py#L177-L241" target="_blank">here</a>.</p>
+
+<h4 id="transformation-operations">Transformation Operations</h4>
+
+<p>A base step can be thought of as a transformation. Instead of expecting a boolean to be returned, we are
+instead expecting a new value or None. In this respect the transform step can also act as a boolean, as a return
+of “None” will be removed from the list; however, in most cases a transform is intended to perform an operation
+on the item passed. Here is an example of a transformation operation:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="n">Pipeline</span><span class="p">(</span><span class="n">steps</span><span class="p">.</span><span class="n">transform</span><span class="p">.</span><span class="n">ToLowercase</span><span class="p">()).</span><span class="n">run</span><span class="p">([</span><span class="s">"AHHHH"</span><span class="p">])</span>
+<span class="p">[</span><span class="s">'ahhhh'</span><span class="p">]</span>
+</code></pre></div>
+</div>
+
+<h4 id="sort-operations">Sort Operations</h4>
+
+<p>A sort operation is a step that is one level up. Instead of operating on individual items, the step
+re-defines the higher level “run” function and operates across the whole iterable.</p>
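+
+<p>As a rough sketch (a hypothetical example, not a step that ships with pipelib), a sort step can be as small as this:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>class SortByLength:
+    """Hypothetical sort step: unlike boolean or transform steps,
+    it receives the entire iterable at once."""
+
+    def __init__(self, ascending=True):
+        self.ascending = ascending
+
+    def run(self, items):
+        return sorted(items, key=len, reverse=not self.ascending)
+
+print(SortByLength(ascending=False).run(["hi", "hello", "hey"]))
+# ['hello', 'hey', 'hi']
+</code></pre></div>
+</div>
+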
+<p>A good example from Pipelib is the use case that originally inspired me - to start with a messy
+list of Docker tags, do some parsing to derive versions, and return a sorted list.</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="n">pipeline</span><span class="p">.</span><span class="n">Pipeline</span><span class="p">(</span><span class="n">steps</span><span class="p">.</span><span class="n">container</span><span class="p">.</span><span class="n">ContainerTagSort</span><span class="p">(</span><span class="n">ascending</span><span class="o">=</span><span class="bp">False</span><span class="p">)).</span><span class="n">run</span><span class="p">([</span><span class="s">"1.2.3"</span><span class="p">,</span> <span class="s">"0.1.0"</span><span class="p">,</span> <span class="s">"8.3.2"</span><span class="p">])</span>
+<span class="p">[</span><span class="s">'8.3.2'</span><span class="p">,</span> <span class="s">'1.2.3'</span><span class="p">,</span> <span class="s">'0.1.0'</span><span class="p">]</span>
+
+<span class="n">pipeline</span><span class="p">.</span><span class="n">Pipeline</span><span class="p">(</span><span class="n">steps</span><span class="p">.</span><span class="n">container</span><span class="p">.</span><span class="n">ContainerTagSort</span><span class="p">(</span><span class="n">ascending</span><span class="o">=</span><span class="bp">True</span><span class="p">)).</span><span class="n">run</span><span class="p">([</span><span class="s">"1.2.3"</span><span class="p">,</span> <span class="s">"0.1.0"</span><span class="p">,</span> <span class="s">"8.3.2"</span><span class="p">])</span>
+<span class="p">[</span><span class="s">'0.1.0'</span><span class="p">,</span> <span class="s">'1.2.3'</span><span class="p">,</span> <span class="s">'8.3.2'</span><span class="p">]</span>
+
+</code></pre></div>
+</div>
+
+<p>In the above we also demonstrate that steps can take parameters, such as the order of a sort!
+This particular sorting step also allows you to say you want to return unique major, minor, or patch
+versions.</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="n">pipeline</span><span class="p">.</span><span class="n">Pipeline</span><span class="p">(</span><span class="n">steps</span><span class="p">.</span><span class="n">container</span><span class="p">.</span><span class="n">ContainerTagSort</span><span class="p">(</span><span class="n">unique_major</span><span class="o">=</span><span class="bp">True</span><span class="p">)).</span><span class="n">run</span><span class="p">([</span><span class="s">"1.2.3"</span><span class="p">,</span> <span class="s">"1.1.0"</span><span class="p">,</span> <span class="s">"8.3.2"</span><span class="p">])</span>
+<span class="p">[</span><span class="s">'8.3.2'</span><span class="p">,</span> <span class="s">'1.2.3'</span><span class="p">]</span>
+
+</code></pre></div>
+</div>
+
+<p>And if you wanted to do a more comprehensive cleanup and sort, you could do <a href="https://vsoch.github.io/pipelib/getting_started/user-guide.html#a-real-world-example-docker-tags" target="_blank">something like this</a>.</p>
+
+<h3 id="wrapper">Wrapper</h3>
+
+<p>Pipelib needed a way to be able to pass around some parsed version of an item, but still maintain
+the original.
For example, let’s say I’m parsing Docker tags into something that resembles a loose
+semantic version: I might have filtered <code class="language-plaintext highlighter-rouge">1.2.3-boop</code> to be just <code class="language-plaintext highlighter-rouge">1.2.3</code>, but at the end of the
+day I need the original tag to pull. Pipelib accomplishes this via wrappers.</p>
+
+<p>A wrapper is conceptually just that - an internal wrapper class around an item that allows for storing
+an original value, while still doing operations to change a current state. Wrappers are used inside
+steps and allow for things like sorting and comparison. You probably don’t need to worry about wrappers
+unless you want to develop for pipelib. By default, wrappers are “extracted away” to return the basic
+types. However, you can ask Pipelib to not do this unwrapping, and then you can get back
+the derived and original values:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="n">tags</span> <span class="o">=</span> <span class="p">[</span><span class="s">"1.2.3"</span><span class="p">,</span> <span class="s">"1.1.0"</span><span class="p">,</span> <span class="s">"8.3.2"</span><span class="p">]</span>
+<span class="n">updated</span> <span class="o">=</span> <span class="n">pipeline</span><span class="p">.</span><span class="n">Pipeline</span><span class="p">(</span><span class="n">steps</span><span class="p">.</span><span class="n">container</span><span class="p">.</span><span class="n">ContainerTagSort</span><span class="p">()).</span><span class="n">run</span><span class="p">(</span><span class="n">tags</span><span class="p">,</span> <span class="n">unwrap</span><span class="o">=</span><span class="bp">False</span><span class="p">)</span>
+
+<span class="c1"># Notice that this just looks like a set of strings...
+</span><span class="n">updated</span>
+<span class="p">[</span><span class="s">'8.3.2'</span><span class="p">,</span> <span class="s">'1.2.3'</span><span class="p">]</span>
+
+<span class="c1"># But actually we have wrappers, that each have an _original attribute
+</span><span class="nb">type</span><span class="p">(</span><span class="n">updated</span><span class="p">[</span><span class="mi">0</span><span class="p">])</span>
+<span class="n">pipelib</span><span class="p">.</span><span class="n">wrappers</span><span class="p">.</span><span class="n">version</span><span class="p">.</span><span class="n">VersionWrapper</span>
+
+</code></pre></div>
+</div>
+
+<h2 id="conclusion">Conclusion</h2>
+
+<p>I’ve had so much fun making this library! Like many of my projects it’s probably not super useful,
+but if you see a cool use case please let me know! I’m also happy to develop custom pipelines or steps
+for a use case that you might be interested in. Please don’t hesitate to ask me for help, I’m always running
+out of fun things to do :)</p>
+
+<blockquote>
+  <p>Why should I care?</p>
+
+</blockquote>
+
+<p>Arguably you could just hard code this kind of filtering and sorting, but I think the
+idea of being able to customize and assemble steps is a cool one. If the steps are provided
+in a library it might make it slightly easier, or your work more reproducible, because
+someone else can use the steps. And if you don’t care? That’s okay too.
I recognize this was
+mostly a fun project, and yet-another-itch I really wanted to scratch because I’ve never
+made a design like this before, either in terms of the idea or <a href="https://twitter.com/vsoch/status/1521670410852442112" target="_blank">underlying testing and automation</a>.</p>
+
+
+
+
+The Research Software Ecosystem
+
+2022-04-24T13:30:00-06:00
+https://hpc.social/2022/the-research-software-ecosystem
+<p>We recently published <a href="https://openresearchsoftware.metajnl.com/articles/10.5334/jors.359/" target="_blank">the Research Software Encyclopedia</a> and also have added several new parsers for obtaining new data, meaning the total collection
+of <a href="https://rseng.github.io/software/" target="_blank">curated research software</a> is greater than 1500
+entries. In honor of this collection, and of a library I’m working on called <a href="https://vsoch.github.io/citelang/getting_started/user-guide.html" target="_blank">CiteLang</a>, I wanted to do a small study to better understand:</p>
+
+<ol class="custom-counter">
+<li>What are the most valuable dependencies in our community, across languages?</li>
+<li>What are the most valuable dependencies in our community, by language?</li>
+<li>What is the credit allocation for each repository?</li>
+</ol>
+
+<h2 id="citelang">CiteLang</h2>
+
+<p>To step back for a second, let’s talk again about CiteLang. It has many functions - one of them
+being an ability to <a href="https://vsoch.github.io/2022/citelang-contrib/" target="_blank">assess open
+source contributions</a> via git, but its main purpose is to be a markdown syntax for citing software,
+meaning that we can:</p>
+
+<ol class="custom-counter">
+<li>Generate basic software credit trees, graphs, and markdown summaries.</li>
+<li>Derive a new, customizable model of credit based on published packages and dependencies.</li>
+<li>Provide a way to cite software in a paper and give credit without needing DOIs.</li>
+</ol>
+
+<p>As a simple example, I can run CiteLang over this markdown file with CiteLang references:</p>
+
+<div class="language-md highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="gh"># Summary</span>
+
+Portability and reproducibility of complex software stacks is essential for researchers to perform their work.
+High Performance Computing (HPC) environments add another level of complexity, where possibly conflicting
+dependencies must co-exist. Although container technologies like Singularity @conda{name=singularity} make
+it possible to "bring your own environment," without any form of central strategy to manage containers,
+researchers who seek reproducibility via using containers are tasked with managing their own container
+collection, often not taking care to ensure that a particular digest or version is used. The reproducibility
+of the work is at risk, as they cannot easily install and use containers, nor can they share their software
+with others.
+
+Singularity Registry HPC (shpc) @pypi{name=singularity-hpc} is the first of its kind to provide an easy means
+for a researcher to add their research software for sharing and collaboration with other researchers to an
+existing collection of over 200 popular scientific libraries @github{name=autamus/registry}
+@github{name=spack/spack, release=0.17}.
The software installs containers as environment modules that are easy
+to use and read documentation for, and exposes aliases for commands in the container that the researcher can
+add to their pipeline without thinking about complex interactions with a container. The simple addition of an
+entry to the registry maintained by shpc comes down to adding a yaml file, and after doing this, another
+researcher can easily install the same software, down to the digest, to reproduce the original work.
+
+<span class="gh"># References</span>
+
+<span class="c">&lt;!--citelang start--&gt;</span>
+<span class="c">&lt;!--citelang end--&gt;</span>
+</code></pre></div>
+</div>
+
+<p>And then run <code class="language-plaintext highlighter-rouge">citelang render paper.md</code> to get a <a href="https://gist.github.com/vsoch/41b4559d8f87eb9d6e62945e02689428" target="_blank">nice rendered table alongside your paper</a>! What CiteLang does is find the references in the paper; they look like this:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+@conda{name=singularity}
+@pypi{name=singularity-hpc}
+@github{name=autamus/registry}
+@github{name=spack/spack, release=0.17}
+
+</code></pre></div>
+</div>
+
+<p>Each of the references above is a package manager with a package name and (optionally) a version, and we can load in the metadata
+for each and then generate a table <a href="https://gist.github.com/vsoch/41b4559d8f87eb9d6e62945e02689428" target="_blank">that you see here</a> that summarizes credit across dependencies. In this model, we give some allocation of credit
+(default is 50%) to the main work (paper or software) citing the software, and then recursively parse dependencies up to some minimum level of credit to calculate scores. Dependencies shared across libraries are averaged together. The final table represents the credit that you give not only to the top level software, but to all nested dependencies, for the work that you did. And that’s only the basics! CiteLang takes this simple ability to parse references and extends it to automation, graphs, badges, and more! You can read more about CiteLang <a href="https://vsoch.github.io/citelang/getting_started/index.html" target="_blank">here</a>.</p>
+
+<blockquote>
+  <p>Publish or perish? How about neither? I just need to keep writing software!</p>
+
+</blockquote>
+
+<p>But do you see what is happening above? We aren’t requiring some artificial publication
+in order to cite software. We are citing it based on its actual usage, as a known dependency of some other software.
+In a nutshell, we don’t believe that “the traditional academic way” of citing papers makes sense for software, and instead
+of using DOIs we can use package managers and metadata as a source of truth, and derive the real value of a piece of software
+based on this ecosystem. This means that as a research software engineer, you can just keep doing what you are already doing, and if
+someone uses CiteLang to summarize their work, given that your software is published to a package manager you’ll get credit. There
+are so many cool ideas around this! But let’s start at the beginning. We first want to show how to summarize an ecosystem.</p>
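+
+<p>To make that credit model concrete, here is a toy calculation (a sketch of the idea only, not the CiteLang implementation): the root work keeps half the credit, and the other half is split across its dependencies, recursively, down to a cutoff.</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>def split_credit(name, deps, total=1.0, keep=0.5, cutoff=0.001, scores=None):
+    """Toy recursive credit split: keep half, pass half down the tree."""
+    scores = {} if scores is None else scores
+    children = deps.get(name, [])
+    # A leaf (or a share below the cutoff) keeps everything it was given
+    kept = total if not children or total * (1 - keep) &lt; cutoff else total * keep
+    scores[name] = scores.get(name, 0) + kept
+    for child in children:
+        split_credit(child, deps, (total - kept) / len(children), keep, cutoff, scores)
+    return scores
+
+deps = {"mypaper": ["numpy", "requests"], "requests": ["urllib3"]}
+print(split_credit("mypaper", deps))
+# {'mypaper': 0.5, 'numpy': 0.25, 'requests': 0.125, 'urllib3': 0.125}
+</code></pre></div>
+</div>
+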
+<p>That is exactly what we are going to do in this post.</p>
+
+<h2 id="the-research-software-ecosytem">The Research Software Ecosystem</h2>
+
+<p>Starting with these curated repositories from a <a href="https://rseng.github.io/rse/getting-started/scrapers/index.html" target="_blank"> set of scrapers</a> including the Journal of Open Source Software, the HAL Research Software Database, the Research Software NL Dictionary, ROpenSci, and
+The Molecular Sciences Software Institute, we can do a basic analysis to identify the most used (and thus valued) pieces of software in our ecosystem. My analysis plan was to:</p>
+
+<ol class="custom-counter">
+<li>Start with the current database.</li>
+<li>For each repository, look for requirements files to parse.</li>
+<li>Derive dependency data based on this requirements file.</li>
+<li>Combine and rank to discover the top dependencies!</li>
+</ol>
+
+<p>This of course is limited to the subset of software in our database, and the ability of CiteLang to parse a requirements file.
+Currently we parse setup.py and requirements.txt (Python), DESCRIPTION (R), go.mod (Go), package.json (npm), and Gemfile (ruby). Based on the
+<a href="https://rseng.github.io/rsepedia-analysis/analysis/languages/" target="_blank">breakdown of the languages</a> found in the RSEPedia, this is a reasonable start!</p>
+
+<div style="padding: 20px;">
+  <img src="https://vsoch.github.io/assets/images/posts/citelang/languages.png" />
+</div>
+
+<p>But it’s also kind of sad to see that my favorite languages (Go and Rust) are barely represented in our community. Also, the above
+should tell you that the R and Python results likely have some meaningful interpretation, but the others not so much, only because we don’t have a big enough sample. So for all of the above
+steps, for these 1500+ repositories and many languages, I wanted the entire process to be automated, always have potential for easy improvement,
+and run at some regular interval as new software comes into the Research Software Encyclopedia (also automated) so we can derive changes over time.
+If you don’t care to read further:</p>
+
+<ol class="custom-counter">
+<li><a href="https://rseng.github.io/rsepedia-analysis/" target="_blank">View the Research Software Ecosystem</a></li>
+<li><a href="https://rseng.github.io/rsepedia-analysis/analysis/languages/" target="_blank">Check out Languages here</a></li>
+<li><a href="https://rseng.github.io/rsepedia-analysis/analysis/dependencies/" target="_blank">Results for Dependencies here</a></li>
+<li><a href="https://rseng.github.io/rsepedia-analysis/analysis/repos/" target="_blank">Individual Repositories here</a></li>
+</ol>
+
+<p>For this first publication of the interface we have the following metrics:</p>
+
+<div style="padding: 20px;">
+  <img src="https://vsoch.github.io/assets/images/posts/citelang/ecosystem.png" />
+</div>
+
+<p>And I’m so excited because a tiny vision I had a few years ago to provide (and use) a community research software database is coming
+to life! So without further ado, I’m just going to jump into the cool results! It will be fun to see how these change over time.</p>
+
+<h3 id="python">Python</h3>
+
+<p>Ladies and gents, dinosaurs and rabbits! Your Python results:</p>
+
+<div style="padding: 20px;">
+  <img src="https://vsoch.github.io/assets/images/posts/citelang/python-deps.png" />
+</div>
+
+<p>So here is the first awesome insight. Is anyone really surprised to see numpy as the number one library?
+The credit value here says that the average Python repository is attributing about 3% of credit to numpy, meaning it is a direct or indirect dependency. Let that sink in! Here is the irony - when is the last time you cited numpy? You probably haven’t, because you’ve cited something
+that uses it. We don’t remember numpy despite the fact that it’s so core to everything that we do.</p>
+
+<blockquote>
+  <p>The fact that the most widely used library is rarely cited is huge evidence for why a manual “write papers and cite DOIs” approach just won’t work for software.</p>
+
+</blockquote>
+
+<p>What else do we see in this list? Let me name a few things. First, we can’t be so terrible at remembering to look at or visualize
+things because matplotlib is second. At least for research software, this is telling us that making plots or charts is important.
+The next (possibly surprising) result is that documentation and testing are at least represented, and this might be a biased sample
+because we include repositories that are peer reviewed (e.g., JoSS) and documentation and testing are necessary for that.
+Given this need for Python, sphinx and pytest come up as leaders to provide that. So here is another nugget of insight:</p>
+
+<blockquote>
+  <p>Some of us are so busy focusing on domain-specific software that we forget the importance of the “less sexy” research software that helps us test, document, view things, or even create simple data structures.</p>
+
+</blockquote>
+
+<p>This kind of “base” software has always been what I’ve been most interested in, and ironically what people tell me time and time again
+“That’s not research software.” Oh really? So something that is entirely powering the research community is not research software?
+Of course I have my own <a href="https://rseng.github.io/software/repository/github/0x0f0f0f/Metatheory.jl/annotate-taxonomy/" target="_blank">strong opinions</a> about a taxonomy for research software, but I would encourage those of you who are very dismissive to take a step back and
+consider what you are really saying.</p>
+
+<p>The next insight is that we see a lot of libraries for data formats (e.g., pyaml, h5py, lxml, and more lower in the list) and this is a testament to how important being able to read, serialize, and save data is.</p>
+
+<p>The final insight is the fact that requests is high in the list. For those of you not familiar, requests is a library for doing just that: making
+HTTP requests to get content from some webby place. This is a testament to the fact that our work is increasingly relying on external APIs,
+automation, or other resources provided on the web.</p>
+
+<p>You can see <a href="https://rseng.github.io/rsepedia-analysis/analysis/python/" target="_blank">the full Python results here</a>.</p>
+
+<h3 id="r">R</h3>
+
+<p>I’m less of an R programmer these days, but I think that these results also make sense.</p>
+
+<div style="padding: 20px;">
+  <img src="https://vsoch.github.io/assets/images/posts/citelang/r-deps.png" />
+</div>
+
+<p>We don’t see any huge leaders in the same way as we see numpy in Python, but not surprisingly the leader package
+for the R language is, well, R! I at first thought this was a bug, but actually R <code class="language-plaintext highlighter-rouge">DESCRIPTION</code> files that we parse do commonly include a pinned version of R:</p>
+
+<pre><code class="language-DESCRIPTION">
+Depends: R (&gt;= 3.4.1), TailRank,
+...
+
+</code></pre>
+
+<p>And so we actually can give credit to the language proper!</p>
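+
+<p>As a rough illustration of how simple this kind of static parsing can be (a toy sketch, not the actual CiteLang parser), pulling package names out of a Depends line might look like:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import re
+
+# The Depends line from the example above
+depends = "Depends: R (&gt;= 3.4.1), TailRank"
+
+# Drop the field name and any version constraints in parentheses
+entries = depends.split(":", 1)[1].split(",")
+packages = [re.sub(r"\(.*?\)", "", e).strip() for e in entries]
+print(packages)  # ['R', 'TailRank']
+</code></pre></div>
+</div>
+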
<p>If you don’t feel this is appropriate, feel free to skip this line and consider
+the top package jsonlite. This is also why I think json would be represented in Python if it wasn’t part of the standard library. Us research folks - we need our json! Overall I think we see a similar pattern here as we saw with Python. The libraries that float to the top are those that involve data structures (jsonlite, yaml), webby requests or similar (httr, curl), documentation and testing (knitr, rmarkdown), and graphics or visualization. What does this tell us about what is undervalued in research software? Again, it’s not the domain specific libraries, but rather the core stuff that enables those libraries.</p>
+
+<p>You can see <a href="https://rseng.github.io/rsepedia-analysis/analysis/R/" target="_blank">the full R results here</a>.</p>
+
+<h3 id="projects">Projects</h3>
+
+<p>If you are interested in a specific project in the RSEPedia, we also provide a project-specific table and badge!
+You can <a href="https://rseng.github.io/rsepedia-analysis/analysis/repos/" target="_blank">browse projects from here</a>,
+and here is an example of a badge generated for a project called <a href="https://rseng.github.io/rsepedia-analysis/repos/github/ORNL/tx2/README" target="_blank">github.com/ORNL/tx2</a> <a href="https://github.com/ORNL/tx2" target="_blank">(and on GitHub)</a>. Without even looking I can tell you we have some machine learning and/or visualization going on here (scikit-learn! umap! pandas! matplotlib)!</p>
+
+<div style="padding: 20px;">
+  <img src="https://vsoch.github.io/assets/images/posts/citelang/project.png" />
+</div>
+
+<p>Notice how numpy (as an example) shows up at multiple points in the tree - when we calculate an overall credit, say, for the ecosystem, we take that into account! And we can then peek at the project-specific table and sort of verify that yes, this is a Python ML/visualization project:</p>
+
+<div style="padding: 20px;">
+  <img src="https://vsoch.github.io/assets/images/posts/citelang/project-table.png" />
+</div>
+
+<p>And we see some surprises! Like, the slack-sdk? What? Believe it or not, that is pulled in by <a href="https://github.com/tqdm/tqdm/blob/4f208e72552c4d916aa4fe6a955349ee8b2ed353/setup.cfg#L87" target="_blank">tqdm</a>.
+The project-specific tables (and the description at the top) also give you a better sense of how CiteLang allocates
+credit. The top level package is given 50%, and then the other 50% is given to all dependencies in the same fashion.
+We cut off at a value of 0.001, and we do that in case we might be parsing dependencies forever down to some infinitesimally small amount.</p>
+
+<p>Finally, every project serves its own <a href="https://rseng.github.io/rsepedia-analysis/repos/github/ORNL/tx2/data.json" target="_blank">raw data</a></p>
+
+<div style="padding: 20px;">
+  <img src="https://vsoch.github.io/assets/images/posts/citelang/json-data.png" />
+</div>
+
+<p>and the site is searchable, because sites should be. 😄️</p>
+
+<h2 id="discussion">Discussion</h2>
+
+<p>I’m so happy (and a bit relieved, to be honest) to finally be able to show what I’ve been saying for years - that the most valuable software for research, and the software that is driving domain-specific research software, are the unsexy libraries that have to do with data structures, (maybe standards), documentation or testing, and data formats or retrieval. These are the packages that you aren’t going to remember to cite.
Also, this set is totally leaving out the software we use on a day to day basis in our CI, which arguably isn’t research software but has done more for the research community than anything I can think of - containers, version control (git), and continuous integration. We’d be a mess without it. We need to be more thankful and aware of this, and for some of y’all that turn up your nose at anything that isn’t a domain-science library, perhaps take a pause. Next, let’s talk about limitations and hopes for the future.</p>
+
+<h2 id="a-living-database">A Living Database</h2>
+
+<p>I wouldn’t have been happy with myself to simply publish software at one point in time and call it a day.
+The Research Software Encyclopedia is updated weekly, and so I’ve designed this analysis to do the same!
+This means that while we do cache a result for a newly added piece of software, we do continue to grow the analysis
+as new software is added. And since the tool will always use the newly updated <a href="https://github.com/vsoch/citelang" target="_blank">CiteLang</a>, any improvements to the parsers there will be reflected here! And if anyone wants to run the entire thing again (outside of the limit of GitHub Actions) they can clone the repository, nuke the _repos folder, and run the scripts again.</p>
+
+<h3 id="language-gaps">Language Gaps</h3>
+
+<p>The biggest gap in the RSEPedia is with respect to what we don’t see. First, despite being a prominent language, we don’t see anything for C++, because there isn’t a package manager with an API to use it. If you have a nifty (or even hacky) idea for how to parse a requirements file, <a href="https://github.com/vsoch/citelang/issues" target="_blank">I want to hear it</a>. The RSEPedia has support for spack, but most research-oriented C++ projects are not going to go out of their way to publish their package there, and we get no signal of the package being in spack when we clone the repository. Sppaaaaaack (sorry, it’s a bit of a tic at this point!) 😁️</p>
+
+<p>We also don’t see standard modules or libraries provided within a language. E.g., I can almost guarantee you a ton of Python libraries are importing json, but since it’s not a package manager library we wouldn’t see it. I suspect citelang could come up with a way to derive credit for these libraries by way of abstract syntax trees or just parsing the source code, although I haven’t done this yet because I’m not convinced it’s something people are as interested in. If you want to say thank you for the Python standard library, there is a <a href="https://www.python.org/psf/contrib/" target="_blank">donate button</a> on their contribution page (or you could contribute code). There is an even deeper level of parsing (at least for Python) that looks at function signatures, and I wrote a library called <a href="https://github.com/vsoch/caliper" target="_blank">caliper</a> in early 2021 to do that, and it’s able to generate <a href="https://raw.githubusercontent.com/vsoch/caliper-metrics/main/pypi/tensorflow/functiondb/functiondb-0.12.0rc1.json" target="_blank">function databases</a> for Python software of interest. This would be cool to do for some kind of (unrelated) compatibility analysis here, but yes that’s very different.</p>
+
+<h3 id="parsing-limitation">Parsing Limitation</h3>
+
+<p>For all requirements files except for Python, we are forced to do static parsing. While not perfect because bugs can happen for niche cases of someone defining requirements in a weird way, it’s a reasonable start.
There is always room for improvement, or adding more static parsers for requirements files I have not considered yet.</p>
+
+<p>However, this is not the case for the Python parsing (either requirements.txt or setup.py)! For Python these results are likely
+very good because we wrap the pypi package manager install command to derive a list of packages and versions from either a setup.py or requirements.txt. Don’t worry - nothing is installed, we either just parse the requirements file and return the results, or we use the solver
+against a setup.py to come to an equivalent list. We originally had a static parser (and still use this as a fallback); however, I talked to <a href="https://github.com/alecbcs" target="_blank">@alecbcs</a> and he had this fantastic idea! Will it likely need updates as time goes on, given
+the functions are private? Sure. But I’m happy to do that to get the much more accurate listing.</p>
+
+<p>In practice, the only setup.py files that I was not able to parse either had a bug (e.g., trying to read a file that doesn’t exist in the repository) or they were trying to use modules outside of the standard library. For all of the cases of brokenness, I opened issues on the respective repositories so we might have a better chance at parsing in the future! One detail is that we parse the first requirements file found. For a primary requirements file in the root of the repository, this is the best outcome. However, some repos don’t have a file in the root, and perhaps we find one in a documentation folder instead. Either way, the result represents our best effort at finding and parsing requirements given a cloned repository we don’t know the structure of in advance.</p>
+
+<h3 id="final-thoughts">Final Thoughts</h3>
+
+<p>Here are my final takeaways:</p>
+
+<h4 id="publication-is-not-for-research-software">Publication is not for Research Software</h4>
+
+<p>A system of credit that relies on software engineers to do extra manual work (to write papers) is never going to fully capture the ecosystem and give proper credit. It will only capture those that have the time and possibly privilege to take the extra time to write a paper.
+Publication only makes sense given that a piece of software is paired alongside a robust result, in which case fine, write the paper and
+also champion the software.</p>
+
+<h4 id="publication-does-not-actually-capture-credit">Publication Does not Actually Capture Credit</h4>
+
+<p>A system that also only skims the superficial top (the name of one package) and does not dig deep into a dependency tree is also going to miss insights and deserved attributions of credit. As the numpy example shows, nobody is actually citing numpy, but a ton of projects are using it somewhere in their dependency tree, so it deserves a lot of credit.</p>
+
+<h4 id="we-can-do-better">We Can Do Better</h4>
+
+<p>I have a pet peeve. I’m frankly just tired of people writing about credit and attribution but not doing anything about it. We could extend that to other things, but it’s especially an issue for this topic. Ironically they are writing <em>papers</em> and improving their publication record as they write about how publication and research software is a strained process. I may not have solved this problem, but damn at least I’m trying to actually do something about it instead of spurting gas.</p>
+
+<p>I find this idea exciting because there are so many directions you can go with it.
When I first designed the idea I imagined a database and online interface where you could essentially connect your GitHub repository, and akin to a builder service, parse your repository on some event and derive a new credit or citation graph. Or you could have some set akin to the RSEPedia that are also updated regularly. And then, by way of having that database, we could do these same queries (that currently I’m doing statically) to say “What are the most important libraries for this language? Across the ecosystem?” or “How has this changed over time?” It would be a true way to derive the value of a library without needing people to publish papers, and totally automated and integrated with package managers, which is where people already should be putting their software.
+Heck, if someone gave me a cloud and a little bit of funding I’d love to work on this. Are there good reasons or use cases? I don’t know, but maybe.</p>
+
+<p>So what do you think?</p>
+
+
+
+
+Spooky Allocator Issues and Fixes
+
+2022-04-13T01:00:00-06:00
+https://hpc.social/2022/spooky-allocator-issues-and-fixes
+<p>Recently we started noticing performance issues in the main branch of Ceph that ultimately were traced back to a commit last summer that changed parts of our AVL and hybrid disk allocator implementations in bluestore. Strangely, the issue only affected some of the NVMe drives in our test lab but not others. The quick <a href="https://github.com/ceph/ceph/pull/45884">fix</a> was to always update and save the allocator’s cursor position so that we don’t search (and fail) over and over in fast-fit mode for every allocation request. Another interesting offshoot of this though is that it may be much <a href="https://github.com/ceph/ceph/pull/45771">nicer</a> to limit fast-fit searches based on time rather than byte distance or the number of iterations.</p>
+
+
+
+
+LSF hookin' up with the CRIU
+
+2022-04-12T19:32:04-06:00
+https://hpc.social/2022/lsf-hookin-up-with-the-criu
+<p>With the unpredictable spring weather here in Southern Ontario, weekend projects
+are the order of the day. Whether it&rsquo;s fixing my bike for spring, repairing things in the home which I&rsquo;ve neglected for far too long, or tinkering with IT topics which have been percolating in my head, I am a textbook busybody.</p>
+
+<p>A few decades back, when I was a support engineer at Platform Computing, I had
+my first experience working with clients using both kernel-level and user-level
+checkpoint and restart through the HPC workload scheduler Platform LSF (now
+IBM Spectrum LSF). I distinctly recall that the user-level library was a bit tricky,
+as you had to link your home-grown code against it - and it had numerous
+limitations which I can&rsquo;t recall off the top of my head. Back then, like today,
+<a href="https://www.ibm.com/products/hpc-workload-management">IBM Spectrum LSF</a> provides a number of ways that administrators can extend capabilities using plug-ins.
+Checkpoint and restart is an example where plug-ins can be used. More about this
+later.</p>
+
+<p>I&rsquo;ve been keeping an eye on the project known as <a href="https://criu.org/Main_Page">CRIU</a> for some time. CRIU, which stands for <em>Checkpoint/Restore In Userspace</em>,
+provides checkpoint and restart functionality on Linux. And I thought it might be
+an interesting weekend project to integrate CRIU with LSF.
As it turns out,
+I was not blazing any trails here as I found that there are others <a href="https://labs.icahn.mssm.edu/minervalab/documentation/job-checkpoint/">already
+using CRIU with LSF</a> today. Nevertheless, I decided to give it a try.</p>
+
+<p>My system of choice for this tinkering was a dual-socket POWER9 based system
+running CentOS Stream 8 and IBM Spectrum LSF Suite for HPC v10.2.0.12. The
+LSF online documentation contains information on the specifications
+of the LSF plugins for <a href="https://www.ibm.com/docs/en/spectrum-lsf/10.1.0?topic=restart-configuration-enable-job-checkpoint">checkpoint and restart</a>. The plugins are known as <em>echkpnt</em> and <em>erestart</em>, where the &ldquo;e&rdquo; denotes external.</p>
+
+<p>Here is a quick rundown on the steps to integrate CRIU with LSF.</p>
+
+<ul>
+<li>It turns out that my system already had <em>criu</em> installed. It&rsquo;s a dependency
+of <em>runc</em>, which was installed as part of <em>podman</em>. This step really depends
+on your distro. In my case, <em>dnf provides criu</em> was my friend.</li>
+</ul>
+<div class="highlight"><pre><code class="language-plaintext"># uname -a
+Linux kilenc 4.18.0-373.el8.ppc64le #1 SMP Tue Mar 22 15:28:39 UTC 2022 ppc64le ppc64le ppc64le GNU/Linux
+
+# criu
+
+Usage:
+  criu dump|pre-dump -t PID [&lt;options&gt;]
+  criu restore [&lt;options&gt;]
+  criu check [--feature FEAT]
+  criu page-server
+  criu service [&lt;options&gt;]
+  criu dedup
+  criu lazy-pages -D DIR [&lt;options&gt;]
+
+Commands:
+  dump           checkpoint a process/tree identified by pid
+  pre-dump       pre-dump task(s) minimizing their frozen time
+  restore        restore a process/tree
+  check          checks whether the kernel support is up-to-date
+  page-server    launch page server
+  service        launch service
+  dedup          remove duplicates in memory dump
+  cpuinfo dump   writes cpu information into image file
+  cpuinfo check  validates cpu information read from image file
+
+Try -h|--help for more info</code></pre></div>
+
+<ul>
+<li>The <em>criu</em> command needs to be run as root to be able to checkpoint
+processes. As we are going to leverage criu directly in the LSF echkpnt and
+erestart scripts, I chose to enable sudo access for criu. To do this I simply
+added the following to <em>/etc/sudoers</em>.</li>
+</ul>
+<div class="highlight"><pre><code class="language-plaintext">gsamu ALL=NOPASSWD:/usr/sbin/criu</code></pre></div>
+
+<ul>
+<li>
+<p>Next, I tested that the basic <em>criu</em> functionality was working. I found
+this to be a useful <a href="https://www.redhat.com/en/blog/how-can-process-snapshotrestore-help-save-your-day">blog</a> on how to perform a simple test.</p>
+
+</li>
+<li>
+<p>With criu installed and working (see step 3), the next step was to create
+the <em>echkpnt</em> and <em>erestart</em> scripts which would ultimately call the appropriate
+<em>criu dump</em> and <em>criu restore</em> commands. These scripts will be named <em>echkpnt.criu</em> and <em>erestart.criu</em>. The <em>.criu</em> extension denotes the checkpoint and
+restart method name in LSF. The checkpoint method is specified at the time of
+job submission in LSF.</p>
+
+</li>
+</ul>
+<p>The key for the <em>echkpnt.criu</em> script is to build out the list of PIDs for
+the job in question. For this I used an inelegant approach - simply
+scraping the output of the LSF <em>bjobs -l</em> command. This list
+of PIDs is then used as arguments to the <em>criu dump</em> command.</p>
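+
+<p>To give a flavor of that scraping idea, here is a rough hypothetical sketch in Python (an illustration only, and not the actual <em>echkpnt.criu</em> script referenced below):</p>
+
+<div class="highlight"><pre><code class="language-plaintext">#!/usr/bin/env python3
+# Hypothetical helper, not the real echkpnt.criu: grab a job's PIDs
+# from bjobs -l and checkpoint the process tree with criu.
+import re
+import subprocess
+import sys
+
+job_id, chkpnt_dir = sys.argv[1], sys.argv[2]
+
+# Scrape the "PIDs: 418130 418131 ..." line out of bjobs -l output
+out = subprocess.run(["bjobs", "-l", job_id],
+                     capture_output=True, text=True, check=True).stdout
+pids = re.search(r"PIDs:((?:\s+\d+)+)", out).group(1).split()
+
+# criu dump takes one tree root; here we assume the first PID is the
+# process group leader, as in the bjobs output shown later
+subprocess.run(["sudo", "criu", "dump", "--tree", pids[0],
+                "--images-dir", chkpnt_dir, "--shell-job"], check=True)</code></pre></div>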
+<p>The example <em>echkpnt.criu</em> script is included below.</p>
+
+<!-- raw HTML omitted -->
+<p>I used a simple approach as well for <em>erestart.criu</em>. As per the specification
+for <em>erestart</em>, the key is to create a new LSF jobfile which contains
+the appropriate <em>criu restore</em> invocation, pointing to the checkpoint
+data. The example <em>erestart.criu</em> script is included below.</p>
+
+<!-- raw HTML omitted -->
+<ul>
+<li>
+<p>With the <em>echkpnt.criu</em> and <em>erestart.criu</em> scripts in the $LSF_SERVERDIR
+directory, the process to perform a checkpoint and restart of LSF jobs is
+straightforward using the <em>bchkpnt</em> and <em>brestart</em> commands, respectively.
+Here is a simple example.</p>
+
+</li>
+<li>
+<p>Submit a job as checkpointable. The checkpoint method <em>criu</em> is specified, as well as the location where the checkpoint data will be written.</p>
+
+</li>
+</ul>
+<div class="highlight"><pre><code class="language-plaintext">$ bsub -k "/home/gsamu/checkpoint_data method=criu" ./criu_test
+Job &lt;12995&gt; is submitted to default queue &lt;normal&gt;.</code></pre></div>
+
+<ul>
+<li>The executable <em>criu_test</em> simply writes a message to standard out every 3 seconds.</li>
+</ul>
+<div class="highlight"><pre><code class="language-plaintext">$ bpeek 12995
+&lt;&lt; output from stdout &gt;&gt;
+0: Sleeping for three seconds ...
+1: Sleeping for three seconds ...
+2: Sleeping for three seconds ...
+3: Sleeping for three seconds ...
+4: Sleeping for three seconds ...</code></pre></div>
+
+<ul>
+<li>
+<p>Next, we see that LSF has detected the job PIDs. Now we&rsquo;re ready to perform the checkpoint.
+<div class="highlight"><pre><code class="language-plaintext">$ bjobs -l 12995
+
+Job &lt;12995&gt;, User &lt;gsamu&gt;, Project &lt;default&gt;, Status &lt;RUN&gt;, Queue &lt;normal&gt;, Com
+                     mand &lt;./criu_test&gt;, Share group charged &lt;/gsamu&gt;
+Tue Apr 12 08:48:28: Submitted from host &lt;kilenc&gt;, CWD &lt;$HOME&gt;, C
+                     heckpoint directory &lt;/home/gsamu/checkpoint_data/12995&gt;;
+Tue Apr 12 08:48:29: Started 1 Task(s) on Host(s) &lt;kilenc&gt;, Alloc
+                     ated 1 Slot(s) on Host(s) &lt;kilenc&gt;, Executio
+                     n Home &lt;/home/gsamu&gt;, Execution CWD &lt;/home/gsamu&gt;;
+Tue Apr 12 08:48:38: Resource usage collected.
+                     MEM: 12 Mbytes; SWAP: 0 Mbytes; NTHREAD: 4
+                     PGID: 418130; PIDs: 418130 418131 418133 
+
+
+ MEMORY USAGE:
+ MAX MEM: 12 Mbytes; AVG MEM: 6 Mbytes
+
+ SCHEDULING PARAMETERS:
+           r15s   r1m  r15m   ut      pg    io   ls    it    tmp    swp    mem
+ loadSched   -     -     -     -       -     -    -     -     -      -      -  
+ loadStop    -     -     -     -       -     -    -     -     -      -      -  
+
+ RESOURCE REQUIREMENT DETAILS:
+ Combined: select[type == local] order[r15s:pg] 
+ Effective: select[type == local] order[r15s:pg] </code></pre></div>
+</p>
+
+</li>
+<li>
+<p>Initiate the checkpoint using the LSF <em>bchkpnt</em> command. The <em>-k</em> option is specified which will result in the job being checkpointed and killed.
+<div class="highlight"><pre><code class="language-plaintext">$ bchkpnt -k 12995
+Job &lt;12995&gt; is being checkpointed</code></pre></div>
+</p>
+
+</li>
+<li>
+<p>We see in the history of the job using the bhist command that the checkpoint was initiated and succeeded. The job was subsequently killed (TERM_CHKPNT).
+<div class="highlight"><pre><code class="language-plaintext">$ bhist -l 12995 + +Job &lt;12995&gt;, User &lt;gsamu&gt;, Project &lt;default&gt;, Command &lt;./criu_test&gt; +Tue Apr 12 08:48:28: Submitted from host &lt;kilenc&gt;, to Queue &lt;norm + al&gt;, CWD &lt;$HOME&gt;, Checkpoint directory &lt;/home/gsamu/checkp + oint_data/12995&gt;; +Tue Apr 12 08:48:29: Dispatched 1 Task(s) on Host(s) &lt;kilenc&gt;, Al + located 1 Slot(s) on Host(s) &lt;kilenc&gt;, Effec + tive RES_REQ &lt;select[type == local] order[r15s:pg] &gt;; +Tue Apr 12 08:48:31: Starting (Pid 418130); +Tue Apr 12 08:48:31: Running with execution home &lt;/home/gsamu&gt;, Execution CWD &lt; + /home/gsamu&gt;, Execution Pid &lt;418130&gt;; +Tue Apr 12 08:54:14: Checkpoint initiated (actpid 419029); +Tue Apr 12 08:54:15: Checkpoint succeeded (actpid 419029); +Tue Apr 12 08:54:15: Exited with exit code 137. The CPU time used is 2.1 second + s; +Tue Apr 12 08:54:15: Completed &lt;exit&gt;; TERM_CHKPNT: job killed after checkpoint + ing; + + +MEMORY USAGE: +MAX MEM: 12 Mbytes; AVG MEM: 11 Mbytes + +Summary of time in seconds spent in various states by Tue Apr 12 08:54:15 + PEND PSUSP RUN USUSP SSUSP UNKWN TOTAL + 1 0 346 0 0 0 347 </code></pre></div> +</p> + +</li> +<li> +<p>Restart the job from the checkpoint data with the LSF <em>brestart</em> command. A new jobID is assigned. +<div class="highlight"><pre><code class="language-plaintext">$ brestart /home/gsamu/checkpoint_data/ 12995 +Job &lt;12996&gt; is submitted to queue &lt;normal&gt;. + +$ bjobs -l 12996 + +Job &lt;12996&gt;, User &lt;gsamu&gt;, Project &lt;default&gt;, Status &lt;RUN&gt;, Queue &lt;normal&gt;, Com + mand &lt;./criu_test&gt;, Share group charged &lt;/gsamu&gt; +Tue Apr 12 08:55:57: Submitted from host &lt;kilenc&gt;, CWD &lt;$HOME&gt;, R + estart, Checkpoint directory &lt;/home/gsamu/checkpoint_data/ + /12996&gt;; +Tue Apr 12 08:55:58: Started 1 Task(s) on Host(s) &lt;kilenc&gt;, Alloc + ated 1 Slot(s) on Host(s) &lt;kilenc&gt;, Executio + n Home &lt;/home/gsamu&gt;, Execution CWD &lt;/home/gsamu&gt;; +Tue Apr 12 08:56:07: Resource usage collected. + MEM: 14 Mbytes; SWAP: 0 Mbytes; NTHREAD: 5 + PGID: 420069; PIDs: 420069 420070 420073 420074 420076 + + + MEMORY USAGE: + MAX MEM: 14 Mbytes; AVG MEM: 14 Mbytes + + SCHEDULING PARAMETERS: + r15s r1m r15m ut pg io ls it tmp swp mem + loadSched - - - - - - - - - - - + loadStop - - - - - - - - - - - + + RESOURCE REQUIREMENT DETAILS: + Combined: select[type == local] order[r15s:pg] + Effective: select[type == local] order[r15s:pg] </code></pre></div> +</p> + +</li> +<li> +<p>Viewing the standard output of the job, we see the point where it was killed and that it has picked up from where it left off.<br /> +<div class="highlight"><pre><code class="language-plaintext">$ bpeek 12996 +&lt;&lt; output from stdout &gt;&gt; +0: Sleeping for three seconds ... +1: Sleeping for three seconds ... +2: Sleeping for three seconds ... +3: Sleeping for three seconds ... +4: Sleeping for three seconds ... +…. +…. +110: Sleeping for three seconds ... +111: Sleeping for three seconds ... +112: Sleeping for three seconds ... +113: Sleeping for three seconds ... +/home/gsamu/.lsbatch/1649767708.12995: line 8: 418133 Killed ./criu_test +114: Sleeping for three seconds ... +115: Sleeping for three seconds ... +116: Sleeping for three seconds ... +117: Sleeping for three seconds ... +118: Sleeping for three seconds ... +119: Sleeping for three seconds ... +120: Sleeping for three seconds ... +.... 
+<p>We&rsquo;ve demonstrated how one can integrate CRIU checkpoint and restart
+with IBM Spectrum LSF using the <em>echkpnt</em> and <em>erestart</em> interfaces.
+As highlighted earlier, LSF provides a number of plugin interfaces
+which provide flexibility to organizations looking to do site-specific
+customizations.</p>
+
+
+
+
+    Relivin' the 90's - Amiga style
+    
+      2022-03-29T13:53:09-06:00
+      https://hpc.social/2022/relivin-the-90-s-amiga-style
+      <p>Although I very much started my experience with home computers with IBM
+compatibles running MSDOS in the late 1980&rsquo;s, I&rsquo;m a lifelong, self-professed
+Commodore-Amiga addict. I distinctly recall the launch of the Amiga A1000 and
+being dazzled by its multimedia capabilities around the same time that
+I had a PC XT with CGA graphics. I was instantly hooked. Having great video
+games for the time was just icing on the cake.</p>
+
+<p>I started my Amiga experience with an A500, which I quickly traded in for an
+A2000 model, which I still have today. I came across an A3000 in the late 1990&rsquo;s
+for a small sum, which I added to my collection. The A3000 is my favourite
+Amiga system, with onboard SCSI, and its design is reminiscent of the pizza-box
+UNIX servers which were common back in the day.</p>
+
+<p>The majority of my friends at the time were all-in on PCs. But for me there
+was just something a bit clinical and boring about them. The Amiga filled
+this gap for me and continues to do so. It&rsquo;s probably one of the big
+reasons why I still to this day tinker so much with non-X86 systems.</p>
+
+<p>Retro computing is a hobby that requires much time. So it&rsquo;s sometimes
+challenging to juggle this hobby with other things, especially as the weather
+turns warmer here in Southern Ontario. My A3000 system was one that I was
+looking to prioritize for resurrection this spring. This is in particular
+because the last thing I tinkered with on the A3000 roughly 20 years back was Amiga UNIX. Yes, my A3000 sat in storage for around 20 years! In the mid to late 90&rsquo;s, I ventured out to an Amiga speciality shop in London, Ontario (Canada) for a clearance they were having. It&rsquo;s there that I happened across Amiga UNIX software (tape and manuals), as well as the <a href="http://amiga.resource.cx/exp/a2410">Commodore A2410 High Resolution Graphics board</a>, <a href="http://amiga.resource.cx/exp/a2065">Commodore A2065 Ethernet board</a> and a Wangtek 5150ES tape drive
+(which is mounted in a SUN Microsystems external case). Here is a view of the
+Amiga <em>ShowConfig</em> output.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/system_config.png" />
+</figure>
+
+<p>I had the foresight to remove the motherboard RTC batteries before
+storing the systems. But my A3000 refused to boot when I took it out of storage
+late in 2021. After much fiddling, I decided to reach out to a local Amiga
+repair specialist. The gentleman worked at Comspec(?), which did repair work
+for Commodore back in the day.</p>
+
+<p>I recently got my A3000 back after the fault was corrected, and a new
+replaceable coin battery for the RTC was installed. The fault turned
+out to be an issue with some of the ZIP memory sockets. 
Because of the
+difficulty and cost of purchasing ZIP memory back in the day, I purchased
+a <a href="http://amiga.resource.cx/exp/amifast">ProvTech AmiFast 3000</a> ZIP to SIMM converter which allows me to use 72-pin SIMM memory.</p>
+
+<p>With a working A3000 system, it was time to look at software once again.
+I found my old dusty Amiga OS 3.5 and OS 3.9 original media sets. With
+some effort I was able to get Amiga OS 3.9 installed on the system. It&rsquo;s
+not that the installation was difficult; it was more a matter of getting my
+CDROM working and clearing out some of the cobwebs from my Amiga knowledge.</p>
+
+<p>Additionally, I was able to successfully boot Amiga UNIX off an external
+SCSI disk which I installed back in roughly &lsquo;98 or &lsquo;99. I plan to write
+more about Amiga UNIX in a subsequent post. For those who are curious about
+Amiga UNIX there is a fantastic wiki <a href="https://www.amigaunix.com/doku.php">here</a>.</p>
+
+<p>Back to Amiga OS 3.9. After getting it installed successfully I had a few goals:</p>
+
+<ul>
+<li>Get my Amiga online via the A2065 Ethernet board</li>
+<li>Get a high resolution Workbench (desktop) via the A2410</li>
+<li>Relive the memories!</li>
+</ul>
+<p><strong>Amiga on the &lsquo;Net</strong></p>
+
+<p>I recall back in the day various Amiga TCP/IP implementations such as
+AS225 and AmiTCP. Consulting with the gentleman who repaired my Amiga,
+he suggested <a href="http://roadshow.apc-tcp.de/index-en.php">Roadshow</a>. I&rsquo;d never
+heard of Roadshow before, but downloaded and got the trial version working
+easily. I needed to copy the a2065.device driver for the
+A2065 board to the system and create the necessary configuration file
+in <em>SYS:Devs/NetInterfaces</em>. The configuration file A2065 is shown in the
+image below.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/roadshow1.png" />
+</figure>
+
+<p>A quick aside here. I had to create a CD with a bunch of software
+including Roadshow and a number of utilities from <a href="http://aminet.net/">Aminet</a> such as the A2065 device driver. Aminet is one of the go-to places for Amiga
+software on the net.</p>
+
+<p>I found Roadshow so easy to get set up and working that I purchased a license
+for it. I also purchased licenses at the same time for <a href="http://www.bitplan.pl/goadf/">GoADF</a>, which is a great utility for managing ADF (Amiga Disk Format) files.</p>
+
+<p>With a working TCP/IP stack, I installed the trial version of <a href="https://www.ibrowse-dev.net/">iBrowse</a>, in addition to the FTP utility RNOXfer (from Aminet). With a working FTP client, I could now more easily move files to and from the A3000. This definitely helped for the next stage.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/rnoxfer.png" />
+</figure>
+
+<p>Just a note that browsing on the Amiga is definitely a retro experience.
+This is in no way a slight against the fine folks who develop and maintain
+browsers such as iBrowse. I&rsquo;m considering upgrading my iBrowse demo
+license to a full license in the future as well.</p>
+
+<p>I also took the opportunity at this time to install an NTP client. Even though
+my Amiga now has a working RTC, I still like to use NTP to keep the clock
+accurately set. For this I used the AmiTimeKeeper utility from Aminet.
+I pointed it, as I normally do, at the NTP servers at the National Research
+Council (NRC) of Canada. 
TimeKeeper has a CLI interface as well as a UI
+status window to provide information on the synchronization status.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/timekeeper.png" />
+</figure>
+
+<p><strong>Workbench à la A2410</strong></p>
+
+<p>It was time to move on to having a high resolution Workbench (desktop) experience. I also own a Picasso II video card which is presently in my Amiga 2000 system. Using P96 or CyberGraphX on the Picasso II was quite straightforward in
+the past. My goal this time was to use the A2410, which, from what I could
+read, was supported in CyberGraphX V4.</p>
+
+<p>Thing is, when I went to install CyberGraphX V4 from my original media,
+I did not see the A2410 listed. It was only when I applied the update(s) that I could see the A2410 listed as a supported video card. Note the final patch version of CyberGraphX I&rsquo;m using is from <em>cgxv42_rc6.lha</em> which I downloaded
+from Aminet <a href="https://aminet.net/package/driver/video/CyberGraphX_4.3rc6">here</a>.</p>
+
+<p>The A2410 CyberGraphX (CGX) driver installed without a hitch, but getting
+it to work was a challenge. Although I could get a Workbench to appear in the
+desired resolution and colours, when I double clicked on any icon on the desktop, the system would hang. It was only through trial and error that I discovered
+that some specific CyberGraphX variables had to be set. The screenshot below of the CyberGraphX settings tool shows the current, working settings. Ultimately,
+the hang seemed to be addressed by enabling the CGX <em>SUPERGELS</em> variable.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/cyber_prefs.png" />
+</figure>
+
+<p>Here is a look at the CGX <em>showcgxconfig</em> tool output.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/cyber_config.png" />
+</figure>
+
+<p>A screenshot of the Workbench driven by the A2410 is shown below. The
+performance is not great, but it does work, and I&rsquo;m super pleased about
+that. On the subject of graphics cards, I&rsquo;ve had my eye on the MNT <a href="https://shop.mntmn.com/products/zz9000-for-amiga-preorder">ZZ9000</a> which I&rsquo;m considering purchasing to breathe more life into my A3000.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/desktop_clean.png" />
+</figure>
+
+<p>The next stage in this journey is to get the same configuration working
+with Amiga OS 3.2, which I purchased from the folks at <a href="https://retrorewind.ca/">Retro Rewind</a> in Toronto. According to what I&rsquo;ve read, I need to downgrade the
+intuition.library version to get CyberGraphX working with OS 3.2. I&rsquo;ll
+write more about this when I have the opportunity.</p>
+
+<p>And now, I&rsquo;m ready to begin to relive those memories!</p>
+
+<p>Update! Here are some photos of the A2065, A2410 and A3000 daughterboard
+from my system.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/a2065.jpg" />
+</figure>
+
+<figure><img src="https://www.gaborsamu.com/images/a2410.jpg" />
+</figure>
+
+<figure><img src="https://www.gaborsamu.com/images/daughterboard.jpg" />
+</figure>
+
+<figure><img src="https://www.gaborsamu.com/images/1992.jpg" />
+</figure>
+
+
+
+
+    An unstructured rant on running long-lived software services
+    
+      2022-03-12T16:00:00-07:00
+      https://hpc.social/2022/an-unstructured-rant-on-running-long-lived-software-services
+      <p>⁃ Be kind to your colleagues. Be kind to your users. Be kind to yourself. 
This is a long haul and you’ll all fuck up.</p>
+
+<p>⁃ The natural environment for your code is production. It will run there longer than it does anywhere else. Design for prod first, and if possible, make your dev environment act like prod.</p>
+
+<p>⁃ Legacy code is the only code worth caring about.</p>
+
+<p>⁃ Users do weird stuff, but they usually have a very good reason, at least in their context. Learn from them.</p>
+
+<p>⁃ It’s 2022, <em>please</em> do structured logging.</p>
+
+<p>⁃ Contexts and tracing make everyone&#8217;s lives easier when it comes time to debug. At minimum, include a unique request id with every request and plumb it through the system.</p>
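+
+<p>⁃ A minimal sketch of what those last two points can look like together (the
+names here are illustrative, not any particular library&#8217;s API):</p>
+
+<div class="highlight"><pre><code class="language-plaintext">import json
+import sys
+import uuid
+
+def log(request_id, event, **fields):
+    # One JSON object per line: easy to grep, parse, and aggregate.
+    print(json.dumps({"request_id": request_id, "event": event, **fields}),
+          file=sys.stderr)
+
+def handle_request(payload):
+    # Mint the id at the edge, then pass it to every downstream call.
+    request_id = str(uuid.uuid4())
+    log(request_id, "request_received", size=len(payload))
+    # ... do the actual work, always carrying request_id along ...
+    log(request_id, "request_done")</code></pre></div>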
+
+<p>⁃ Do your logging in a separate thread. It sucks to find a daemon blocked and hanging because of a full disk or a down syslog server.</p>
+
+<p>⁃ Don’t page for individual machines going down. Do provide an easy or automated way for bad nodes to get thrown out of the system.</p>
+
+<p>⁃ Be prepared for your automation to be the problem, and include circuit breakers or kill switches to stop it. I&#8217;ve seen health checks that started flagging every machine in the fleet as bad, whether it was healthy or not. We didn&#8217;t bring down prod because the code assumed that if it flagged more than 15% of the fleet as bad, the problem was probably with the test, not the service.</p>
+
+<p>⁃ Make sure you have a way to know who your users are. If you allow anonymous access, you&#8217;ll discover in five years that a business-critical team you&#8217;ve never heard of is relying on you.</p>
+
+<p>⁃ Make sure you have a way to turn off access for an individual machine, user, etc. If your system does anything more expensive than sending network requests, it will be possible for a single bad client to overwhelm a distributed system with thousands of servers. Turning off their access is easier than begging them to stop.</p>
+
+<p>⁃ If you don’t implement QoS early on, it will be hellish to add it later, and you will certainly need it if your system lasts long enough.</p>
+
+<p>⁃ If you provide a client library, and your system is internal only, have it send logs to the same system as your servers. This will help so much in tracing issues back to misbehaving clients.</p>
+
+<p>⁃ Track the build time for every deployed server binary and monitor how old they are. If your CI process deploys daily, week-old binaries are a problem. Month-old binaries are a major incident.</p>
+
+<p>⁃ If you can get away with it (internal services): track the age of client library builds and either refuse to support builds older than X, or just cut them off entirely. It sucks to support requests from year-old clients; force them to upgrade!</p>
+
+<p>⁃ Despite all this, you will at some point start getting requests from an ancient software version, or otherwise malformed. Make sure these requests don’t break anything.</p>
+
+<p>⁃ Backups are a pain, and the tooling is often bad, but I swear they will save you one day. Take the time to invest in them.</p>
+
+<p>⁃ Your CI process should exercise your turnup process, your decommission process, and your backups workflow. Life will suck later if you discover one of these is broken.</p>
+
+<p>⁃ Third-party services go down. Your service goes down too, but they probably won’t happen at the same time. Be prepared to either operate without them, or mirror them yourself.</p>
+
+<p>⁃ Your users will never, ever care if you’re down because of a dependency. Every datacenter owned by AWS could be hit by a meteor at the same time, but <em>your</em> user will only ever ask “why doesn’t my service work?”</p>
+
+<p>⁃ Have good human relationships with your software dependencies. Know the people who develop them, keep in touch with them, make sure you understand each other. This is especially true internally but also important with external deps. In the end, software is made of people.</p>
+
+<p>⁃ If users don’t have personal buy-in to the security policy, they <em>will</em> find ways to work around it and complain about you for making their lives harder. Take the time to educate them, or you&#8217;ll be fighting them continuously.</p>
+
+
+
+
+    What I've Learned from Looking at 1,500 Jobs Leading Research Computing Teams
+    
+      2022-02-26T00:00:00-07:00
+      https://hpc.social/2022/what-i-ve-learned-from-looking-at-1-500-jobs-leading-research-computing-teams
+      <h2 id="job-numbers-continue-to-grow-lots-of-data-and-product-management-jobs-ir-groups-at-universities-becoming-bigger-employers">Job numbers continue to grow; lots of data and product management jobs; IR groups at Universities becoming bigger employers</h2>
+
+<p>(Note: This post is adapted from <a href="https://www.researchcomputingteams.org/newsletter_issues/0111">#111</a> of the <a href="https://www.researchcomputingteams.org">Research Computing Teams Newsletter</a>)</p>
+
+<p>A year and a half ago I <a href="https://www.dursi.ca/post/jobs_managing_research_computing_teams">posted</a> my observations on the first 500 jobs posted to <a href="https://www.researchcomputingteams.org/jobs">the job board</a> - we’re getting close to 1,500 now, and it’s worth taking a look to see what, if anything, has changed in research computing team leadership and management jobs<sup id="fnref:1"><a class="footnote" href="https://www.dursi.ca/feed.xml#fn:1" rel="footnote">1</a></sup>.</p>
+
+<p>There are some trends that have continued since that posting. The jobs in industry are growing vastly beyond what I would have imagined possible when I started in research computing in the 1990s. (The number of jobs working with biomedical data of one sort or another in particular is just astonishing.) Rather than technical computing being a niche, it’s utterly mainstream now. There are a <em>lot</em> of jobs out there, and I don’t even bother posting generic “data science manager” jobs unless they’re connected to some real complex research questions - which happens a lot, whether it’s fraud detection or improving financial modelling or supporting biomedical research. Some really fun-looking jobs that would probably feel a lot like working at a research computing centre keep coming up at consultancies — go visit a client and help them with their data science/data engineering/<em>etc</em> needs. There’s also a growing number of data science/engineering jobs at Universities that fall under the Provost/VP Operations rather than the VPR’s side of the house — Institutional Research, looking at (say) student success in support of the teaching mission.</p>
+
+<p>Because of the growth in the number of jobs, it is very much a candidate’s market out there. I’m seeing postings — <em>especially</em> for the traditional academic “director of research computing” jobs — stay open for cringe-inducing periods of time. A few in particular I’ve watched with vicarious embarrassment continue coming up in the listings for 8+ months. 
That’s a bad sign for us as hiring managers - the market for individual contributors is at least as tight - but it’s amazing news for us as individuals.</p>
+
+<p>When I wrote that post in late 2020 it was just regulated industries like health/biotech or financial services that were developing data governance or other data management jobs, but now data management is popping up everywhere, whether it’s retail or logistics or anywhere else. These are being joined, again first in the regulated industries, by data privacy or data risk management jobs. Privacy-preserving data analysis jobs (and teams supporting same with software development) are also starting to be more common (and there’s a <em>lot</em> of cool research and technology work to be done there!)</p>
+
+<p>I’m also (finally!) starting to see explicitly <em>product</em> management jobs in research computing, both academic and private-sector. You see it around data management — bundling and curating of data into real data products — but also in software development, especially around analysis pipelines for some reason.</p>
+
+<p>Probably related to the growth of product <em>vs</em> project thinking, I’m starting to see a lot of “delivery manager” jobs that would have been called “project managers” just a year ago. Projects are defined by having clearly defined start- and end-points up-front. “Delivery” jobs seem to focus on sustained, ongoing work, more appropriate for long-lived products.</p>
+
+<p>These products that keep coming up often combine data, software, and systems one way or another. That really points to weaknesses around organizing by type of skills - the research software engineering movement, for instance - as the lines between software and systems in this DevOps, infrastructure-as-code era are very fuzzy; and as data grows more and more important, data skills are needed everywhere.</p>
+
+<p>Especially for us as managers or leads, but also for individual contributors as they grow their skills, it’s important to have a pretty holistic view of research computing and data and not try to break it up into silos. The growing number of data engineering jobs is a great example. That work often involves all three of software, systems, and data expertise. Data engineering is getting so broad and important that not only are there different sub-fields, in large organizations there are likely to be <a href="https://medium.com/data-arena/team-topologies-for-data-engineering-teams-a15c5eb3849c">completely distinct data engineering teams</a> doing different work. Trying to decide which of those jobs are “research software engineering” jobs and which aren’t is not a productive way forward, for those candidates or for us as a community.</p>
+
+<p>Needless to say, the growth of remote jobs has been off the charts - especially in the private sector, although the academic institutions are gamely doing what they can to keep up (often hampered by institutional policies).</p>
+
+<p><strong>Late June 2022 update</strong>: At the time that I write this, there’s a slowdown in hiring in tech, especially among early-stage startups. That slowdown due to economic conditions is <em>not</em>, as far as I can tell, affecting these more research-oriented kinds of jobs. The job board doesn’t have a lot of jobs from startups anyway. 
The larger organizations - the biotech firms or the banking firms doing fraud detection research or the computing providers or academic groups or… - clearly do not view these roles as “nice to haves” that can wait until there’s a bit more economic certainty.</p>
+
+<hr />
+
+<div class="footnotes">
+  <ol>
+    <li id="fn:1">
+      <p>What counts as such a job? Any job that involves leading, or mentoring people, or managing projects, programs, or products, in software, systems, or data curation/management/engineering/analysis to support the solution of research problems is a good fit. If you are hiring for such a job, feel free to <a href="https://airtable.com/shrL6QGic3Mv9JFrs">submit it to the job board</a>. <a class="reversefootnote" href="https://www.dursi.ca/feed.xml#fnref:1">&#8617;</a></p>
+
+    </li>
+  </ol>
+</div>
+
+
+
+
+    A supportive job interview story
+    
+      2022-02-25T16:00:00-07:00
+      https://hpc.social/2022/a-supportive-job-interview-story
+      <p>(adapted from an <a href="https://lobste.rs/s/1bwpi8/have_you_ever_had_given_really_good#c_1r7cs6">old lobste.rs comment</a>)</p>
+
+<p>My favorite interview ever was a systems interview that didn’t go as planned. This was for an SRE position, and while I expected the interview to be a distributed systems discussion, the interviewer instead wanted to talk kernel internals.</p>
+
+<p>I was not <em>at all</em> prepared for this, and admitted it up front. The interviewer said something along the lines of, “well, why don’t we see how it goes anyway?”</p>
+
+<p>He then proceeded to teach me a <em>ton</em> about how filesystem drivers work in Linux, in the form of leading me carefully through the interview question he was “asking” me. The interviewer was incredibly encouraging throughout, and we had a good discussion about why certain design decisions worked the way they did.</p>
+
+<p>I ended the interview (a) convinced I had bombed it, but (b) having had an excellent time anyway and having learned a bunch of new things. I later learned the interviewer had recommended hiring me based on how our conversation had gone, though I didn’t end up taking the job for unrelated reasons having to do with relocation.</p>
+
+<p>I’ve given a number of similar interviews since, on system design or general sysadmin skills. I’ve always tried to go into these thinking about both where I could learn, and where I could teach, and how either outcome would give the candidate a chance to shine.</p>
+
+
+
+
+    Interactive Development Containers
+    
+      2022-02-15T12:30:00-07:00
+      https://hpc.social/2022/interactive-development-containers
+      <p>I’ve recently been interested in developer workflows. Aside from being a developer, I feel
+like the tooling for our community, and especially for HPC or hybrid environments, is lacking.
+As a simple example, let’s ask a basic question:</p>
+
+<blockquote>
+  <p>How do I start developing here and move it over there?</p>
+
+</blockquote>
+
+<p>For the most part, creating a development container is fairly straightforward, and we can even bind-mount source
+code from the host to work on in one editor terminal and then build and run or test in another. However,
+for the moving part, it gets shoddy. Our best bet is to rebuild the container with the
+most updated source code, push to a registry, and then pull down somewhere else.
+For a container that is a binary and not layers provided by a registry, we could even scp it. 
+If we do this right, we will have an automated build and deploy that triggers when we
+merge new code into main, but do you see the problem? What about the code that we want
+to test that isn’t ready to merge? This is why we typically would need to manually
+push to a registry with some kind of “work in progress” tag and then pull somewhere else.
+Minimally we’d need to build fresh again, and then reproduce all the steps to set up our environment.</p>
+
+<h2 id="interactive-development-containers">Interactive Development Containers</h2>
+
+<p>Now I don’t have all the answers, but recently <a href="https://github.com/alecbcs" target="_blank">@alecbcs</a> and
+I have been dreaming about what kinds of development environments we want, with
+functionality such as:</p>
+
+<ol class="custom-counter">
+  <li>Saving the container state without leaving it.</li>
+  <li>Loading or saving or otherwise interacting with named environments.</li>
+  <li>Inspecting or interacting with container metadata, also without leaving the container.</li>
+  <li>Moving files or checking the container’s size, likewise without leaving it.</li>
+</ol>
+
+<p>And actually I won’t even get to answering the first question in this post about moving something
+from one place to another, but rest assured it is an important one. This post is about some prototype
+or fun testing work that we’ve started around these ideas.
+The playground for some of these early ideas has been <a href="https://syspack.github.io/paks/" target="_blank">Paks</a>.</p>
+
+<div style="padding: 20px;">
+  <img src="https://github.com/syspack/paks/raw/main/docs/assets/img/paks.png" />
+</div>
+
+<p>Paks is a Python library that I’m calling a developer wrapper for containers.
+Mind you, it’s more of a playground right now to experiment with ideas. But I’ve had so
+much fun even this early on that I want to share what I’ve learned.</p>
+
+<h3 id="wrapper">Wrapper</h3>
+
+<p>Because Paks is a wrapper, you will run containers using the paks command. Here are a few quick examples.</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="nv">$ </span>paks run ubuntu
+<span class="nv">$ </span>paks run <span class="nt">--shell</span> /bin/sh busybox
+<span class="nv">$ </span>paks run <span class="nt">--container-tech</span> podman busybox
+
+</code></pre></div>
+</div>
+
+<p>What happens on the backend, and what took me a bit to figure out, is that we need to run a subprocess,
+but create a pseudo terminal to better watch and interact with it. This is going to happen in the
+"interactive_command" function below. But unless you
+want your terminal to get wonky, we need to use <a href="https://docs.python.org/3/library/termios.html" target="_blank">termios</a> to
+grab the current tty and make sure it gets restored no matter what at the end. 
That looks like this:</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code> + <span class="k">def</span> <span class="nf">interactive_command</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">cmd</span><span class="p">):</span> + <span class="s">""" + Ensure we always restore original TTY otherwise terminal gets messed up + """</span> + <span class="c1"># Controller to get history +</span> <span class="bp">self</span><span class="p">.</span><span class="n">hist</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">commands</span><span class="p">.</span><span class="n">history</span> + + <span class="c1"># save original tty setting then set it to raw mode +</span> <span class="n">old_tty</span> <span class="o">=</span> <span class="n">termios</span><span class="p">.</span><span class="n">tcgetattr</span><span class="p">(</span><span class="n">sys</span><span class="p">.</span><span class="n">stdin</span><span class="p">)</span> + <span class="n">old_pty</span> <span class="o">=</span> <span class="n">termios</span><span class="p">.</span><span class="n">tcgetattr</span><span class="p">(</span><span class="n">sys</span><span class="p">.</span><span class="n">stdout</span><span class="p">)</span> + <span class="k">try</span><span class="p">:</span> + <span class="bp">self</span><span class="p">.</span><span class="n">_interactive_command</span><span class="p">(</span><span class="n">cmd</span><span class="p">)</span> + <span class="k">finally</span><span class="p">:</span> + <span class="n">termios</span><span class="p">.</span><span class="n">tcsetattr</span><span class="p">(</span><span class="n">sys</span><span class="p">.</span><span class="n">stdin</span><span class="p">,</span> <span class="n">termios</span><span class="p">.</span><span class="n">TCSADRAIN</span><span class="p">,</span> <span class="n">old_tty</span><span class="p">)</span> + <span class="n">termios</span><span class="p">.</span><span class="n">tcsetattr</span><span class="p">(</span><span class="n">sys</span><span class="p">.</span><span class="n">stdout</span><span class="p">,</span> <span class="n">termios</span><span class="p">.</span><span class="n">TCSADRAIN</span><span class="p">,</span> <span class="n">old_pty</span><span class="p">)</span> + +</code></pre></div> +</div> + +<p>What happens if you don’t do that? Your terminal gets weird and wonky. 
And then in the interactive
+command function, this is where we launch a subprocess with a new pseudo terminal:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+    <span class="n">tty</span><span class="p">.</span><span class="n">setraw</span><span class="p">(</span><span class="n">sys</span><span class="p">.</span><span class="n">stdin</span><span class="p">.</span><span class="n">fileno</span><span class="p">())</span>
+
+    <span class="c1"># open pseudo-terminal to interact with subprocess
+</span>    <span class="n">openpty</span><span class="p">,</span> <span class="n">opentty</span> <span class="o">=</span> <span class="n">pty</span><span class="p">.</span><span class="n">openpty</span><span class="p">()</span>
+
+    <span class="c1"># use os.setsid() to make it run in a new process group, or bash job control will not be enabled
+</span>    <span class="n">p</span> <span class="o">=</span> <span class="n">subprocess</span><span class="p">.</span><span class="n">Popen</span><span class="p">(</span>
+        <span class="n">cmd</span><span class="p">,</span>
+        <span class="n">preexec_fn</span><span class="o">=</span><span class="n">os</span><span class="p">.</span><span class="n">setsid</span><span class="p">,</span>
+        <span class="n">stdin</span><span class="o">=</span><span class="n">opentty</span><span class="p">,</span>
+        <span class="n">stdout</span><span class="o">=</span><span class="n">opentty</span><span class="p">,</span>
+        <span class="n">stderr</span><span class="o">=</span><span class="n">opentty</span><span class="p">,</span>
+        <span class="n">universal_newlines</span><span class="o">=</span><span class="bp">True</span><span class="p">,</span>
+    <span class="p">)</span>
+
+    <span class="c1"># Welcome to Paks!
+</span>    <span class="bp">self</span><span class="p">.</span><span class="n">welcome</span><span class="p">(</span><span class="n">openpty</span><span class="p">)</span>
+
+</code></pre></div>
+</div>
+
+<p>The <a href="https://stackoverflow.com/questions/45911705/why-use-os-setsid-in-python" target="_blank">setsid</a> pre-exec function
+ensures the child process runs in a new session and won’t exit, sort of akin to a daemon.
+So at face value, yes it is doing exactly what you think - we are shelling into the container
+and watching the command line and looking for paks-known commands. And I didn’t use a Python keylogger because
+I found that <a href="https://github.com/boppreh/keyboard" target="_blank">keyboard</a> requires sudo (like really?!)
+and <a href="https://pynput.readthedocs.io/en/latest/" target="_blank">pynput</a> is really scary because it doesn’t just get keys from the terminal - it’s watching anything you type anywhere! That gave me the heebie jeebies. I hope there is some scanner for pypi that is looking for that package
+and checking that it’s not being malicious.</p>
+
+<p>All of the above said, and despite all the time spent, I’m not convinced that this exact method is
+the best way to be running commands from inside the container. 
There are other ideas
+that need to be tested!</p>
+
+<h3 id="structure">Structure</h3>
+
+<p>We could have talked about this first, but let me show you the basic structure of paks
+so you get an understanding of the components.</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>paks
+
+# Backends are different wrappers, so logically we start with podman and docker
+├── backends
+│   ├── base.py
+│   ├── docker.py
+│   ├── __init__.py
+│   └── podman.py
+
+# The client is what you interact with on the command line. This shows the various commands available.
+├── cli
+│   ├── config.py
+│   ├── env.py
+│   ├── __init__.py
+│   └── run.py
+
+# This is a central controller for things
+├── client.py
+
+# Here are all the built-in, interactive commands paks supports!
+├── commands
+│   ├── command.py
+│   ├── cp.py
+│   ├── env.py
+│   ├── history.py
+│   ├── __init__.py
+│   ├── inspect.py
+│   └── state.py
+├── defaults.py
+├── env.py
+├── logger.py
+
+# Coming soon - load your own commands!
+├── plugins.py
+├── schemas.py
+├── settings.py
+├── settings.yml
+├── templates.py
+├── utils
+└── version.py
+</code></pre></div>
+</div>
+
+<p>So that should give you the gist - we have container wrappers (backends) and then
+commands that we can issue while we are inside the container. Let’s talk about them next.</p>
+
+<h3 id="saving-state">Saving State</h3>
+
+<p>The first thing I wanted to try with Paks was saving container state without needing
+to open a separate terminal and save from the outside. The use case for this is that given I’m in an interactive
+container and I’ve made some changes, I don’t want to exit and rebuild. All y’all reproducibility folks
+can stop wincing, and realize that we also need more temporary or throwaway development environments like this.
+Reproducibility is important, but mostly for the final production thing, and only up to a level that doesn’t
+give us pain. So how might I do this?</p>
+
+<p>For paks, while you are inside the container (let’s say ubuntu) you simply ask to <code class="language-plaintext highlighter-rouge">#save</code>:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+$ paks run ubuntu
+# touch PANCAKES
+# #save
+Saving container...
+sha256:d82aaa268feb59344cf31a757ce7f5c0caa6a6bbd10b8d0af1d55cdbc50b609b
+[+] Building 0.2s (5/5) FINISHED
+...
+=&gt; =&gt; writing image sha256:f58ae524d8644400b33c078f19612cba7849ef8f3ea158e2291ac697a4129080
+=&gt; =&gt; naming to docker.io/library/busybox-saved
+Untagged: dockerio-busybox-joyous-hippo-3922-gloopy-peanut-9044:latest
+Deleted: sha256:d82aaa268feb59344cf31a757ce7f5c0caa6a6bbd10b8d0af1d55cdbc50b609b
+Deleted: sha256:f58ae524d8644400b33c078f19612cba7849ef8f3ea158e2291ac697a4129080
+Successfully saved container! ⭐️
+</code></pre></div>
+</div>
+
+<p>And then you can see that there is an ubuntu-saved container!</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="nv">$ </span>docker images | <span class="nb">grep </span>ubuntu
+ubuntu-saved   latest   93e336d994de   2 minutes ago   72.8MB
+ubuntu         latest   54c9d81cbb44   7 days ago      72.8MB
+
+</code></pre></div>
+</div>
+
+<p>So this has saved me some tiny bit of energy to open up another terminal, remember how to docker commit,
+and then also rebuild with a squash to minimize the layers (as there is a maximum number we don’t want to hit).
+What Paks could then eventually do is make it easy to move this entire container between
+places, e.g., from your local machine to HPC without a hitch. I haven’t started to work on that yet
+because this is a fun side project.</p>
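+
+<p>(For reference, the manual dance that <em>#save</em> spares us is roughly the
+following; a sketch based on the description above, not necessarily the exact
+commands Paks runs under the hood.)</p>
+
+<div class="highlight"><pre><code class="language-plaintext"># In a second terminal: snapshot the running container to a new image...
+$ docker commit &lt;container-id&gt; ubuntu-saved
+
+# ...then rebuild from it with --squash (an experimental daemon feature)
+# to keep the layer count down.
+$ docker build --squash -t ubuntu-saved - &lt;&lt;EOF
+FROM ubuntu-saved
+EOF</code></pre></div>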
+
+<h3 id="environments">Environments</h3>
+
+<p>One thing I do a lot is use GitHub tokens to do fun stuff with the API. I usually need to
+keep this in some hidden file, then find it, open it, copy paste it, and export it in the container.
+And then I do that a million times when I have to run a new container. But with Paks, we can
+create a named environment on the host (a file to source with exports):</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="nv">$ </span>paks <span class="nb">env </span>edit github
+
+</code></pre></div>
+</div>
+
+<p>You can also quickly show an environment:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="nv">$ </span>paks <span class="nb">env </span>show github
+<span class="nv">GITHUB_TOKEN</span><span class="o">=</span>xxxxxxxxxxx
+
+</code></pre></div>
+</div>
+
+<p>And then in our container, as many times as we need, load it seamlessly!</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+root@9ec6c3d43591:/# <span class="c">#envload github</span>
+Loading environment...
+Successfully loaded environment github
+
+root@9ec6c3d43591:/# <span class="nb">export </span><span class="nv">GITHUB_TOKEN</span><span class="o">=</span>xxxxxxxxx
+root@9ec6c3d43591:/# <span class="nb">export </span><span class="nv">GITHUB_USER</span><span class="o">=</span>dinosaur
+
+</code></pre></div>
+</div>
+
+<p>If only my GitHub username was dinosaur! 😁️ Is it loaded?</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+root@9ec6c3d43591:/# <span class="nb">env</span> | <span class="nb">grep </span>GITHUB
+<span class="nv">GITHUB_USER</span><span class="o">=</span>dinosaur
+<span class="nv">GITHUB_TOKEN</span><span class="o">=</span>xxxxxxxxx
+
+</code></pre></div>
+</div>
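+
+<p>(Under the hood, a named environment is just a small file of exports that gets
+sourced; something like the following, where the path is an illustrative guess
+rather than Paks’ actual layout.)</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ cat ~/.paks/envs/github
+export GITHUB_TOKEN=xxxxxxxxx
+export GITHUB_USER=dinosaur</code></pre></div>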
+
+<p>Okay, so to be fair, there are a bunch of other commands for inspection and size,
+and I’m not going to go through them all! You can see them
+<a href="https://syspack.github.io/paks/getting_started/user-guide.html" target="_blank">in the Paks user guide</a>.
+And I don’t mean to say you should use this - you probably shouldn’t. But you might be interested to try it out.</p>
+
+<h3 id="parsing-keystrokes">Parsing Keystrokes</h3>
+
+<p>So the most interesting part of this project has been learning about input from the terminal,
+and it’s actually the reason I wanted to write this post: to share what I learned. Let’s go back to the interactive
+function where we ran a subprocess and created a pseudo terminal. There actually is a pretty simple way
+to watch what is being typed:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c1"># This is the subprocess return code, keep going until we are done (e.g.
+have a return code)
+</span><span class="k">while</span> <span class="n">p</span><span class="p">.</span><span class="n">poll</span><span class="p">()</span> <span class="ow">is</span> <span class="bp">None</span><span class="p">:</span>
+
+    <span class="c1"># Wait for io completion (e.g., see man select)
+</span>    <span class="n">r</span><span class="p">,</span> <span class="n">w</span><span class="p">,</span> <span class="n">e</span> <span class="o">=</span> <span class="n">select</span><span class="p">.</span><span class="n">select</span><span class="p">([</span><span class="n">sys</span><span class="p">.</span><span class="n">stdin</span><span class="p">,</span> <span class="n">openpty</span><span class="p">],</span> <span class="p">[],</span> <span class="p">[])</span>
+
+    <span class="c1"># Was it a new input?
+</span>    <span class="k">if</span> <span class="n">sys</span><span class="p">.</span><span class="n">stdin</span> <span class="ow">in</span> <span class="n">r</span><span class="p">:</span>
+        <span class="n">terminal_input</span> <span class="o">=</span> <span class="n">os</span><span class="p">.</span><span class="n">read</span><span class="p">(</span><span class="n">sys</span><span class="p">.</span><span class="n">stdin</span><span class="p">.</span><span class="n">fileno</span><span class="p">(),</span> <span class="mi">10240</span><span class="p">)</span>
+        <span class="n">new_char</span> <span class="o">=</span> <span class="n">terminal_input</span><span class="p">.</span><span class="n">decode</span><span class="p">(</span><span class="s">"utf-8"</span><span class="p">)</span>
+
+        <span class="c1"># Do something with what you see here
+</span>
+    <span class="c1"># Was it a new output?
+</span>    <span class="k">elif</span> <span class="n">openpty</span> <span class="ow">in</span> <span class="n">r</span><span class="p">:</span>
+        <span class="n">o</span> <span class="o">=</span> <span class="n">os</span><span class="p">.</span><span class="n">read</span><span class="p">(</span><span class="n">openpty</span><span class="p">,</span> <span class="mi">10240</span><span class="p">)</span>
+        <span class="k">if</span> <span class="n">o</span><span class="p">:</span>
+            <span class="n">os</span><span class="p">.</span><span class="n">write</span><span class="p">(</span><span class="n">sys</span><span class="p">.</span><span class="n">stdout</span><span class="p">.</span><span class="n">fileno</span><span class="p">(),</span> <span class="n">o</span><span class="p">)</span>
+</code></pre></div>
+</div>
+
+<p>I learned a lot from this! Let’s talk about it.</p>
+
+<h4 id="debugging">Debugging</h4>
+
+<p>So the first thing I learned is that my typical “import IPython” and “IPython.embed()”
+isn’t going to work as easily as normal, because (at least superficially) I didn’t
+see a way to have it sort of injected into the process. Anything that is interactive in
+that loop is still (conceptually) running on my host. So when I use IPython
+it does some weird stuff with carriage returns, but it’s still possible to interact with it
+a little bit. 
So what I wound up doing so I could easily see every keypress was to write
+to a file in append mode:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="s">'/tmp/file.txt'</span><span class="p">,</span> <span class="s">'a'</span><span class="p">)</span> <span class="k">as</span> <span class="n">fd</span><span class="p">:</span>
+    <span class="n">fd</span><span class="p">.</span><span class="n">write</span><span class="p">(</span><span class="n">new_char</span><span class="p">)</span>
+</code></pre></div>
+</div>
+
+<p>This was kind of neat because I could be typing in one terminal, and then have
+a file open (watching it) that updates with changes, and I’d get a sense of what
+is going on. I could append anything to this file to debug. And this is also really
+different from how we normally use subprocess, where maybe we will parse entire lines
+at once:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="n">p</span> <span class="o">=</span> <span class="n">subprocess</span><span class="p">.</span><span class="n">Popen</span><span class="p">([</span><span class="s">'python'</span><span class="p">,</span><span class="s">'thing.py'</span><span class="p">],</span> <span class="n">stdout</span><span class="o">=</span><span class="n">subprocess</span><span class="p">.</span><span class="n">PIPE</span><span class="p">)</span>
+<span class="k">while</span> <span class="bp">True</span><span class="p">:</span>
+    <span class="n">line</span> <span class="o">=</span> <span class="n">p</span><span class="p">.</span><span class="n">stdout</span><span class="p">.</span><span class="n">readline</span><span class="p">()</span>
+    <span class="k">if</span> <span class="ow">not</span> <span class="n">line</span><span class="p">:</span>
+        <span class="k">break</span>
+</code></pre></div>
+</div>
+
+<p>because here we are reading one character at a time! So what we essentially need to do
+is keep a string that we continue appending to unless there is a newline, up or down,
+or left or right to indicate moving the cursor.</p>
+
+<h4 id="ascii-characters">Ascii Characters</h4>
+
+<p>I started to see characters that my editor didn’t know - e.g., likely
+escape sequences and other ascii that showed up in the little question mark box.
+I quickly realized that I was seeing <a href="https://www.w3resource.com/python-exercises/python-basic-exercise-86.php" target="_blank">ascii</a>
+codes (and some characters that couldn’t be parsed), so the solution was to look at the ord
+of the character and compare it to a number. For example, for a backspace
+the number is 127. So to act on it I might do:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="c1"># if we have a backspace (ord 127)
+</span><span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">new_char</span><span class="p">)</span> <span class="o">==</span> <span class="mi">1</span> <span class="ow">and</span> <span class="nb">ord</span><span class="p">(</span><span class="n">new_char</span><span class="p">)</span> <span class="o">==</span> <span class="mi">127</span><span class="p">:</span>
+
+    <span class="c1"># This is our in progress line. If we have content, backspace! 
+</span>    <span class="k">if</span> <span class="nb">len</span><span class="p">(</span><span class="n">string_input</span><span class="p">)</span> <span class="o">&gt;</span> <span class="mi">0</span><span class="p">:</span>
+        <span class="n">string_input</span> <span class="o">=</span> <span class="n">string_input</span><span class="p">[:</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span>
+
+    <span class="c1"># But if we don't, just write the character for the person to see and
+</span>    <span class="c1"># keep collecting new characters (continue in the loop)
+</span>    <span class="k">if</span> <span class="ow">not</span> <span class="n">string_input</span><span class="p">:</span>
+        <span class="n">os</span><span class="p">.</span><span class="n">write</span><span class="p">(</span><span class="n">openpty</span><span class="p">,</span> <span class="n">terminal_input</span><span class="p">)</span>
+        <span class="k">continue</span>
+
+<span class="c1"># Otherwise (not a backspace) add to our growing line to parse further!
+</span><span class="k">else</span><span class="p">:</span>
+    <span class="n">string_input</span> <span class="o">=</span> <span class="n">string_input</span> <span class="o">+</span> <span class="n">new_char</span>
+</code></pre></div>
+</div>
+
+<p>The above is basically looking for a backspace, and if we find one, we remove
+one character from the line we are assembling. Otherwise we just add the new character
+to the line.</p>
+
+<h4 id="xterm-sequences">xterm sequences</h4>
+
+<p>And a similar thing happens for pressing up/down and right/left, except the
+terminal parses them as “[A”, “[B”, “[C”, and “[D”, respectively, and often with
+an escape sequence first. There are <a href="https://en.wikipedia.org/wiki/ANSI_escape_code" target="_blank">some nice tables here</a>
+for the interested reader! This was also the point where I realized how challenging parsing input is!
+Along with needing to account for every character, you also need to account for platform
+differences. That’s also why I view this library as mostly for development and thinking,
+or at least mostly for Linux and bash shells, because I’m not sure I could ever handle them all. 
+So for the purposes of my library,
+for now I decided I’m not going to handle moving left and right,
+nor do I want to deal with weird extra ascii characters that are added, so I just clean them up.</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>
+<span class="c1"># Get rid of left/right
+</span><span class="n">string_input</span> <span class="o">=</span> <span class="n">string_input</span><span class="p">.</span><span class="n">replace</span><span class="p">(</span><span class="s">"[D"</span><span class="p">,</span> <span class="s">""</span><span class="p">).</span><span class="n">replace</span><span class="p">(</span><span class="s">"[C"</span><span class="p">,</span> <span class="s">""</span><span class="p">)</span>
+
+<span class="c1"># Replace weird characters and escape sequences
+</span><span class="n">string_input</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">clean</span><span class="p">(</span><span class="n">string_input</span><span class="p">)</span>
+</code></pre></div>
+</div>
+
+<p>Yes, that probably means some of your ninja shortcuts won’t work perfectly when running paks,
+and if you absolutely want one to be parsed please let me know and we can add it.</p>
+
+<h4 id="newlines">Newlines</h4>
+
+<p>So the gold nugget of content that Paks is interested in is when you press enter.
+This means you’ve finished typing something and there is some version of a newline
+or carriage return. This is also a pretty variable thing depending on the platform you are
+on - newlines can come in very different forms! I tried to honor the ones that I see most often:</p>
+
+<ol class="custom-counter">
+  <li><strong>\r\n</strong>: Windows </li>
+  <li><strong>\n</strong>: UNIX (e.g., Mac OSX)</li>
+  <li><strong>\r</strong>: Mac (pre OSX)</li>
+</ol>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="n">has_newline</span> <span class="o">=</span> <span class="s">"</span><span class="se">\n</span><span class="s">"</span> <span class="ow">in</span> <span class="n">string_input</span> <span class="ow">or</span> <span class="s">"</span><span class="se">\r</span><span class="s">"</span> <span class="ow">in</span> <span class="n">string_input</span>
+</code></pre></div>
+</div>
+
+<p>At this point, we can start acting on what we see. E.g., if the user has asked for any
+kind of exit, I honor it.</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c1"># Universal exit command
+</span><span class="k">if</span> <span class="s">"exit"</span> <span class="ow">in</span> <span class="n">string_input</span> <span class="ow">and</span> <span class="n">has_newline</span><span class="p">:</span>
+    <span class="k">print</span><span class="p">(</span><span class="s">"</span><span class="se">\n\r</span><span class="s">Container exited.</span><span class="se">\n\r</span><span class="s">"</span><span class="p">)</span>
+    <span class="k">return</span> <span class="bp">self</span><span class="p">.</span><span class="n">uri</span><span class="p">.</span><span class="n">extended_name</span>
+</code></pre></div>
+</div>
+
+<p>The return of the name at the end is to handle cleaning up the image, which was allocated
+a temporary name.</p>
+
+<h3 id="history">History</h3>
+
+<p>One of the more interesting parts of this project was realizing that people use history, a lot.
+At least I do. 
This is going to appear as an up or down press, and only when a newline is found
+is some item in history re-executed. So first let’s look at exploring history with up/down. There are
+two cases - pressing up/down without a newline:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c1"># Pressing up or down, but not enter
+</span><span class="k">if</span> <span class="p">(</span><span class="s">"[A"</span> <span class="ow">in</span> <span class="n">string_input</span> <span class="ow">or</span> <span class="s">"[B"</span> <span class="ow">in</span> <span class="n">string_input</span><span class="p">)</span> <span class="ow">and</span> <span class="ow">not</span> <span class="n">has_newline</span><span class="p">:</span>
+    <span class="n">string_input</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">get_history</span><span class="p">(</span><span class="n">string_input</span><span class="p">,</span> <span class="n">openpty</span><span class="p">)</span>
+    <span class="n">os</span><span class="p">.</span><span class="n">write</span><span class="p">(</span><span class="n">openpty</span><span class="p">,</span> <span class="n">terminal_input</span><span class="p">)</span>
+    <span class="k">continue</span>
+</code></pre></div>
+</div>
+
+<p>And with one:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c1"># Pressing up or down with enter
+</span><span class="k">if</span> <span class="p">(</span><span class="s">"[A"</span> <span class="ow">in</span> <span class="n">string_input</span> <span class="ow">or</span> <span class="s">"[B"</span> <span class="ow">in</span> <span class="n">string_input</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_newline</span><span class="p">:</span>
+    <span class="n">string_input</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">get_history</span><span class="p">(</span><span class="n">string_input</span><span class="p">,</span> <span class="n">openpty</span><span class="p">)</span>
+    <span class="n">os</span><span class="p">.</span><span class="n">write</span><span class="p">(</span><span class="n">openpty</span><span class="p">,</span> <span class="n">terminal_input</span><span class="p">)</span>
+</code></pre></div>
+</div>
+
+<p>If we don’t have a newline, we add a continue to keep parsing characters the user is
+typing. If we do have a newline, we let the loop keep running to keep parsing the line of history we retrieved.
+But let’s step back and talk about that history. We basically want to retrieve whatever line of history
+the user is asking for, because to us it just looks like up and down arrows. You could imagine
+restoring the previous line, and then editing it. This actually proved to be quite challenging,
+because I realized (by default) when we start running a container (well, ubuntu and centos)
+the history is stored in memory and not written to ~/.bash_history. This led to
+<a href="https://twitter.com/vsoch/status/1492377777684639748" target="_blank">this thread</a> and some people coming in to <a href="https://twitter.com/ajdecon/status/1492381132998033409" target="_blank">quickly help</a>
+and others coming in just to say “Why are you doing this with containers it makes no sense stop.” Yeah, right. 
If I
+listened to every person that has ever told me to stop working on something because “REASONS!” I wouldn’t
+ultimately work on much at all.</p>
+
+<p>The short answer was that I needed a function to be able to get a line of history, based on the
+number of times pressing up or down. For my first attempt I said “nevermind this, I’ll just save my own history!”
+but that got hugely complicated very fast. It turns out we don’t just stupidly type commands over and over:
+we are constantly using more characters on the keyboard than letters and numbers, retrieving old things to edit,
+and updating again. In practice I found that I could keep up with simple parsing, but it would get out of sync
+over a longer session. There also is the issue that people can tweak the amount of history saved, or how it’s saved,
+and there are a set of environment <a href="https://www.redhat.com/sysadmin/history-command" target="_blank">variables and commands</a>
+to do that. So most containers will start running and save history to memory and not file (and this makes
+sense in case there is sensitive information) but it was problematic for me because I couldn’t parse it.
+For example, when someone presses up and down a bunch of times, I might see:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="o">[</span>A[A[A[A[A[B[A
+</code></pre></div>
+</div>
+
+<p>This is a reference to some previous command that I can only find in history,
+given I’m parsing the input/output as I am. For my second attempt (well, maybe the second through
+tenth) I tried different variations of being able to parse the history.
+If you look at <a href="https://twitter.com/ajdecon/status/1492381132998033409" target="_blank">the tweet</a>
+you’ll see we need to run:</p>
+
+<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span><span class="nb">history</span> <span class="nt">-a</span>
+</code></pre></div>
+</div>
+
+<p>to start writing what’s in memory to file. I didn’t want to do this on every command, because along
+with the user seeing it and the UI being awful, it was just too much. Instead, I realized that I had a small
+opportunity when the user first shells into the container (and is expecting a jump in their UI) to run whatever
+I need and then clear the terminal. 
So I ran it there, right before a clear and welcome message.</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code> + <span class="k">def</span> <span class="nf">welcome</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">openpty</span><span class="p">):</span> + <span class="s">""" + Welcome the user and clear terminal + """</span> + <span class="c1"># Don't add commands executed to history +</span> <span class="n">os</span><span class="p">.</span><span class="n">write</span><span class="p">(</span><span class="n">openpty</span><span class="p">,</span> <span class="bp">self</span><span class="p">.</span><span class="n">encode</span><span class="p">(</span><span class="s">" export PROMPT_COMMAND='history -a'</span><span class="se">\r</span><span class="s">"</span><span class="p">))</span> + <span class="n">os</span><span class="p">.</span><span class="n">write</span><span class="p">(</span><span class="n">openpty</span><span class="p">,</span> <span class="bp">self</span><span class="p">.</span><span class="n">encode</span><span class="p">(</span><span class="s">" clear</span><span class="se">\r</span><span class="s">"</span><span class="p">))</span> + <span class="n">os</span><span class="p">.</span><span class="n">write</span><span class="p">(</span><span class="n">openpty</span><span class="p">,</span> <span class="bp">self</span><span class="p">.</span><span class="n">encode</span><span class="p">(</span><span class="s">" ### Welcome to PAKS! ###</span><span class="se">\r</span><span class="s">"</span><span class="p">))</span> + +</code></pre></div> +</div> + +<p>And with this method you aren’t aware of the extra commands at all! And did you notice the spaces above? That’s another trick! Any command that you type with a leading +space won’t be saved to history, and this is thanks to <a href="https://unix.stackexchange.com/questions/115934/why-does-bash-have-a-histcontrol-ignorespace-option">HISTCONTROL</a>, which has an ignorespace option. I think most people / containers +set it to ignore both leading spaces and duplicates:</p> + +<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code>root@1c268386714a:/# echo $HISTCONTROL +ignoredups:ignorespace +</code></pre></div> +</div> + +<p>That said, I don’t explicitly try to reset this in the container, so that could be a bug +if there is a container base that doesn’t do that. And I’m pretty sure centos doesn’t come with clear! +I’ll likely need to work on this a bit more.</p> + +<blockquote> + <p>For now, please consider this only working for debian/ubuntu bases and we can inspect the other ones later!</p> + +</blockquote> + +<p>Okay, so now let’s look at the function to get history (which calls self.hist.run under the hood). For now, just ignore the command to +get the history, that’s actually done via a Paks command that we will talk about after. The short version: count the ups, subtract the downs, and index that far back into the history list.
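</p> + +<p>To see that idea in isolation before the real function, here is a tiny standalone toy (invented for illustration, not Paks code):</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Toy example: two up-arrow presses followed by enter +history = ["echo hello", "echo hi", "ls"] +line = "[A[A\r" + +# Net number of ups minus downs tells us how far back to look +change = line.count("[A") - line.count("[B") +print(history[-change])  # prints "echo hi", two lines back +</code></pre></div> +</div> + +<p>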
+Here is what is going on:</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">def</span> <span class="nf">get_history</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">line</span><span class="p">,</span> <span class="n">openpty</span><span class="p">):</span> + <span class="s">""" + Given an input with some number of up/down and newline, derive command. + """</span> + <span class="c1"># Calculate the absolute change of ups/downs +</span> <span class="n">up</span> <span class="o">=</span> <span class="n">line</span><span class="p">.</span><span class="n">count</span><span class="p">(</span><span class="s">"[A"</span><span class="p">)</span> + <span class="n">down</span> <span class="o">=</span> <span class="n">line</span><span class="p">.</span><span class="n">count</span><span class="p">(</span><span class="s">"[B"</span><span class="p">)</span> + <span class="n">change</span> <span class="o">=</span> <span class="n">up</span> <span class="o">-</span> <span class="n">down</span> + + <span class="c1"># pushed down below history (maybe they are angry?) +</span> <span class="k">if</span> <span class="n">change</span> <span class="o">&lt;=</span> <span class="mi">0</span><span class="p">:</span> + <span class="k">return</span> <span class="s">""</span> + + <span class="c1"># Retrieve history, actually via a command run from the outside to get the file +</span> <span class="n">history</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">hist</span><span class="p">.</span><span class="n">run</span><span class="p">(</span> + <span class="n">container_name</span><span class="o">=</span><span class="bp">self</span><span class="p">.</span><span class="n">uri</span><span class="p">.</span><span class="n">extended_name</span><span class="p">,</span> + <span class="n">out</span><span class="o">=</span><span class="n">openpty</span><span class="p">,</span> + <span class="n">history_file</span><span class="o">=</span><span class="bp">self</span><span class="p">.</span><span class="n">settings</span><span class="p">.</span><span class="n">history_file</span><span class="p">,</span> + <span class="n">user</span><span class="o">=</span><span class="bp">self</span><span class="p">.</span><span class="n">settings</span><span class="p">.</span><span class="n">user</span><span class="p">,</span> + <span class="p">)</span> + <span class="n">history</span> <span class="o">=</span> <span class="p">[</span><span class="n">x</span> <span class="k">for</span> <span class="n">x</span> <span class="ow">in</span> <span class="n">history</span><span class="p">.</span><span class="n">split</span><span class="p">(</span><span class="s">"</span><span class="se">\n</span><span class="s">"</span><span class="p">)</span> <span class="k">if</span> <span class="n">x</span><span class="p">]</span> + + <span class="c1"># No history, nothing to return +</span> <span class="k">if</span> <span class="ow">not</span> <span class="n">history</span><span class="p">:</span> + <span class="k">return</span> <span class="s">""</span> + + <span class="c1"># The change is outside the length of history +</span> <span class="k">if</span> <span class="n">change</span> <span class="o">&gt;</span> <span class="nb">len</span><span class="p">(</span><span class="n">history</span><span class="p">):</span> + <span class="k">return</span> <span class="s">""</span> + + <span class="c1"># 
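A worked example: "[A[A[A[B" has up=3, down=1, so change=2 and we return history[-2]. +</span>    <span class="c1"># 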
here we are looking back up into history (negative index) +</span> <span class="n">newline</span> <span class="o">=</span> <span class="n">history</span><span class="p">[</span><span class="o">-</span><span class="mi">1</span> <span class="o">*</span> <span class="n">change</span><span class="p">]</span> + + <span class="c1"># Add back any characters typed AFTER the up/down presses +</span> <span class="n">newline</span> <span class="o">+=</span> <span class="n">re</span><span class="p">.</span><span class="n">split</span><span class="p">(</span><span class="s">"(\[A|\[B)"</span><span class="p">,</span> <span class="n">line</span><span class="p">,</span> <span class="mi">1</span><span class="p">)[</span><span class="o">-</span><span class="mi">1</span><span class="p">]</span> + <span class="k">return</span> <span class="n">newline</span> +</code></pre></div> +</div> + +<p>The above might not be perfect, but it worked the best for everything that I tried! +This allows us to issue a command that paks knows, press up to get it again, and then edit +it and have the command work correctly. Speaking of commands…</p> + +<h3 id="commands">Commands</h3> + +<p>The core meat of paks is the commands that it recognizes. Every command has a <a href="https://github.com/syspack/paks/blob/ab61458a061c555434e5d3406914612fd1d60442/paks/commands/command.py#L26" target="_blank">base class</a> +that is going to handle parsing a line (with a main command and optional args or kwargs, depending on the command), +ensuring all required variables are passed (this is largely internal to the library and even a developer user +doesn’t need to think about it unless they want to change what is passed), and then providing functions for basic kinds of +execution. So let’s step back and first look at how we find a command (or executor). Basically, once we have a newline +and we’ve parsed it per the above (looking up history and such) we can sniff it to see if it matches a known +command pattern:</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="c1"># If we have a newline (and possibly a command) +</span><span class="k">if</span> <span class="n">has_newline</span><span class="p">:</span> + <span class="bp">self</span><span class="p">.</span><span class="n">run_executor</span><span class="p">(</span><span class="n">string_input</span><span class="p">,</span> <span class="n">openpty</span><span class="p">)</span> + + <span class="c1"># Add derived line to the history +</span> <span class="n">os</span><span class="p">.</span><span class="n">write</span><span class="p">(</span><span class="n">openpty</span><span class="p">,</span> <span class="n">terminal_input</span><span class="p">)</span> + <span class="n">string_input</span> <span class="o">=</span> <span class="s">""</span> +</code></pre></div> +</div> + +<p>The function “run_executor” is what makes that check: it looks for a Paks command on the line and handles it. +And no matter what, we reset our string input to be empty given that the user pressed enter, because +they are going to start typing fresh. But before that, this function “run_executor” is going to see +if there are any known commands, and if so, run them!
That function looks like this:</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code> +<span class="k">def</span> <span class="nf">run_executor</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">string_input</span><span class="p">,</span> <span class="n">openpty</span><span class="p">):</span> + <span class="s">""" + Given a string input, run executor + """</span> + <span class="c1"># Get out early if it's not a Paks command (always starts with #) +</span> <span class="n">string_input</span> <span class="o">=</span> <span class="n">string_input</span><span class="p">.</span><span class="n">replace</span><span class="p">(</span><span class="s">"[A"</span><span class="p">,</span> <span class="s">""</span><span class="p">).</span><span class="n">replace</span><span class="p">(</span><span class="s">"[B"</span><span class="p">,</span> <span class="s">""</span><span class="p">)</span> + <span class="k">if</span> <span class="ow">not</span> <span class="n">string_input</span><span class="p">.</span><span class="n">startswith</span><span class="p">(</span><span class="s">"#"</span><span class="p">):</span> + <span class="k">return</span> + + <span class="c1"># Do we have a matching executor? +</span> <span class="n">executor</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">commands</span><span class="p">.</span><span class="n">get_executor</span><span class="p">(</span><span class="n">string_input</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">openpty</span><span class="p">)</span> + <span class="k">if</span> <span class="n">executor</span> <span class="ow">is</span> <span class="ow">not</span> <span class="bp">None</span><span class="p">:</span> + + <span class="c1"># Print any message it wants to the terminal before run... +</span> <span class="k">if</span> <span class="n">executor</span><span class="p">.</span><span class="n">pre_message</span><span class="p">:</span> + <span class="k">print</span><span class="p">(</span><span class="s">"</span><span class="se">\n\r</span><span class="s">"</span> <span class="o">+</span> <span class="n">executor</span><span class="p">.</span><span class="n">pre_message</span><span class="p">)</span> + + <span class="c1"># Run it! 
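(the executor receives the image name, container name, and original line as kwargs)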
+</span> <span class="n">result</span> <span class="o">=</span> <span class="n">executor</span><span class="p">.</span><span class="n">run</span><span class="p">(</span> + <span class="n">name</span><span class="o">=</span><span class="bp">self</span><span class="p">.</span><span class="n">image</span><span class="p">,</span> + <span class="n">container_name</span><span class="o">=</span><span class="bp">self</span><span class="p">.</span><span class="n">uri</span><span class="p">.</span><span class="n">extended_name</span><span class="p">,</span> + <span class="n">original</span><span class="o">=</span><span class="n">string_input</span><span class="p">,</span> + <span class="p">)</span> + + <span class="c1"># And any message it wants to print after +</span> <span class="k">if</span> <span class="n">result</span><span class="p">.</span><span class="n">message</span><span class="p">:</span> + <span class="k">print</span><span class="p">(</span><span class="s">"</span><span class="se">\r</span><span class="s">"</span> <span class="o">+</span> <span class="n">result</span><span class="p">.</span><span class="n">message</span><span class="p">)</span> +</code></pre></div> +</div> + +<p>The result object holds what you would expect - a return code, some message, +and the basic outputs of the call. It’s up to the executor (command) to decide +what to show the user. Some might not show anything beyond commands that are run +with the executor. So what does that function “get_executor” look like? +This is where we delve into the commands module, where there is a simple lookup of +the starting prefixes of commands matched to Command classes:</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code> +<span class="c1"># lookup of named commands and settings +</span><span class="n">docker_commands</span> <span class="o">=</span> <span class="p">{</span> + <span class="s">"#save"</span><span class="p">:</span> <span class="n">SaveContainer</span><span class="p">,</span> + <span class="s">"#inspect"</span><span class="p">:</span> <span class="n">InspectContainer</span><span class="p">,</span> + <span class="s">"#envload"</span><span class="p">:</span> <span class="n">EnvLoad</span><span class="p">,</span> + <span class="s">"#envhost"</span><span class="p">:</span> <span class="n">EnvHost</span><span class="p">,</span> + <span class="s">"#envsave"</span><span class="p">:</span> <span class="n">EnvSave</span><span class="p">,</span> + <span class="s">"#cp"</span><span class="p">:</span> <span class="n">Copy</span><span class="p">,</span> + <span class="s">"#size"</span><span class="p">:</span> <span class="n">Size</span><span class="p">,</span> +<span class="p">}</span> +</code></pre></div> +</div> + +<p>When I add a load functionality, all it will need to do is update this dictionary. +And the reason those are “docker commands” is that you can imagine we eventually +support other container technologies, and the commands you run are going to vary. +Each Command actually has a class attribute for the container types that are supported.
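</p> + +<p>To make that concrete, here is a rough, hypothetical sketch of what registering a new command could look like — <code class="language-plaintext highlighter-rouge">#hello</code> is invented for illustration, but the base class, attributes, and lookup dictionary mirror the real ones shown here:</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Hypothetical sketch (not in Paks): a new command is a Command +# subclass plus one more entry in the lookup dictionary +class Hello(Command): + +    supported_for = ["docker", "podman"] +    pre_message = "Saying hello..." + +    def run(self, **kwargs): +        # Validate the container technology first, like other commands +        self.check(**kwargs) +        return self.return_success() + +docker_commands["#hello"] = Hello +</code></pre></div> +</div> + +<p>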
+Here is a snippet of the DockerCommands class attached to the client that we are calling “get_executor” on:</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code> +<span class="k">class</span> <span class="nc">DockerCommands</span><span class="p">:</span> + + <span class="c1"># Required kwargs for any docker/podman command to run +</span> <span class="n">required</span> <span class="o">=</span> <span class="p">[</span><span class="s">"container_name"</span><span class="p">,</span> <span class="s">"name"</span><span class="p">]</span> + + <span class="k">def</span> <span class="nf">__init__</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">container_tech</span><span class="p">):</span> + <span class="bp">self</span><span class="p">.</span><span class="n">command</span> <span class="o">=</span> <span class="n">container_tech</span> + <span class="bp">self</span><span class="p">.</span><span class="n">lookup</span> <span class="o">=</span> <span class="n">docker_commands</span> + + <span class="k">def</span> <span class="nf">parse_name</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">cmd</span><span class="p">):</span> + <span class="n">parts</span> <span class="o">=</span> <span class="n">cmd</span><span class="p">.</span><span class="n">split</span><span class="p">(</span><span class="s">" "</span><span class="p">)</span> + <span class="k">return</span> <span class="n">parts</span><span class="p">.</span><span class="n">pop</span><span class="p">(</span><span class="mi">0</span><span class="p">).</span><span class="n">replace</span><span class="p">(</span><span class="s">"</span><span class="se">\n</span><span class="s">"</span><span class="p">,</span> <span class="s">""</span><span class="p">).</span><span class="n">replace</span><span class="p">(</span><span class="s">"</span><span class="se">\r</span><span class="s">"</span><span class="p">,</span> <span class="s">""</span><span class="p">).</span><span class="n">strip</span><span class="p">()</span> + + <span class="k">def</span> <span class="nf">has_command</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">name</span><span class="p">):</span> + <span class="n">name</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">parse_name</span><span class="p">(</span><span class="n">name</span><span class="p">)</span> + <span class="k">return</span> <span class="n">name</span> <span class="ow">in</span> <span class="bp">self</span><span class="p">.</span><span class="n">lookup</span> + + <span class="o">@</span><span class="nb">property</span> + <span class="k">def</span> <span class="nf">history</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span> + <span class="k">return</span> <span class="n">History</span><span class="p">(</span><span class="bp">self</span><span class="p">.</span><span class="n">command</span><span class="p">)</span> + + <span class="k">def</span> <span class="nf">get_executor</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">name</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="bp">None</span><span class="p">):</span> + <span class="s">""" + Backend is required to update history + """</span> + <span 
class="n">name</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">parse_name</span><span class="p">(</span><span class="n">name</span><span class="p">)</span> + <span class="k">if</span> <span class="n">name</span> <span class="ow">in</span> <span class="bp">self</span><span class="p">.</span><span class="n">lookup</span><span class="p">:</span> + <span class="k">return</span> <span class="bp">self</span><span class="p">.</span><span class="n">lookup</span><span class="p">[</span><span class="n">name</span><span class="p">](</span><span class="bp">self</span><span class="p">.</span><span class="n">command</span><span class="p">,</span> <span class="n">required</span><span class="o">=</span><span class="bp">self</span><span class="p">.</span><span class="n">required</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span> +</code></pre></div> +</div> + +<p>To focus on the last function, you basically see that we parse the line (name), and then +see if it’s in our lookup. If so, we return the initialized executor, and we need to add +the output source in case it needs to interact with the current terminal. The self.command +refers to the container technology (e.g., docker or podman in this case).</p> + +<p>Then we can look at a particular command (e.g., inspect) and see it’s pretty simple! We have defined +the supported container technologies along with optional messages, and a main run function. Here is the command +to inspect, which will dump out the json manifest and optionally take a section:</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code> +<span class="k">class</span> <span class="nc">InspectContainer</span><span class="p">(</span><span class="n">Command</span><span class="p">):</span> + + <span class="n">supported_for</span> <span class="o">=</span> <span class="p">[</span><span class="s">"docker"</span><span class="p">,</span> <span class="s">"podman"</span><span class="p">]</span> + <span class="n">pre_message</span> <span class="o">=</span> <span class="s">"Inspecting Container..."</span> + + <span class="k">def</span> <span class="nf">run</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="o">**</span><span class="n">kwargs</span><span class="p">):</span> + <span class="s">""" + Inspect a container fully, or specific sections + """</span> + <span class="c1"># Always run this first to make sure container tech is valid +</span> <span class="bp">self</span><span class="p">.</span><span class="n">check</span><span class="p">(</span><span class="o">**</span><span class="n">kwargs</span><span class="p">)</span> + + <span class="c1"># These are both required for docker/podman +</span> <span class="n">container_name</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">kwargs</span><span class="p">[</span><span class="s">"container_name"</span><span class="p">]</span> + + <span class="c1"># inspect particular attributes provided as args +</span> <span class="k">if</span> <span class="bp">self</span><span class="p">.</span><span class="n">args</span><span class="p">:</span> + <span class="k">for</span> <span class="n">section</span> <span class="ow">in</span> <span class="bp">self</span><span class="p">.</span><span class="n">args</span><span class="p">:</span> + <span class="n">result</span> <span class="o">=</span> <span 
class="bp">self</span><span class="p">.</span><span class="n">run_command</span><span class="p">(</span> + <span class="p">[</span> + <span class="bp">self</span><span class="p">.</span><span class="n">tech</span><span class="p">,</span> + <span class="s">"inspect"</span><span class="p">,</span> + <span class="s">"--format"</span><span class="p">,</span> + <span class="s">""</span> <span class="o">%</span> <span class="n">section</span><span class="p">.</span><span class="n">capitalize</span><span class="p">(),</span> + <span class="n">container_name</span><span class="p">,</span> + <span class="p">]</span> + <span class="p">)</span> + + <span class="c1"># Otherwise just dump the whole thing +</span> <span class="k">else</span><span class="p">:</span> + <span class="n">result</span> <span class="o">=</span> <span class="bp">self</span><span class="p">.</span><span class="n">run_command</span><span class="p">([</span><span class="bp">self</span><span class="p">.</span><span class="n">tech</span><span class="p">,</span> <span class="s">"inspect"</span><span class="p">,</span> <span class="n">container_name</span><span class="p">])</span> + <span class="k">if</span> <span class="n">result</span><span class="p">:</span> + <span class="k">return</span> <span class="n">result</span> + <span class="k">return</span> <span class="bp">self</span><span class="p">.</span><span class="n">return_success</span><span class="p">()</span> +</code></pre></div> +</div> + +<p>You’ll now know the main Paks trick - because we are still running on the host, +we can issue commands to the host while we are in the container! In the above, we can just type:</p> + +<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code> +<span class="c">#inspect</span> +<span class="c">#inspect config</span> +</code></pre></div> +</div> + +<p>And see the output in the terminal! This is how a lot of the interactions with the host work. +It’s kind of simple and silly, but also really cool when you see it work on the container! +So the run function above, just as a reminder, is called by this part:</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code> +<span class="n">result</span> <span class="o">=</span> <span class="n">executor</span><span class="p">.</span><span class="n">run</span><span class="p">(</span> + <span class="n">name</span><span class="o">=</span><span class="bp">self</span><span class="p">.</span><span class="n">image</span><span class="p">,</span> + <span class="n">container_name</span><span class="o">=</span><span class="bp">self</span><span class="p">.</span><span class="n">uri</span><span class="p">.</span><span class="n">extended_name</span><span class="p">,</span> + <span class="n">original</span><span class="o">=</span><span class="n">string_input</span><span class="p">,</span> +<span class="p">)</span> +</code></pre></div> +</div> + +<p>And honestly, that’s the majority of Paks! 🎉️</p> + +<h2 id="discussion">Discussion</h2> + +<p>Paks has honestly been so fun to work on, despite long hours of trying to figure things out during evenings and weekends. I’m so excited +about the ideas, and I want to share them with others because I think developer tools for containers +are kind of lacking. Heck, I stayed up until like 4am writing this post. 
No, I don’t normally do that, +I had some things on my mind, but it was an excellent use of the time, despite the fact that I woke up 4 hours later and +I’m going to crash tonight (err tomorrow night… err now that I’m tweaking up the finishing touches to this post)!</p> + +<h3 id="next-steps">Next Steps</h3> + +<p>I’m working on a “paks load” command that will let someone develop a Python module +with some set of commands for their custom use case. The first thing I wanted to try +was to generate sboms for spack (e.g., “Generate sboms for this spack install in the container +and save them to my host so I can upload alongside the container to a registry”). I had +some <a href="https://github.com/spack/spack-sbom" target="_blank">previous work</a> to use +spack scripting, but ultimately this weekend did a <a href="https://github.com/spack/spack/pull/28909" target="_blank">pull request</a> +to add sbom generation to spack proper. And then I’ll be able to work on the load commands. +I also want to address some of the anticipated bugs I mentioned above, like properly setting “HISTCONTROL” +to ensure we don’t save commands issued by the client to history, and possibly having a cleanup step on save +that removes the file. The reason I haven’t added this yet is that if I’m developing in the container +and want to, say, move it from my local machine to HPC, I kind of want to have my history so I can lazily use it.</p> + +<h3 id="but-really">But Really…</h3> + +<p>We have some magic up our sleeves for what we are actually working on to inspire these ideas! +I guess you’ll just have to wait for the future, because <a href="https://github.com/alecbcs" target="_blank">@alecbcs</a> and +I both have vision and are a great tag team! 🎉️</p> + +<h3 id="security">Security</h3> + +<p>So there are obviously security issues around a library like this - and I added notes +to the documentation that I’ll reiterate here. Paks is intended for use by a developer +that is in their own trusted environment, whether local or on HPC. Because there is an interaction +with the host, you wouldn’t use this in production to give users an ability to load +or save environments. You also wouldn’t want to save a development container with something +private in history and push it. I’m still an advocate for, after development is done, +pushing changed code to GitHub and having automation build, test, and deploy. +Could we eventually have a production grade library to enable interactions inside the +container? Possibly, but it’s not Paks in Python in its current state. I think +that’s okay - we have to start small with ideas and go from there.</p> + +<h3 id="didnt-i-see-paks-before">Didn’t I see paks before?</h3> + +<p>Yes, you did! A previous version was intended for making spack build caches on GitHub, but that +didn’t work because you couldn’t build a spack package within a container and then +pull the same container and install it and hit the cache. I think this might work someday, +hence why I haven’t completely deleted the code, but I couldn’t let a cute logo and colorscheme go to waste! +So for now it’s on a separate branch but largely I am not working on it. If you want to see this branch, +it’s still <a href="https://github.com/syspack/paks/tree/v1/spack" target="_blank">here</a>!</p> + +<p>Thanks for reading friends!
I hope this has been interesting and you might be inspired to +also work on better tooling for developers, even if that just means exploring the ideas.</p> + + + + + Developing managed vs self-hosted software + + 2022-02-12T16:00:00-07:00 + https://hpc.social/2022/developing-managed-vs-self-hosted-software + <p>I&#8217;ve done some work lately with teams that deliver their products in very different ways, and it has me thinking about how much our &#8220;best practices&#8221; depend on a product&#8217;s delivery and operations model. I&#8217;ve had a bunch of conversations about this tension.</p> + +<p>On the one hand, some of the teams I&#8217;ve worked with build software services that are developed and operated by the same team, and where the customers (internal or external) directly make use of the operated service. These teams try to follow what I think of as &#8220;conventional&#8221; SaaS best practices:</p> + +<ul><li>Their development workflow prioritizes iteration speed above all else</li><li>They tend to deploy from HEAD, or close to it, in their source repository<ul><li>In almost all cases, branches are short-lived for feature development</li></ul></li><li>They&#8217;ve built good automated test suites and well-tuned CI/CD pipelines</li><li>Releases are very frequent</li><li>They make extensive use of observability tooling, often using third-party SaaS for this</li><li>Fast roll-back is prioritized over perfect testing ahead of time</li><li>While their user documentation is mostly good, their operations documentation tends to be &#8220;just good enough&#8221; to onboard new team members, and a lot of it lives in Slack</li></ul> + +<p>However, we also have plenty of customers who deploy our software to their own systems, whether in the cloud or on-premise. (Some of them don&#8217;t even connect to the Internet on a regular basis!) The development workflow for software aimed at these customers looks rather different:</p> + +<ul><li>Deploys are managed by the customer, and release cycles are longer</li><li>These teams do still have CI/CD and extensive automated tests&#8230; but they may also have explicit QA steps before releases</li><li>There tend to be lots of longer-lived version branches, and even &#8220;LTS&#8221; branches with their own roadmaps</li><li>Logging is prioritized over observability, because they can&#8217;t make assumptions about the customer tooling</li><li>They put a lot more effort into operational documentation, because most operators will not also be developers</li></ul> + +<p>From a developer perspective, of course, this all feels much more painful! The managed service use case feels much more comfortable to develop for, and most of the community tooling and best practices for web development seem to optimize for that model.</p> + +<p>But from a sysadmin perspective, used to mostly operating third-party software, the constraints of self-hosted development are all very familiar. And even managed service teams often rely on third-party software developed using this kind of model, relying on LTS releases of Linux distributions and pinning major versions of dependencies.</p> + +<p>The biggest challenge I&#8217;ve seen, however, is when a development team tries to target the same software at <em>both use cases</em>.
As far as I can tell, it&#8217;s very difficult to simultaneously operate a reliable service that is being continuously developed and deployed, and to provide predictable and high-quality releases to self-hosted customers.</p> + +<p>So far, I&#8217;ve seen this tension resolved in three different ways:</p> + +<ul><li>The internal service becomes &#8220;just another customer&#8221;, operating something close to the latest external release, resulting in a slower release cycle for the internal service</li><li>Fast development for the internal service gets prioritized, with external releases becoming less frequent and including bigger and bigger changes</li><li>Internal and external diverge completely, with separate development teams taking over (and often a name change for one of them)</li></ul> + +<p>I don&#8217;t really have a conclusion here, except that I don&#8217;t really love any of these results. /sigh</p> + +<p>If you&#8217;re reading this and have run into similar tensions, how have you seen this resolved? Have you seen any success stories in deploying the same code internally and externally? Or alternatively &#8212; any interesting stories of failure to share? <img alt="😉" class="wp-smiley" src="https://s.w.org/images/core/emoji/14.0.0/72x72/1f609.png" style="height: 1em;" /> Feel free to <a href="mailto:ajdecon@ajdecon.org">send me an email</a>, I&#8217;d be interested to hear from you.</p> + + + + + NUMA on POWER9 + + 2022-01-25T00:53:21-07:00 + https://hpc.social/2022/numa-on-power9 + <p>Non-uniform memory access (NUMA) systems are servers made up of a single planar board (motherboard) with more than one CPU socket. On such servers, each CPU socket is directly connected to part of the system main memory but can also use parts of the main memory to which it is not directly connected via a crossbar or interconnect. Memory access times on NUMA systems are thus not uniform and depend upon the location of the memory relative to the CPU socket from which it is accessed. In other words, there is a performance penalty to accessing memory which is not local to a given CPU socket via the interconnect. Much has been written about NUMA from both a hardware and OS perspective.</p> + +<p>From a performance perspective, therefore, strategies such as memory affinity and CPU pinning are important considerations when running on NUMA systems. As this is an HPC-focused blog, we’ll look at this through the lens of the well-known IBM Spectrum LSF workload scheduler. LSF supports Linux on ppc64le, aarch64 and x86-64. The test system in this case is a dual-socket IBM POWER9 based server configured as follows:</p> + +<ul> +<li>CentOS Stream 8 (ppc64le)</li> +<li>Dual socket POWER9</li> +<li>32GB main memory (2 x 16GB DIMMs)</li> +<li>1TB storage</li> +<li>IBM Spectrum LSF Suite for HPC v10.2.0.11</li> +<li>NVIDIA V100 GPU</li> +</ul> +<p>Let’s begin by looking at how many NUMA zones are on this POWER9 based system.
We can do this using the <em>lscpu</em> command.</p> + +<div class="highlight"><pre><code class="language-plaintext"># lscpu +Architecture: ppc64le +Byte Order: Little Endian +CPU(s): 128 +On-line CPU(s) list: 0-127 +Thread(s) per core: 4 +Core(s) per socket: 16 +Socket(s): 2 +NUMA node(s): 2 +Model: 2.1 (pvr 004e 1201) +Model name: POWER9, altivec supported +CPU max MHz: 3800.0000 +CPU min MHz: 1838.0000 +L1d cache: 32K +L1i cache: 32K +L2 cache: 512K +L3 cache: 10240K +NUMA node0 CPU(s): 0-63 +NUMA node8 CPU(s): 64-127</code></pre></div> + +<p>We see above that there are two NUMA nodes (0, 8), each with 64 threads. The system has a (meager) total of 32GB RAM - we confirm this using the <em>free</em> command.</p> + +<div class="highlight"><pre><code class="language-plaintext"># free + total used free shared buff/cache available +Mem: 32252096 7766080 21522880 130048 2963136 23240448 +Swap: 16650176 0 16650176</code></pre></div> + +<p>In order to see how memory is attached to each NUMA node, we use the system <em>numactl</em> command as follows. We confirm that there is 16GB RAM per NUMA node. This also shows the distances (weights) between each NUMA node. As expected, this corresponds to how the 2 x 16 GB DIMMs are installed in the system – one for each respective CPU socket.</p> + +<div class="highlight"><pre><code class="language-plaintext"># numactl -H +available: 2 nodes (0,8) +node 0 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 +node 0 size: 15262 MB +node 0 free: 9424 MB +node 8 cpus: 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 +node 8 size: 16233 MB +node 8 free: 11595 MB +node distances: +node 0 8 + 0: 10 40 + 8: 40 10 </code></pre></div> + +<p>Above, we see the system has a total of 128 threads. This is with simultaneous multithreading (SMT) configured to 4. There is a total of 16 cores per socket, for a total of 32 cores in the system. With SMT set to 4, we get the value of 128 (32 cores * 4 threads).</p> + +<p>Linux on PowerLE (Little Endian) includes the <em>ppc64_cpu</em> command which allows you to display characteristics and settings of CPUs including SMT (Simultaneous multithreading). The output from <em>ppc64_cpu</em> is provided below, showing cores 0 - 31, each with 4 threads. 
The * beside each thread denotes that it’s active.</p> + +<div class="highlight"><pre><code class="language-plaintext"># ppc64_cpu --info +Core 0: 0* 1* 2* 3* +Core 1: 4* 5* 6* 7* +Core 2: 8* 9* 10* 11* +Core 3: 12* 13* 14* 15* +Core 4: 16* 17* 18* 19* +Core 5: 20* 21* 22* 23* +Core 6: 24* 25* 26* 27* +Core 7: 28* 29* 30* 31* +Core 8: 32* 33* 34* 35* +Core 9: 36* 37* 38* 39* +Core 10: 40* 41* 42* 43* +Core 11: 44* 45* 46* 47* +Core 12: 48* 49* 50* 51* +Core 13: 52* 53* 54* 55* +Core 14: 56* 57* 58* 59* +Core 15: 60* 61* 62* 63* +Core 16: 64* 65* 66* 67* +Core 17: 68* 69* 70* 71* +Core 18: 72* 73* 74* 75* +Core 19: 76* 77* 78* 79* +Core 20: 80* 81* 82* 83* +Core 21: 84* 85* 86* 87* +Core 22: 88* 89* 90* 91* +Core 23: 92* 93* 94* 95* +Core 24: 96* 97* 98* 99* +Core 25: 100* 101* 102* 103* +Core 26: 104* 105* 106* 107* +Core 27: 108* 109* 110* 111* +Core 28: 112* 113* 114* 115* +Core 29: 116* 117* 118* 119* +Core 30: 120* 121* 122* 123* +Core 31: 124* 125* 126* 127* </code></pre></div> + +<p>The <em>ppc64_cpu</em> command makes it very easy to configure SMT on the fly. Here we turn off SMT.</p> + +<div class="highlight"><pre><code class="language-plaintext"># ppc64_cpu --smt=off + +# ppc64_cpu --smt +SMT is off</code></pre></div> + +<p>Now when we run <em>ppc64_cpu –info</em> we see that SMT is disabled. Note that we only see one * per row now and the <em>lscpu</em> command shows the corresponding offline CPUs (threads).</p> + +<div class="highlight"><pre><code class="language-plaintext"># ppc64_cpu --info +Core 0: 0* 1 2 3 +Core 1: 4* 5 6 7 +Core 2: 8* 9 10 11 +Core 3: 12* 13 14 15 +Core 4: 16* 17 18 19 +Core 5: 20* 21 22 23 +Core 6: 24* 25 26 27 +Core 7: 28* 29 30 31 +Core 8: 32* 33 34 35 +Core 9: 36* 37 38 39 +Core 10: 40* 41 42 43 +Core 11: 44* 45 46 47 +Core 12: 48* 49 50 51 +Core 13: 52* 53 54 55 +Core 14: 56* 57 58 59 +Core 15: 60* 61 62 63 +Core 16: 64* 65 66 67 +Core 17: 68* 69 70 71 +Core 18: 72* 73 74 75 +Core 19: 76* 77 78 79 +Core 20: 80* 81 82 83 +Core 21: 84* 85 86 87 +Core 22: 88* 89 90 91 +Core 23: 92* 93 94 95 +Core 24: 96* 97 98 99 +Core 25: 100* 101 102 103 +Core 26: 104* 105 106 107 +Core 27: 108* 109 110 111 +Core 28: 112* 113 114 115 +Core 29: 116* 117 118 119 +Core 30: 120* 121 122 123 +Core 31: 124* 125 126 127 + +# lscpu +Architecture: ppc64le +Byte Order: Little Endian +CPU(s): 128 +On-line CPU(s) list: 0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,64,68,72,76,80,84,88,92,96,100,104,108,112,116,120,124 +Off-line CPU(s) list: 1-3,5-7,9-11,13-15,17-19,21-23,25-27,29-31,33-35,37-39,41-43,45-47,49-51,53-55,57-59,61-63,65-67,69-71,73-75,77-79,81-83,85-87,89-91,93-95,97-99,101-103,105-107,109-111,113-115,117-119,121-123,125-127 +Thread(s) per core: 1 +Core(s) per socket: 16 +Socket(s): 2 +NUMA node(s): 2 +Model: 2.1 (pvr 004e 1201) +Model name: POWER9, altivec supported +CPU max MHz: 3800.0000 +CPU min MHz: 1838.0000 +L1d cache: 32K +L1i cache: 32K +L2 cache: 512K +L3 cache: 10240K +NUMA node0 CPU(s): 0,4,8,12,16,20,24,28,32,36,40,44,48,52,56,60 +NUMA node8 CPU(s): 64,68,72,76,80,84,88,92,96,100,104,108,112,116,120,124</code></pre></div> + +<p>We’ve explored a number of useful commands up to this point to display information about the NUMA nodes. Another command to note is <em>lstopo</em>, which is part of the <a href="https://www.open-mpi.org/projects/hwloc/">portable hardware locality (hwloc) project</a>. <em>lstopo</em> can display both in text and graphical format information about the system topology including NUMA node and PCIe devices. 
On CentOS 8 Stream, <em>lstopo</em> is part of the OS <em>lstopo-gui</em> package.</p> + +<p>However, the OS <em>lstopo</em> binary is not compiled with NVIDIA CUDA support. As the system in question is equipped with an NVIDIA V100, I’ve recompiled <em>lstopo</em> based on the latest version of <a href="https://www.open-mpi.org/software/hwloc/v2.7/">hwloc 2.7.0</a>.</p> + +<p>Both the text and graphical output from <em>lstopo</em> follow below.</p> + +<div class="highlight"><pre><code class="language-plaintext"># ./lstopo +Machine (31GB total) + Package L#0 + NUMANode L#0 (P#0 15GB) + L3 L#0 (10MB) + L2 L#0 (512KB) + L1d L#0 (32KB) + L1i L#0 (32KB) + Core L#0 + PU L#0 (P#0) + L1d L#1 (32KB) + L1i L#1 (32KB) + Core L#1 + PU L#1 (P#4) + L3 L#1 (10MB) + L2 L#1 (512KB) + L1d L#2 (32KB) + L1i L#2 (32KB) + Core L#2 + PU L#2 (P#8) + L1d L#3 (32KB) + L1i L#3 (32KB) + Core L#3 + PU L#3 (P#12) + L3 L#2 (10MB) + L2 L#2 (512KB) + L1d L#4 (32KB) + L1i L#4 (32KB) + Core L#4 + PU L#4 (P#16) + L1d L#5 (32KB) + L1i L#5 (32KB) + Core L#5 + PU L#5 (P#20) + L3 L#3 (10MB) + L2 L#3 (512KB) + L1d L#6 (32KB) + L1i L#6 (32KB) + Core L#6 + PU L#6 (P#24) + L1d L#7 (32KB) + L1i L#7 (32KB) + Core L#7 + PU L#7 (P#28) + L3 L#4 (10MB) + L2 L#4 (512KB) + L1d L#8 (32KB) + L1i L#8 (32KB) + Core L#8 + PU L#8 (P#32) + L1d L#9 (32KB) + L1i L#9 (32KB) + Core L#9 + PU L#9 (P#36) + L3 L#5 (10MB) + L2 L#5 (512KB) + L1d L#10 (32KB) + L1i L#10 (32KB) + Core L#10 + PU L#10 (P#40) + L1d L#11 (32KB) + L1i L#11 (32KB) + Core L#11 + PU L#11 (P#44) + L3 L#6 (10MB) + L2 L#6 (512KB) + L1d L#12 (32KB) + L1i L#12 (32KB) + Core L#12 + PU L#12 (P#48) + L1d L#13 (32KB) + L1i L#13 (32KB) + Core L#13 + PU L#13 (P#52) + L3 L#7 (10MB) + L2 L#7 (512KB) + L1d L#14 (32KB) + L1i L#14 (32KB) + Core L#14 + PU L#14 (P#56) + L1d L#15 (32KB) + L1i L#15 (32KB) + Core L#15 + PU L#15 (P#60) + HostBridge + PCIBridge + PCI 0002:01:00.0 (SAS) + Block(Disk) "sdb" + Block(Disk) "sda" + HostBridge + PCIBridge + PCI 0004:01:00.0 (Ethernet) + Net "enP4p1s0f0" + PCI 0004:01:00.1 (Ethernet) + Net "enP4p1s0f1" + HostBridge + PCIBridge + PCIBridge + PCI 0005:02:00.0 (VGA) + Package L#1 + NUMANode L#1 (P#8 16GB) + L3 L#8 (10MB) + L2 L#8 (512KB) + L1d L#16 (32KB) + L1i L#16 (32KB) + Core L#16 + PU L#16 (P#64) + L1d L#17 (32KB) + L1i L#17 (32KB) + Core L#17 + PU L#17 (P#68) + L3 L#9 (10MB) + L2 L#9 (512KB) + L1d L#18 (32KB) + L1i L#18 (32KB) + Core L#18 + PU L#18 (P#72) + L1d L#19 (32KB) + L1i L#19 (32KB) + Core L#19 + PU L#19 (P#76) + L3 L#10 (10MB) + L2 L#10 (512KB) + L1d L#20 (32KB) + L1i L#20 (32KB) + Core L#20 + PU L#20 (P#80) + L1d L#21 (32KB) + L1i L#21 (32KB) + Core L#21 + PU L#21 (P#84) + L3 L#11 (10MB) + L2 L#11 (512KB) + L1d L#22 (32KB) + L1i L#22 (32KB) + Core L#22 + PU L#22 (P#88) + L1d L#23 (32KB) + L1i L#23 (32KB) + Core L#23 + PU L#23 (P#92) + L3 L#12 (10MB) + L2 L#12 (512KB) + L1d L#24 (32KB) + L1i L#24 (32KB) + Core L#24 + PU L#24 (P#96) + L1d L#25 (32KB) + L1i L#25 (32KB) + Core L#25 + PU L#25 (P#100) + L3 L#13 (10MB) + L2 L#13 (512KB) + L1d L#26 (32KB) + L1i L#26 (32KB) + Core L#26 + PU L#26 (P#104) + L1d L#27 (32KB) + L1i L#27 (32KB) + Core L#27 + PU L#27 (P#108) + L3 L#14 (10MB) + L2 L#14 (512KB) + L1d L#28 (32KB) + L1i L#28 (32KB) + Core L#28 + PU L#28 (P#112) + L1d L#29 (32KB) + L1i L#29 (32KB) + Core L#29 + PU L#29 (P#116) + L3 L#15 (10MB) + L2 L#15 (512KB) + L1d L#30 (32KB) + L1i L#30 (32KB) + Core L#30 + PU L#30 (P#120) + L1d L#31 (32KB) + L1i L#31 (32KB) + Core L#31 + PU L#31 (P#124) + HostBridge + PCIBridge + PCI 0033:01:00.0 (3D) + CoProc(CUDA) 
"cuda0" + GPU(NVML) "nvml0"</code></pre></div> + +<figure><img src="https://www.gaborsamu.com/images/lstopo.png" /> +</figure> + +<p><strong>Processor and memory affinity with LSF</strong></p> + +<p>Next, let’s look at some examples of how processor and memory affinity can be controlled with <a href="https://www.ibm.com/us-en/products/hpc-workload-management">IBM Spectrum LSF</a>. LSF is a complete workload management solution for high-performance computing (HPC).</p> + +<p>In the examples below, the well-known benchmark <a href="https://www.netlib.org/benchmark/hpl/">High-Performance Linpack (HPL)</a> is run. Please note the following:</p> + +<ul> +<li>HPL has not been optimized here and used only as an example.</li> +<li>HPL was compiled against Open MPI version 4.1.2</li> +<li><a href="https://www.open-mpi.org">Open MPI</a> version 4.1.2 was compiled with support for LSF</li> +</ul> +<p>Next, we submit a run of HPL requesting 16 processor cores, all on the same NUMA node, with the options specified to bind the tasks to the NUMA node and with memory affinity specified. For job submission, the LSF <em>bsub</em> command is used.</p> + +<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ bsub -n 16 -q normal -o /home/gsamu/%J.out +-R "affinity[core(1,same=numa):cpubind=numa:membind=localonly]" mpirun ./xhpl +Job &lt;101579&gt; is submitted to queue &lt;normal&gt;.</code></pre></div> + +<p>After the job starts, we can see it the list of processes (PIDs), as well as the memory utilization using the LSF <em>bjobs</em> command.</p> + +<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ bjobs -l 101579 + +Job &lt;101579&gt;, User &lt;gsamu&gt;, Project &lt;default&gt;, Status &lt;RUN&gt;, Queue &lt;normal&gt;, Co + mmand &lt;mpirun ./xhpl&gt;, Share group charged &lt;/gsamu&gt; +Fri Jan 10 1821: Submitted from host &lt;kilenc&gt;, CWD &lt;$HOME/hpl-2.3/testing&gt;, + Output File &lt;/home/gsamu/101579.out&gt;, 16 Task(s), Request + ed Resources &lt;affinity[core(1,same=numa):cpubind=numa:memb + ind=localonly]&gt;; +Fri Jan 10 1821: Started 16 Task(s) on Host(s) &lt;16*kilenc&gt;, Allocated 16 Sl + ot(s) on Host(s) &lt;16*kilenc&gt;, Execution Home &lt;/home/gsamu&gt; + , Execution CWD &lt;/home/gsamu/hpl-2.3/testing&gt;; +Fri Jan 10 1830: Resource usage collected. + The CPU time used is 24 seconds. 
+ MEM: 2.9 Gbytes; SWAP: 1 Mbytes; NTHREAD: 55 + PGID: 80693; PIDs: 80693 80700 80702 + PGID: 80707; PIDs: 80707 + PGID: 80708; PIDs: 80708 + PGID: 80709; PIDs: 80709 + PGID: 80710; PIDs: 80710 + PGID: 80711; PIDs: 80711 + PGID: 80712; PIDs: 80712 + PGID: 80713; PIDs: 80713 + PGID: 80714; PIDs: 80714 + PGID: 80715; PIDs: 80715 + PGID: 80716; PIDs: 80716 + PGID: 80717; PIDs: 80717 + PGID: 80718; PIDs: 80718 + PGID: 80719; PIDs: 80719 + PGID: 80720; PIDs: 80720 + PGID: 80721; PIDs: 80721 + PGID: 80722; PIDs: 80722 + + + MEMORY USAGE: + MAX MEM: 2.9 Gbytes; AVG MEM: 1.4 Gbytes + + GPFSIO DATA: + READ: ~0 bytes; WRITE: ~0 bytes + + SCHEDULING PARAMETERS: + r15s r1m r15m ut pg io ls it tmp swp mem + loadSched - - - - - - - - - - - + loadStop - - - - - - - - - - - + + RESOURCE REQUIREMENT DETAILS: + Combined: select[type == local] order[r15s:pg] affinity[core(1,same=numa)*1:cp + ubind=numa:membind=localonly] + Effective: select[type == local] order[r15s:pg] affinity[core(1,same=numa)*1:c + pubind=numa:membind=localonly]</code></pre></div> + +<p>During the job runtime, we use the <em>ps</em> command to check which processor cores the <em>xhpl</em> processes are bound to (see PSR column). It should be noted that LSF also creates a cgroup cpuset for this job.</p> + +<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ ps -Fae | grep xhpl +UID PID PPID C SZ RSS PSR STIME TTY TIME CMD +gsamu 80702 80700 0 2884 31936 36 18:46 ? 0000 mpirun ./xhpl +gsamu 80707 80702 97 10471 387072 4 18:46 ? 0021 ./xhpl +gsamu 80708 80702 97 10619 396672 20 18:46 ? 0021 ./xhpl +gsamu 80709 80702 97 10345 378816 56 18:46 ? 0021 ./xhpl +gsamu 80710 80702 97 10596 395200 8 18:46 ? 0021 ./xhpl +gsamu 80711 80702 97 10470 387072 48 18:46 ? 0021 ./xhpl +gsamu 80712 80702 97 10619 396672 44 18:46 ? 0021 ./xhpl +gsamu 80713 80702 97 10351 379136 12 18:46 ? 0021 ./xhpl +gsamu 80714 80702 97 10322 377472 24 18:46 ? 0021 ./xhpl +gsamu 80715 80702 97 10350 379328 0 18:46 ? 0021 ./xhpl +gsamu 80716 80702 97 10494 388736 60 18:46 ? 0021 ./xhpl +gsamu 80717 80702 97 10232 371648 40 18:46 ? 0021 ./xhpl +gsamu 80718 80702 97 10205 370048 28 18:46 ? 0021 ./xhpl +gsamu 80719 80702 97 10321 377536 52 18:46 ? 0021 ./xhpl +gsamu 80720 80702 97 10465 387008 36 18:46 ? 0021 ./xhpl +gsamu 80721 80702 97 10200 369664 16 18:46 ? 0021 ./xhpl +gsamu 80722 80702 96 10461 386560 32 18:46 ? 0021 ./xhpl +gsamu 80879 36562 0 1736 2816 80 18:46 pts/1 0000 grep --color=auto xhpl</code></pre></div> + +<p>Cross-referencing the above list of CPU cores with the output of <em>numactl</em>, we see that the job is running on NUMA node 0.</p> + +<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ numactl -H +available: 2 nodes (0,8) +node 0 cpus: 0 4 8 12 16 20 24 28 32 36 40 44 48 52 56 60 +node 0 size: 15245 MB +node 0 free: 944 MB +node 8 cpus: 64 68 72 76 80 84 88 92 96 100 104 108 112 116 120 124 +node 8 size: 16242 MB +node 8 free: 5607 MB +node distances: +node 0 8 + 0: 10 40 + 8: 40 10</code></pre></div> + +<p>The LSF <em>bhosts</em> command also provides an affinity option (<em>-aff</em>) to display the NUMA bindings. Below is the output from that command.
The * denotes that there are tasks pinned.</p> + +<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ bhosts -aff +Host[30.7G] kilenc + NUMA[0: 0M / 14.8G] + Socket0 + core0(*0) + core4(*4) + core24(*8) + core28(*12) + core32(*16) + core36(*20) + core40(*24) + core44(*28) + core48(*32) + core52(*36) + core56(*40) + core60(*44) + core72(*48) + core76(*52) + core80(*56) + core84(*60) + NUMA[8: 0M / 15.8G] + Socket8 + core2064(64) + core2068(68) + core2072(72) + core2076(76) + core2080(80) + core2084(84) + core2088(88) + core2092(92) + core2096(96) + core2100(100) + core2104(104) + core2108(108) + core2112(112) + core2116(116) + core2120(120) + core2124(124)</code></pre></div> + +<p>Affinity is also crucial for GPU workloads. LSF provides advanced capabilities for scheduling of GPU workloads to NVIDIA devices. Below is an example of the LSF submission syntax that could be used to submit a machine learning training workload for exclusive execution on a single GPU with core affinity specified on the same NUMA node. In the case where a system is equipped with multiple GPUs, users could submit jobs using similar syntax and this would effectively isolate the GPU workloads from one another.</p> + +<div class="highlight"><pre><code class="language-plaintext">bsub -n 10 -R "affinity[core(same=numa):distribute=pack]" -gpu "num=1:mode=exclusive" ./train.py</code></pre></div> + +<p>We&rsquo;ve discussed NUMA concepts in brief and the ability to easily control CPU pinning and memory binding for jobs submitted to LSF. In HPC environments where performance is crucial, using these concepts can help to drive performance by preventing workloads from using the NUMA interconnect where feasible. That concludes this quick recap of affinity jobs in LSF. You can find out more about the capabilities of LSF in the <a href="https://www.ibm.com/docs/en/spectrum-lsf/10.1.0">online documentation</a>.</p> + + + + + Improving the Open Science Data Federation’s Cache Selection + + 2022-01-22T05:00:00-07:00 + https://hpc.social/2022/improving-the-open-science-data-federation-s-cache-selection + <p>Optimizing data transfers requires tuning many parameters. High latency between the client and a server can decrease data transfer throughput. The Open Science Data Federation (OSDF) attempts to optimize the latency between a client and cache by using GeoIP to locate the nearest cache to the client. But using GeoIP alone has many flaws. In this post, we utilize <a href="https://workers.cloudflare.com/">Cloudflare Workers</a> to provide GeoIP information during cache selection. During the evaluation, we found that location accuracy grew from <strong>86%</strong> with the original GeoIP service to <strong>95%</strong> with Cloudflare Workers.</p> + +<figure class=""> + <img alt="Map of U.S. OSDF" src="https://derekweitzel.com/images/posts/CloudflareWorkers/CacheMap.png" /><figcaption> + Map of OSDF locations + + </figcaption></figure> + +<p>GeoIP has many flaws. First, the nearest physical cache may not be the nearest in the network topology. Determining the nearest cache in the network would require probing the network topology between the client and every cache, an intensive task to perform for each client startup, and may be impossible with some network configurations, such as blocked network protocols.</p> + +<p>Second, the GeoIP database is not perfect. It does not have every IP address, and the addresses may not have accurate location information.
When GeoIP is unable to determine a location, it will default to “guessing” the location is a lake in Kansas (<a href="https://arstechnica.com/tech-policy/2016/08/kansas-couple-sues-ip-mapping-firm-for-turning-their-life-into-a-digital-hell/">a well known issue</a>).</p> + +<p>Following a review of the Open Science Data Federation (OSDF), we found that we could improve efficiency by improving the geolocation of clients. In the review, several sites were detected to not be using the nearest cache.</p> + +<h2 id="implementation">Implementation</h2> + +<p>StashCP queries the <a href="https://cernvm.cern.ch/fs/">CVMFS</a> geo location service which relies on the <a href="https://www.maxmind.com/en/home">MaxMind GeoIP database</a>.</p> + +<p><a href="https://workers.cloudflare.com/">Cloudflare Workers</a> are designed to run at Cloudflare’s many colocation facilities near the client. Cloudflare directs a client’s request to a nearby data center using DNS. Each request is annotated with an approximate location of the client, as well as the colocation center that received the request. Cloudflare uses a GeoIP database much like MaxMind, but it also falls back to the colocation site that serviced the request.</p> + +<p>I wrote a Cloudflare worker, <a href="https://github.com/djw8605/cache-locator"><code class="language-plaintext highlighter-rouge">cache-locator</code></a>, which calculates the nearest cache to the client. It uses the GeoIP location of the client to calculate the ordered list of nearest caches. If the GeoIP fails for a location, the incoming request to the worker will not be annotated with the location but will include the <code class="language-plaintext highlighter-rouge">IATA</code> airport code of the colocation center that received the client request. We then return the ordered list of caches nearest to the airport.</p> + +<p>We imported a publicly available <a href="https://www.partow.net/miscellaneous/airportdatabase/">database of airport codes</a> mapped to locations. The database is stored in the <a href="https://developers.cloudflare.com/workers/learning/how-kv-works">Cloudflare Key-Value</a> store, keyed by the <code class="language-plaintext highlighter-rouge">IATA</code> code of the airport.</p> + +<h2 id="evaluation">Evaluation</h2> + +<p>To evaluate the location service, I submitted test jobs to each site available in the OSG OSPool, 43 different sites at the time of evaluation. The test jobs:</p> + +<ol> + <li> + <p>Run the existing <code class="language-plaintext highlighter-rouge">stashcp</code> to retrieve the closest cache.</p> + + + <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> stashcp --closest +</code></pre></div> + </div> + + </li> + <li> + <p>Run a custom <a href="https://github.com/djw8605/closest-cache-cloudflare">closest script</a> that will query the Cloudflare worker for the nearest caches and print out the cache.</p> + + </li> +</ol> + +<p>After the jobs completed, I compiled the cache decisions into a <a href="https://docs.google.com/spreadsheets/d/1mo1FHYW2vpCyhSeCCd_bwP21rFFzqedv0dZ0z8EY4gg/edit?usp=sharing">spreadsheet</a> and manually evaluated each cache selection decision.
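</p> + +<p>As an aside, the core of the selection logic described above is easy to sketch. What follows is a rough Python illustration with invented names (the real <code class="language-plaintext highlighter-rouge">cache-locator</code> worker linked above differs in detail): order the caches by great-circle distance to the client, falling back to the colocation airport’s coordinates when GeoIP fails.</p> + +<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Rough sketch with invented names -- not the actual worker code +from math import asin, cos, radians, sin, sqrt + +def haversine_km(a, b): +    """Great-circle distance in km between (lat, lon) points a and b.""" +    lat1, lon1, lat2, lon2 = map(radians, (*a, *b)) +    h = sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * sin((lon2 - lon1) / 2) ** 2 +    return 2 * 6371 * asin(sqrt(h)) + +def ordered_caches(geoip_location, colo_airport, caches, airports): +    # geoip_location is None when the GeoIP lookup fails; fall back +    # to the coordinates of the airport code Cloudflare reported +    client = geoip_location or airports[colo_airport] +    return sorted(caches, key=lambda c: haversine_km(client, c["location"])) +</code></pre></div> +</div> + +<p>With that sketch in mind, back to the results. 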
 + +<h2 id="evaluation">Evaluation</h2> + +<p>To evaluate the location, I submitted test jobs to each site available in the OSG OSPool, 43 different sites at the time of evaluation. The test jobs:</p> + +<ol> + <li> + <p>Run the existing <code class="language-plaintext highlighter-rouge">stashcp</code> to retrieve the closest cache.</p> + + + <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> stashcp --closest +</code></pre></div> + </div> + + </li> + <li> + <p>Run a custom <a href="https://github.com/djw8605/closest-cache-cloudflare">closest script</a> that will query the Cloudflare worker for the nearest caches and print out the cache.</p> + + </li> +</ol> + +<p>After the jobs completed, I compiled the cache decisions into a <a href="https://docs.google.com/spreadsheets/d/1mo1FHYW2vpCyhSeCCd_bwP21rFFzqedv0dZ0z8EY4gg/edit?usp=sharing">spreadsheet</a> and manually evaluated each cache selection decision. The site names in the spreadsheet are the somewhat arbitrary internal names given to sites.</p> + +<p>In the spreadsheet, you can see that the correct cache was chosen <strong>86%</strong> of the time with the old GeoIP service, and <strong>95%</strong> of the time with Cloudflare Workers.</p> + +<h3 id="notes-during-the-evaluation">Notes during the Evaluation</h3> + +<p>Cloudflare was determined to be incorrect at two sites, the first being <code class="language-plaintext highlighter-rouge">UColorado_HEP</code> (University of Colorado in Boulder). In this case, the Colorado clients failed the primary GeoIP lookup and the Cloudflare worker fell back to using the <code class="language-plaintext highlighter-rouge">IATA</code> code from the request. The requests from Colorado were all received by the Cloudflare Dallas colocation site, which is nearest the Houston cache. The original GeoIP service chose the Kansas City cache, which is the correct decision. It is unknown whether the original GeoIP service chose the KC cache because it knew the GeoIP location of the clients, or because it fell back to the Kansas default.</p> + +<p>The second site where the Cloudflare worker implementation was incorrect was <code class="language-plaintext highlighter-rouge">SIUE-CC-production</code> (Southern Illinois University Edwardsville). In this case, the original GeoIP service chose Chicago, while the new service chose Kansas. Edwardsville is almost equidistant from the KC cache and Chicago. The difference in the distance to the caches is ~0.6 km, with Chicago being closer.</p> + +<!-- TODO: Find out why KC cache was chosen for SIUE --> + +<p>An example of a site that did not work with GeoIP was <code class="language-plaintext highlighter-rouge">ASU-DELL_M420</code> (Arizona State University). The original service returned that the KC cache was the nearest. For the Cloudflare service, GeoIP failed and gave the default latitude/longitude, the middle of Kansas, but the data center serving the request had the airport code of <code class="language-plaintext highlighter-rouge">LAX</code> (Los Angeles). The nearest cache to <code class="language-plaintext highlighter-rouge">LAX</code> is the UCSD cache, which is the correct cache decision.</p> + +<p>During the evaluation, I originally used the Cloudflare worker development DNS address, <a href="https://stash-location.djw8605.workers.dev">stash-location.djw8605.workers.dev</a>. Purdue University and the American Museum of Natural History sites both blocked the development DNS address. The block was from an OpenDNS service which reported the domain had been linked to malware and phishing. Since the DNS hostname was hours old, it’s likely that most <code class="language-plaintext highlighter-rouge">*workers.dev</code> domains were blocked.</p> + +<h2 id="conclusion">Conclusion</h2> + +<p>Improving the cache selection can improve download efficiency. It is left as future work to measure whether the nearest geographical cache is the best choice; for as long as the OSDF uses a GeoIP service for cache selection, it is important to select the correct cache. Using the new Cloudflare service results in a correct cache decision <strong>95%</strong> of the time vs. <strong>86%</strong> with the original service.</p> + +<p>Cloudflare Workers is also very affordable for the scale that the OSDF would require. The first 100,000 requests are free, while it is $5/mo for the next 10 million requests. The OSPool runs between 100,000 and 230,000 jobs per day, easily fitting within the $5/mo tier.</p>
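+
+<p>As a back-of-the-envelope check (assuming, hypothetically, roughly one cache-selection request per job):</p>
+
+<div class="highlight"><pre><code class="language-plaintext"># Does the OSPool fit in the $5/mo Workers tier?
+jobs_per_day = 230_000        # upper end of the OSPool's daily job count
+print(jobs_per_day * 31)      # 7,130,000 -- under the 10M request tier</code></pre></div>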
 + + + + + Toy programs for learning a new language + + 2022-01-15T16:00:00-07:00 + https://hpc.social/2022/toy-programs-for-learning-a-new-language + <p>It used to be that I&#8217;d get interested in a new programming language, but I wouldn&#8217;t have a new project to use it for and had trouble knowing how to start. I have trouble really grasping a new language without building something in it, and &#8220;X by example&#8221; or working through a book don&#8217;t really do the job.</p> + +<p>What&#8217;s helped me lately is to build an array of &#8220;standard&#8221; toy programs that I understand reasonably well, and that I can use to explore the new language and figure out how to do something real in it.</p> + +<p>Right now, my toy program collection consists of:</p> + +<ul><li>A link shortening service, like <a href="https://bit.ly">bit.ly</a> or <a href="https://tinyurl.com/">tinyurl</a>, along with an HTTP API for adding and removing links</li><li>A <a href="https://scipython.com/book/chapter-7-matplotlib/examples/the-two-dimensional-diffusion-equation/">2D diffusion simulation</a></li><li>A &#8220;system package inventory&#8221; program, that builds a list of all the RPMs/DEBs installed on a Linux machine and shoves them into a SQLite database</li></ul> + +<p>This is almost never what I&#8217;d call production-quality code. For example, when I&#8217;m writing these toy programs, I rarely write unit tests (until I start exploring the test libraries for the language!). But they&#8217;re still very valuable learning tools, and give me space to explore some very different use cases.</p> + +<p>I almost always write all three in a given language, but the order depends a lot on what I think the new language will be good for. For example, I&#8217;ll write the &#8220;system package inventory&#8221; program first if I think the new language might be handy for system administration tools. It&#8217;s a great way to see how well the language plays with a common Linux environment, how painful it is to use SQLite, and to get practice writing CLI tools in it. I&#8217;ll often augment the basic &#8220;scan and store&#8221; functionality with a CLI to do frequent queries, like &#8220;on what date was this package last upgraded&#8221;.</p> + +<p>On the other hand, if I think I&#8217;m going to use the new language for a bunch of numerical work, I&#8217;ll start with the diffusion simulation. When I write that, I often start with a naive implementation and then start playing with profilers and other performance tools, or try to parallelize the simulation. This is also a great excuse to dig into any plotting tools commonly used with the language.</p>
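+
+<p>As a rough sketch of what that naive starting point can look like (here in Python with numpy; the grid size, diffusion coefficient, and step sizes are arbitrary choices):</p>
+
+<div class="highlight"><pre><code class="language-plaintext">import numpy as np
+
+# Explicit finite-difference update for du/dt = D * laplacian(u)
+n, D, dx, dt = 100, 4.0, 0.1, 0.0001   # dt kept below the stability limit dx**2/(4*D)
+u = np.zeros((n, n))
+u[n // 2, n // 2] = 100.0              # a hot spot in the middle of the grid
+
+for _ in range(1000):
+    # 5-point Laplacian stencil on the interior; edges stay fixed at zero
+    lap = (u[:-2, 1:-1] + u[2:, 1:-1] + u[1:-1, :-2] + u[1:-1, 2:]
+           - 4.0 * u[1:-1, 1:-1]) / dx**2
+    u[1:-1, 1:-1] += D * dt * lap
+
+print(u.max(), u.sum())                # the peak flattens as the heat spreads</code></pre></div>
+
+<p>From there, the fun is in swapping that loop for whatever the language&#8217;s idiomatic vectorized, parallel, or compiled equivalent is, and measuring the difference.</p>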
 + +<p>These toy programs are also handy if I want to explore new ways to integrate a service into a larger production environment. For example, I might start with the link shortening service, deploying the service itself statelessly and persisting the list of links into a PostgreSQL DB. Then I start complicating things&#8230;</p> + +<ul><li>Let&#8217;s add logging!</li><li>And tracing!</li><li>It&#8217;s always a good idea to expose Prometheus metrics</li><li>And wouldn&#8217;t it be handy to support multiple database backends?</li><li>Now wrap it all in a Helm chart for handy deployment</li></ul> + +<p>I imagine I&#8217;m not the only person to have a standard collection of learning projects for new languages. If you do this too, what does your project list look like?</p> + + + + + Cache Age Binning PR Finally Merged! + + 2022-01-12T00:00:00-07:00 + https://hpc.social/2022/cache-age-binning-pr-finally-merged- + <p>I’ve had this PR hanging around in various forms for years. It’s basically the last piece of the OSD memory target code. We can now get a “binned” view of the relative ages of items in different LRU caches and dynamically adjust target sizes for different caches. PR is <a href="https://github.com/ceph/ceph/pull/43299">here</a> and memory usage behavior charts are <a href="https://docs.google.com/spreadsheets/d/1lSp2cLzYmRfPILDCyLMXciIfdf0OvSFngwXukQFXIqQ/edit?usp=sharing">here</a>.</p> + + + + + Things that are Hard + + 2022-01-07T12:30:00-07:00 + https://hpc.social/2022/things-that-are-hard + <p>I saw a funny tweet on Twitter the other night - it was someone from a large consumer company sharing +their vision for “<a href="https://hypebeast.com/2022/1/walmart-2017-mutual-mobile-metaverse-shopping-video-resurfaces" target="_blank">the next generation shopping experience</a>” and it was a virtual person walking through a supermarket aisle and reaching out to pick up a bottle of wine. +I can’t find the specific tweet, but it said something to the effect of:</p> + +<blockquote> + <p>Nobody asked for this. Stop making stuff to solve problems that people don’t have</p> + +</blockquote> + +<p>My dear reader, it me! 😲️ This message hit me really hard, because I am definitely one to build niche tools for use cases that likely don’t exist but seem fun or interesting to me. I also feel pretty <a href="https://twitter.com/vsoch/status/1478913234136494081" target="_blank">disconnected</a> from communities that are innovating and testing new ideas.</p> + +<h2 id="what-is-hard">What is hard?</h2> + +<p>This is a problem that a lot of us have. We build things that nobody needs. We need to focus more on the problems that people are actually facing. I would also scope that to developer workflows, which include automation, testing, and development. Since I have a nice view into my own mental space, here is my list of things that are hard.</p> + +<ol class="custom-counter"> + <li>When I am trying to develop software and I can't open an interface with the code and environment I need</li> + <li>That my main interaction with a resource is via SSH</li> + <li>When a workflow or even container works in one place but not another</li> + <li>When I need to develop, build in CI, push to a registry, and pull. One mistake? Start from scratch</li> + <li>When I need to run a job and I have to interact with a job manager and it's hard and annoying</li> + <li>Logging or monitoring means looking at text files with cryptic names</li> + <li>Automated testing on HPC is not a thing. 
Build on GitHub and pray.</li> + <li>When I can't easily navigate code or documentation, or it's completely missing</li> + <li>When I set up everything the way I like it and I have to log in to a new system and do it all over again</li> + <li>When I want to develop something that uses a cluster resource but there are no exposed APIs.</li> + <li>When it's impossible to compare between systems because they are special snowflakes</li> + <li>When I can't easily test across the systems that my software is intended for.</li> + <li>To scale anything I have to use a job manager, wait hours, and then again if there is one mistake</li> + <li>When it takes 2 hours or more to get a node allocated</li> + <li>When I can't really make tools for HPC because I'm trying to find workarounds for all these problems</li> +</ol> + +<p>And I’d add a “thing that is annoying” to be this obsessive focus on power and scale, in a competitive sense, and this race to be in the Top500 and beat the other guy over all else. The constant need to rebuild clusters means we never +focus on the details of how we use them. We do the same things over again. I’ve mentioned these things before, possibly many times, but I need to point them out again.</p> + +<blockquote> + <p>Our current developer environments are more like handcuffs than places we are enabled to thrive.</p> + +</blockquote> + +<p>The reality for me is that I tend to put myself in a new role or environment, and then think of lots of cool ways to extend a particular tool, or build something before it. This is why I’ve made a ton of visualizations, associated tools, or posts for spack - it’s literally just the thing that is right in front of me. Put something else in front of me, such as an entire infrastructure with APIs, and I’d do the same. So why can’t a nice set of developer tools be available for the resources I’m supposed to be using?</p> + +<h2 id="develop-based-on-specific-problems">Develop based on specific problems</h2> + +<p>I think I want to develop more with a focus on these problems. Don’t get me wrong - I’ll definitely keep making silly projects. But my vision for the future needs to be oriented toward these pains. These in particular are the problems that I think our community needs to look at, at least given this developer perspective. +I say this because I’ve seen and used the dark side - having free rein of beautiful cloud APIs to let me automate to my heart’s content! +I only now, without being a part of some cloud or container cluster deployed project, am aware that I don’t have access to these development tools. + What is my solution now? I largely don’t ssh into an HPC cluster until I absolutely have to - either to scale something, or reproduce a workflow on GitHub actions that works there (but then is really challenging to get it working on the cluster resource). Indeed <a href="https://twitter.com/vsoch/status/1461908217223528448" target="_blank">this entire thread</a> resulted after a frustrating evening of exactly that.</p> + +<p>What isn’t helpful? What isn’t helpful is telling me “This center / place / person has this thing that has solved this problem.” Can I easily access it, and what about the entire research software engineering community? This kind of response shuts down the conversation +and makes the developer (myself for example) realize that the person I’m talking to is not interested in thinking about how to inspire change. 
+I’ve been really frustrated recently with mentioning even an abstract idea, and getting shut down with “Oh, that sounds like this other tool.” +For a project to reach this “mention status” it needs to be easy to install or use, and not have a barrier of privilege where you have to work at a certain place or know people. Telling me that there is a solution that requires some convoluted steps and permissions not only implies that it is only available to those with privilege, but that the solution is not well adopted enough or shared enough to be truly a solution for our community.</p> + +<h2 id="inspiring-vision">Inspiring Vision</h2> + +<p>If we aren’t happy with the current state of the world, what are our options? Well, we could leave our current roles to find another state that is more similar to what we want. Personally speaking, I haven’t hit that point quite yet. I want to try my hardest to formulate a vision for how I want the world to be, and then find opportunity to work on it from where I am. The wisdom here is that no specific role is perfect, and optimally we should place ourselves somewhere where there are resources and open-mindedness for change. It’s up to us to extend our influence as best we can to help drive some possible future. If you try that and fail? At least you tried.</p> + +<p>These are the things that are hard. I am going to try harder to have them be the focus of my thinking about the future. I want to make them easier. I’m starting to realize that I should perhaps think beyond the constraints of HPC, and more toward the kind of infrastructure that I want, and then +figure out how to slowly integrate it as a part of our culture too. We can start with a core vision for a future that we want, and then +slowly build up tooling and community around that.</p> + +<p>Happy Friday, friends!</p> + + + + + New Year's Resolution for HPC- Using Resources More Efficiently + + 2022-01-04T20:27:54-07:00 + https://hpc.social/2022/new-year-s-resolution-for-hpc-using-resources-more-efficiently + <p>A hearty happy new year to everyone. It’s that time of the year when we hear from folks about their resolutions for the new year. But rather than talk about me purchasing a gym membership, I’d like to share my thoughts on a new year’s resolution for HPC.</p> + +<p>With the topsy-turvy weather that we’re seeing all over the planet, we’re all acutely aware of the changes that are happening to our climate and what it represents for humankind. HPC is a key engine for science, including efforts that are crucial to help with our climate change battle. Climate and ocean modelling are some examples of the use of HPC that immediately come to mind in this respect. Modelling the environment is important for us to understand what is occurring around us and what is projected to occur. Additionally, materials science is also important in order to help develop the necessary technologies to more effectively store energy from renewable sources, and to transmit and generate energy. HPC is a consumer of energy, which brings me to the HPC resolution for this year – using computing resources more efficiently.</p> + +<p>We’ve seen great strides in the efficiency of processors and systems. But at scale, large HPC centers consume large amounts of energy for both powering the servers and storage systems, as well as the cost of cooling. And if you’re using cloud for HPC, then of course you’re not concerned with the energy and cooling, but rather the cost to you. 
In either case, making the most efficient use of your infrastructure should be a key consideration. Workload schedulers are the interface between users and jobs in any HPC environment. Users submit work and it’s the task of the workload scheduler to find suitable compute resources to dispatch the work to. On the surface, this may seem like a trivial task. But with potentially large numbers of jobs, users, servers and priorities, workload and resource management is anything but trivial. The good news is that there are workload management solutions which bring decades of experience to the table.</p> + +<p>IBM Spectrum LSF Suites provide a fully integrated workload management solution for HPC environments. LSF builds on almost 30 years of experience in workload and resource management and is used on some of the world’s largest supercomputers including <a href="https://www.olcf.ornl.gov/olcf-resources/compute-systems/summit/">Summit</a>, at the Oak Ridge Leadership Computing Facility. At a high level, here are some critical areas where LSF can help to drive better efficiency in your HPC infrastructure:</p> + +<ul> +<li>Dynamic hybrid cloud – automatically flex up and down cloud resources according to policies, with support for all major cloud providers. Learn more <a href="https://mediacenter.ibm.com/media/Dynamic%20hybrid%20cloud%20with%20IBM%20Spectrum%20LSF/1_u69d0jpi">here</a></li> +<li>Dynamic multi-instance GPU support – right size NVIDIA A100 multi-instance GPU slices according to incoming workload demands. Learn more <a href="https://medium.com/ibm-data-ai/i-had-a-dream-i-got-every-gpu-i-wanted-75291f4e96e9?source=friends_link&amp;sk=535b00f1d534d0bafed7a4b112189b7e">here</a></li> +<li>User productivity – single unified UI for job submission and management which captures repeatable best practices. Learn more <a href="https://www.gaborsamu.com/blog/easy_hpc/">here</a></li> +</ul> +<p>Start the year off right, with a focus on efficiency in your HPC environment with IBM Spectrum LSF. Learn more <a href="https://www.ibm.com/us-en/products/hpc-workload-management">here</a>.</p> + + + + + Hello operator. I need an HPC cluster – fast + + 2021-12-15T00:00:39-07:00 + https://hpc.social/2021/hello-operator-i-need-an-hpc-cluster-fast + <p>As users of HPC look to build new workflows that go beyond traditional simulation and modeling, cloud native development models that rely upon Kubernetes (K8s) and Docker are front of mind. K8s provides the framework and a large ecosystem of key applications and technologies which can help to facilitate this transformation of HPC. This naturally leads to HPC centers looking at approaches to use their infrastructure to run their traditional HPC workloads alongside K8s workloads.</p> + +<p>To this end, there is a K8s/OpenShift integration with IBM Spectrum LSF which is available as a tech preview on the Spectrum Computing <a href="https://github.com/IBMSpectrumComputing/lsf-kubernetes">github</a>. There are a few parts to the integration. Firstly, LSF can act as a scheduler for K8s/OpenShift pods. Secondly, an operator is available that makes it easy to deploy an LSF cluster on top of a K8s/OpenShift cluster.</p> + +<p>It is the K8s operator for LSF that is the focus of this writeup. 
For those of you who have been following me on <a href="https://twitter.com/gabor_samu">Twitter</a>, you’ll be aware that I’ve been tinkering with the Spectrum LSF K8s operator on and off for about a year now.</p> + +<blockquote class="twitter-tweet"><p dir="ltr" lang="en">Even though Canadian Thanksgiving is a month behind us, <br />I'm definitely feeling the vibe from US <a href="https://twitter.com/hashtag/Thanksgiving?src=hash&amp;ref_src=twsrc%5Etfw">#Thanksgiving</a>. Rockin' Spectrum LSF on <a href="https://twitter.com/hashtag/OpenShift?src=hash&amp;ref_src=twsrc%5Etfw">#OpenShift</a> on <a href="https://twitter.com/hashtag/IBMCloud?src=hash&amp;ref_src=twsrc%5Etfw">#IBMCloud</a> - while listening to Santana. Get your LSF <a href="https://twitter.com/hashtag/k8s?src=hash&amp;ref_src=twsrc%5Etfw">#k8s</a> operator here! <a href="https://t.co/p5V7sMg6ic">https://t.co/p5V7sMg6ic</a> <a href="https://twitter.com/hashtag/Thankful?src=hash&amp;ref_src=twsrc%5Etfw">#Thankful</a> <a href="https://twitter.com/hashtag/HPC?src=hash&amp;ref_src=twsrc%5Etfw">#HPC</a></p> +&mdash; Gábor SAMU (@gabor_samu) <a href="https://twitter.com/gabor_samu/status/1332045969349865473?ref_src=twsrc%5Etfw">November 26, 2020</a></blockquote> + +<p>This December I’ve had the opportunity to revisit the K8s operator for LSF. The motivation in this case was the need to quickly spin up LSF test clusters in order to run some Intel MPI workloads. And as we’ll see, although getting the LSF clusters spun up on demand using the operator is very straightforward, a bit of fine tuning was needed in order to successfully run the Intel MPI workloads.</p> + +<p>The github page where the K8s/OpenShift and Spectrum LSF integration is hosted contains documentation on how to set up the operator and deploy an LSF cluster on K8s/OpenShift. Spinning up the LSF cluster is quite simple, once you’ve followed the steps in the above noted documentation. We’ve configured the deployment to include an LSF management pod and 4 LSF compute pods. LSF Suite for HPC v10.2.0.11 is the version that was deployed. And the target OpenShift cluster is hosted in the IBM Cloud.</p> + +<p>After authenticating with the OpenShift cluster, using the OpenShift <em>oc</em> command line, I could spin up an LSF cluster with a single command from my laptop as follows:</p> + +<div class="highlight"><pre><code class="language-plaintext">$ oc create -f ./lsfcluster.yaml +lsfcluster.lsf.spectrumcomputing.ibm.com/example-lsfcluster created</code></pre></div> + +<p>And after a few moments, we see the one management pod, and four compute pods. 
There is a slight delay to start the pods as there are some dependencies on +PVCs (storage) which need to be created first by the operator.</p> + +<div class="highlight"><pre><code class="language-plaintext">$ oc get pods +NAME READY STATUS RESTARTS AGE +ibm-lsf-operator-66bd9449c-5lzmx 2/2 Running 0 21h +lsfcluster-master-68f954645-j8v2v 1/1 Running 0 5m27s +lsfcluster-rhel7-fcdcf8559-d6955 1/1 Running 0 5m29s +lsfcluster-rhel7-fcdcf8559-h5pqb 1/1 Running 0 5m29s +lsfcluster-rhel7-fcdcf8559-xkld7 1/1 Running 0 5m28s +lsfcluster-rhel7-fcdcf8559-zmgzp 1/1 Running 0 5m29s</code></pre></div> + +<p>Connecting to the pods is straightforward and we’ve run a few commands to show that the LSF cluster is operational.</p> + +<div class="highlight"><pre><code class="language-plaintext">$ oc exec -ti lsfcluster-rhel7-fcdcf8559-d6955 -- /bin/bash + +LSF POD [root:/]# lsid +IBM Spectrum LSF 10.1.0.11, Nov 12 2020 +Suite Edition: IBM Spectrum LSF Suite for HPC 10.2.0.11 +Copyright International Business Machines Corp. 1992, 2016. +US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp. + +My cluster name is lsfcluster +My master name is lsfmaster + +LSF POD [root:/]# lsload -w +HOST_NAME status r15s r1m r15m ut pg ls it tmp swp mem +lsfcluster-rhel7-fcdcf8559-h5pqb ok 0.2 0.1 0.9 3% 0.0 0 14 89G 7.4G 7.4G +lsfcluster-rhel7-fcdcf8559-xkld7 ok 1.5 1.2 0.8 3% 0.0 0 14 83G 7.4G 7.4G +lsfcluster-rhel7-fcdcf8559-zmgzp ok 2.1 0.6 0.8 3% 0.0 0 14 85G 7.4G 7.4G +lsfmaster ok 3.3 0.1 0.7 3% 0.0 0 3e+5 83G 786.8M 785M +lsfcluster-rhel7-fcdcf8559-d6955 ok 15.2 0.2 1.0 2% 0.0 0 3e+5 89G 7.4G 7.4G</code></pre></div> + +<p>Intel MPI is integrated with a number of different HPC job schedulers including LSF. More details on this can be found <a href="https://www.intel.com/content/www/us/en/develop/documentation/mpi-developer-guide-linux/top/running-applications/job-schedulers-support.html">here</a>. And you can find some tips on submitting Intel MPI jobs to LSF in the documentation <a href="https://www.ibm.com/support/pages/using-intelmpi-under-lsf-quick-guide">here</a>. Intel MPI integrates with the LSF <a href="https://www.ibm.com/docs/en/spectrum-lsf/10.1.0?topic=reference-blaunch">blaunch</a> framework to launch tasks on hosts. The Intel MPI version used is the one bundled with the current (at the time of writing) Intel oneAPI, which I installed from the online repos using the procedure <a href="https://www.intel.com/content/www/us/en/develop/documentation/installation-guide-for-intel-oneapi-toolkits-linux/top/installation/install-using-package-managers/yum-dnf-zypper.html">here</a>. The specific Intel oneAPI packages installed are: <em>intel-hpckit-runtime-2021.4.0.x86_64</em>, and <em>intel-oneapi-clck.x86_64</em>.</p> + +<p>By default, Intel oneAPI is installed to the directory <em>/opt/intel/oneapi</em>. To use the tools you must first source <em>setvars.sh</em>.</p> + +<div class="highlight"><pre><code class="language-plaintext"># . /opt/intel/oneapi/setvars.sh + +:: initializing oneAPI environment ... + bash: BASH_VERSION = 4.2.46(2)-release + args: Using "$@" for setvars.sh arguments: +:: clck -- latest +:: compiler -- latest +:: dev-utilities -- latest +:: dnnl -- latest +:: mpi -- latest +:: tbb -- latest +:: oneAPI environment initialized ::</code></pre></div> + +<p>Intel Cluster Checker (RPM) includes an example mpi_hello_world binary located in <em>/opt/intel/oneapi/clck/2021.5.0/provider/share/mpi_internode</em>. 
Let’s first check that we can run the MPI Hello World example outside of LSF. Running a single rank works.</p> + +<div class="highlight"><pre><code class="language-plaintext"># mpirun -n 1 ./mpi_hello_world +Hello world: rank 0 of 1 running on lsfcluster-rhel7-fcdcf8559-zmgzp</code></pre></div> + +<p>However, trying to run &gt; 1 rank on a single node fails with SIGBUS (Bus error).</p> + +<div class="highlight"><pre><code class="language-plaintext"># mpirun -n 4 ./mpi_hello_world + +=================================================================================== += BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES += RANK 0 PID 10582 RUNNING AT lsfcluster-rhel7-fcdcf8559-zmgzp += KILLED BY SIGNAL: 7 (Bus error) +=================================================================================== + +=================================================================================== += BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES += RANK 1 PID 10583 RUNNING AT lsfcluster-rhel7-fcdcf8559-zmgzp += KILLED BY SIGNAL: 7 (Bus error) +=================================================================================== + +=================================================================================== += BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES += RANK 2 PID 10584 RUNNING AT lsfcluster-rhel7-fcdcf8559-zmgzp += KILLED BY SIGNAL: 7 (Bus error) +=================================================================================== + +=================================================================================== += BAD TERMINATION OF ONE OF YOUR APPLICATION PROCESSES += RANK 3 PID 10585 RUNNING AT lsfcluster-rhel7-fcdcf8559-zmgzp += KILLED BY SIGNAL: 7 (Bus error) +===================================================================================</code></pre></div> + +<p>As it turns out, this is related to the shared memory size configured for the pods, which defaults to 64MB. This is also documented for MPICH <a href="https://wiki.mpich.org/mpich/index.php/Frequently_Asked_Questions#Q:_Why_MPI_Put_raises_SIGBUS_error_inside_docker.3F">here</a>. Checking the shared memory size on the compute pod showed a value of 64MB.</p> + +<div class="highlight"><pre><code class="language-plaintext"># mount |grep shm +shm on /dev/shm type tmpfs (rw,nosuid,nodev,noexec,relatime,context="system_u:object_r:container_file_t:s0:c102,c977",size=65536k)</code></pre></div> + +<p>From this, we deduce that the solution is to increase the shmem size. On the surface, this seemed like something straightforward to do. Indeed, there is a documented procedure on the OpenShift page <a href="https://docs.openshift.com/container-platform/3.6/dev_guide/shared_memory.html">here</a> to achieve this.</p> + +<p>However, the above steps don’t work if the changes are made to the deployment file and a new LSF cluster is created. The LSF operator tries to create a PVC for a volume of type <em>emptyDir</em> and the deployment stalls waiting on this (which can never happen). The LSF operator should not try to create a PVC in this case. Rather it should simply add the <em>Volumes</em> and <em>VolumeMounts</em> as described in the OpenShift shared memory guide above. 
Note that I have given this feedback to the fine folks who maintain the LSF operator.</p> + +<p>It is possible though to edit an existing deployment to add the <em>volumes</em> and <em>volumeMounts</em> required to increase the shared memory for the pods.</p> + +<div class="highlight"><pre><code class="language-plaintext">$ oc get deployments +NAME READY UP-TO-DATE AVAILABLE AGE +ibm-lsf-operator 1/1 1 1 13d +lsfcluster-master 1/1 1 1 3h4m +lsfcluster-rhel7 4/4 4 4 3h4m</code></pre></div> + +<p>As we’ll only be running MPI jobs on the compute nodes, we only need to edit the deployment <em>lsfcluster-rhel7</em> for which there are 4 pods running. Using the +<em>oc</em> tool, edit the deployment and make the following updates:</p> + +<div class="highlight"><pre><code class="language-plaintext">oc edit deploy lsfcluster-rhel7 +(Note: This starts an editor where you need to add the following - see the lines marked "# add" below.) +deployment.apps/lsfcluster-rhel7 edited</code></pre></div> + +<p>What to add while editing the deployment above:</p> + +<div class="highlight"><pre><code class="language-plaintext">... +volumeMounts: +- mountPath: /tmp/.myCluster.sbd + name: sbatchd-volume +- mountPath: /home + name: home +- mountPath: /apps + name: applications +- mountPath: /dev/shm # add + name: dshm # add +... +volumes: +- emptyDir: # add + medium: Memory # add + name: dshm # add +- emptyDir: {} + name: sbatchd-volume +- name: home + persistentVolumeClaim: + claimName: lsfcluster-home +- name: applications + persistentVolumeClaim: + claimName: lsfcluster-applications</code></pre></div> + +<p>OpenShift will now create the 4 new requested pods based upon the above updates. That being said, OpenShift will try to keep half of the pods active (2 out of 4 in this case) as it creates the new deployment.</p> + +<div class="highlight"><pre><code class="language-plaintext">$ oc get pods +NAME READY STATUS RESTARTS AGE +ibm-lsf-operator-66bd9449c-5lzmx 2/2 Running 0 24h +lsfcluster-master-68f954645-j8v2v 1/1 Running 0 3h11m +lsfcluster-rhel7-6577566b87-8jsvc 0/1 Running 0 11s +lsfcluster-rhel7-6577566b87-fhwtt 0/1 Running 0 14s +lsfcluster-rhel7-6577566b87-nxkp4 1/1 Running 0 2m28s +lsfcluster-rhel7-6577566b87-ppxm9 1/1 Running 0 2m28s +lsfcluster-rhel7-fcdcf8559-d6955 1/1 Terminating 0 3h11m +lsfcluster-rhel7-fcdcf8559-h5pqb 1/1 Terminating 0 3h11m +lsfcluster-rhel7-fcdcf8559-xkld7 1/1 Running 0 3h11m</code></pre></div> + +<p>After a short time, we end up with 4 new compute pods, which should contain the edits made earlier to the <em>volumes</em> and <em>volumeMounts</em>.</p> + +<div class="highlight"><pre><code class="language-plaintext">$ oc get pods +NAME READY STATUS RESTARTS AGE +ibm-lsf-operator-66bd9449c-5lzmx 2/2 Running 0 24h +lsfcluster-master-68f954645-j8v2v 1/1 Running 0 3h13m +lsfcluster-rhel7-6577566b87-8jsvc 1/1 Running 0 2m29s +lsfcluster-rhel7-6577566b87-fhwtt 1/1 Running 0 2m32s +lsfcluster-rhel7-6577566b87-nxkp4 1/1 Running 0 4m46s +lsfcluster-rhel7-6577566b87-ppxm9 1/1 Running 0 4m46s</code></pre></div> + +<p>Let’s confirm if the shared memory has been correctly configured according to the updates made to the deployment. 
We now see that <em>tmpfs</em> is mounted to <em>/dev/shm</em>.</p> + +<div class="highlight"><pre><code class="language-plaintext"># mount |grep shm +shm on /dev/shm type tmpfs (rw,nosuid,nodev,noexec,relatime,context="system_u:object_r:container_file_t:s0:c801,c980",size=65536k) +tmpfs on /dev/shm type tmpfs (rw,relatime,seclabel)</code></pre></div> + +<p>As we’ve created new pods, it’s necessary to re-install Intel oneAPI. Note that this could likely be incorporated into the pod deployment (YMMV). With Intel oneAPI installed again, it&rsquo;s time to re-run the MPI Hello World example, using the same steps as earlier. Eureka!</p> + +<div class="highlight"><pre><code class="language-plaintext"># mpirun -n 4 ./mpi_hello_world +Hello world: rank 1 of 4 running on lsfcluster-rhel7-6577566b87-8jsvc +Hello world: rank 2 of 4 running on lsfcluster-rhel7-6577566b87-8jsvc +Hello world: rank 3 of 4 running on lsfcluster-rhel7-6577566b87-8jsvc +Hello world: rank 0 of 4 running on lsfcluster-rhel7-6577566b87-8jsvc</code></pre></div> + +<p>Now, let’s run the MPI Hello World example through LSF. The Intel MPI <em>mpirun</em> script takes care of setting the necessary variables to trigger the use of the LSF <em>blaunch</em> task starting mechanism. Each one of our compute pods has 4 cores. So we specify 16 (4 cores * 4 pods) job slots when submitting the MPI job to LSF.</p> + +<div class="highlight"><pre><code class="language-plaintext"># bhosts -w +HOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV +lsfcluster-rhel7-6577566b87-8jsvc ok - 4 0 0 0 0 0 +lsfcluster-rhel7-6577566b87-fhwtt ok - 4 0 0 0 0 0 +lsfcluster-rhel7-6577566b87-nxkp4 ok - 4 0 0 0 0 0 +lsfcluster-rhel7-6577566b87-ppxm9 ok - 4 0 0 0 0 0 +lsfmaster closed_Full - 0 0 0 0 0 0 + +# bsub -n 16 -I mpirun /opt/intel/oneapi/clck/2021.4.0/provider/share/mpi_internode/mpi_hello_world +Job &lt;102&gt; is submitted to default queue &lt;interactive&gt;. +&lt;&lt;Waiting for dispatch ...&gt;&gt; +&lt;&lt;Starting on lsfcluster-rhel7-6577566b87-fhwtt&gt;&gt; +Hello world: rank 5 of 16 running on lsfcluster-rhel7-6577566b87-nxkp4 +Hello world: rank 6 of 16 running on lsfcluster-rhel7-6577566b87-nxkp4 +Hello world: rank 7 of 16 running on lsfcluster-rhel7-6577566b87-nxkp4 +Hello world: rank 12 of 16 running on lsfcluster-rhel7-6577566b87-8jsvc +Hello world: rank 13 of 16 running on lsfcluster-rhel7-6577566b87-8jsvc +Hello world: rank 8 of 16 running on lsfcluster-rhel7-6577566b87-ppxm9 +Hello world: rank 14 of 16 running on lsfcluster-rhel7-6577566b87-8jsvc +Hello world: rank 9 of 16 running on lsfcluster-rhel7-6577566b87-ppxm9 +Hello world: rank 10 of 16 running on lsfcluster-rhel7-6577566b87-ppxm9 +Hello world: rank 11 of 16 running on lsfcluster-rhel7-6577566b87-ppxm9 +Hello world: rank 0 of 16 running on lsfcluster-rhel7-6577566b87-fhwtt +Hello world: rank 1 of 16 running on lsfcluster-rhel7-6577566b87-fhwtt +Hello world: rank 4 of 16 running on lsfcluster-rhel7-6577566b87-nxkp4 +Hello world: rank 2 of 16 running on lsfcluster-rhel7-6577566b87-fhwtt +Hello world: rank 3 of 16 running on lsfcluster-rhel7-6577566b87-fhwtt +Hello world: rank 15 of 16 running on lsfcluster-rhel7-6577566b87-8jsvc</code></pre></div> + +<p>In summary, the LSF operator for K8s/OpenShift makes it very easy to spin up an LSF cluster for your HPC needs. For specific types of workloads, the default shared memory setting for the pods is not sufficient; currently, there is a procedure to address this post-deployment. 
And Intel MPI jobs run through LSF transparently use the blaunch task starter – as expected.</p> + +<p>In an upcoming blog, I plan to devote a bit more time to discussing the Spectrum LSF operator for K8s/OpenShift.</p> + + + + + The Easy HPC button + + 2021-11-23T14:14:56-07:00 + https://hpc.social/2021/the-easy-hpc-button + <p>We live in a results-driven world. Whether it&rsquo;s an aerodynamicist waiting on simulation results to determine the efficiency of their latest model, or a doctor waiting on genomic pipeline results to determine next steps for a patient, results make the world go round. And this of course goes beyond the sciences. As any thespian will tell you, stage productions are the result of the work of many individuals behind the scenes.</p> + +<p>Much in the same way, complex computational processes that are found in HPC rely upon many things behind the scenes to be carried out. And although the devil may be in the details, consumers of HPC resources shouldn’t have to go through purgatory to get results. Organizations today rely on HPC to drive their core mission, delivering products to market faster. So, it goes without saying that HPC being easy to use is crucial to driving productivity. And much as the technology of HPC has changed, so have the skills of the users. Modern HPC infrastructure relies upon a myriad of technologies including containerization, accelerators and cloud. And for users, gone are the expectations of learning a complex CLI, replaced by the need for easy-to-use interfaces.</p> + +<p>Workload schedulers are a necessary component of any HPC cluster. Schedulers have been around for a very long time and as they become more sophisticated, they support an ever-increasing number of CLI and configuration options. Although these options provide greater functionality, their use can be complicated for end users. What if you could provide an HPC easy button for your users?</p> + +<p><a href="https://www.ibm.com/products/hpc-workload-management">IBM Spectrum LSF</a> is a workload management solution for HPC environments. Over the past 30 years, it&rsquo;s evolved from being just a workload scheduler to an entire suite of capabilities covering the lifecycle of HPC jobs. Scheduling-wise, LSF has not only kept pace with the massive scale of commercial HPC environments today, but also provides capabilities which dramatically lower the bar to access HPC.</p> + +<p>Ease of use starts with the users and LSF provides a web-based job submission and management portal which greatly simplifies the use of your HPC cluster. Administrators define custom forms that hide the complexity, and they can even be customized to use application- and domain-specific language understood by your users. For users on the go, LSF has Android and iOS mobile clients so you can check on the state of your running jobs. And a RESTful API is also available to integrate LSF into your corporate infrastructure.</p> + +<p>With users well taken care of, LSF features many capabilities which allow administrators to take advantage of technologies such as containerization, hybrid cloud and GPUs. Out of the box support for various container technologies lets administrators control which containers can be used in the environment and hides the complex container startup commands from users. Support for dynamic hybrid cloud enables LSF to burst out to any of the supported cloud providers when needed and scale back the resources when no longer required. 
And intelligent data staging takes care of moving data to and from the cloud without blocking or making resources wait for transfers.</p> + +<p>What does this all add up to? Well, you can think of it as an HPC easy button. Your users simply fill in a form and submit their job. LSF worries about the underlying complexities, where to place the job, moving data, CPU and GPU allocation. The user waits to get the job results back and is oblivious to everything that is going on behind the curtain.</p> + +<p>Learn more about easy HPC with IBM Spectrum LSF in this session: <a href="https://community.ibm.com/community/user/businessanalytics/viewdocument/replay-simplifying-hpc-just-push?CommunityKey=74d589b7-7276-4d70-acf5-0fc26430c6c0&amp;tab=librarydocuments">Simplifying HPC - Just push the button</a>.</p> + + + + + Researcher's Time Has Value, Too + + 2021-11-23T00:00:00-07:00 + https://hpc.social/2021/researcher-s-time-has-value-too + <h2 id="and-researchers-value-their-time">..And Researchers Value Their Time</h2> + +<p>(Note: This post is adapted from <a href="https://www.researchcomputingteams.org/newsletter_issues/0102">#102</a> of the <a href="https://www.researchcomputingteams.org">Research Computing Teams Newsletter</a>)</p> + +<p>If you followed HPC twitter in late 2021 at all, you will have seen a <a href="https://twitter.com/vsoch/status/1461908217223528448">heartfelt thread</a> by a well-known research software developer, one who was a key contributor to the Singularity project among others, lamenting the frankly appalling state of developer productivity in HPC - both in what tools exist, and support for them (and other tools for developers) at academic centres. A <strong>lot</strong> of people <a href="https://twitter.com/HPC_Guru/status/1462070286983983108">chimed in on the discussion</a>, including <a href="https://twitter.com/five9a2/status/1462137427527675918">one of the leading developers of the PETSc project</a>, embedded software developers, some key people at big computing centres, all agreeing that there was a problem, but typically zooming in on one or another particular technical or procedural issue and not coming to any conclusion.</p> + +<p>I think the issue is a lot bigger than HPC software development workflows - it comes up in too many contexts to be about specific technical issues of running CI/CD pipelines on fixed infrastructure. The only people to identify the correct underlying issue, in my opinion, were people with experience of both academia and the private sector, such as Brendan Bouffler at AWS:</p> + +<blockquote class="twitter-tweet"><p dir="ltr" lang="en">Too much reliance on “free” labour - postgrads and post docs who, invariably, decide that burning their time being mechanical turks for their “superiors” just sucks, so they come and work for us. And since we pay $$, we’re not gonna waste them on things that software can do.</p> +&mdash; Brendan Bouffler☁️ 🏳️‍🌈 (@boofla) <a href="https://twitter.com/boofla/status/1462099372255203346?ref_src=twsrc%5Etfw">November 20, 2021</a></blockquote> + +<p>The same argument got made by R&amp;D research staff in the private sector. Their time actually has value; as a result, it gets valued.</p> + +<p>In academic research computing, partly because of low salaries — especially for the endless stream of trainees — but also because we typically provide research computing systems for free, we tend to put zero value on people’s time. Thus our “lowest-cost” approach definitely does not apply to researcher or trainee effort. 
If researchers have to jump through absurd hoops to get or renew their accounts, or have to distort their workflows to fit one-size-fits-all clusters and queueing systems, or postdocs have to spend hours of work by hand every month because tools to automate some of that work would cost $500, well, what do they expect, right?</p> + +<p>It’s not that this is an indefensible position to take, but one can’t take this position <em>and</em> act surprised when researchers who can afford to are seriously investigating taking their projects into the commercial cloud even though it costs 2x as much. It turns out that people’s time is worth quite a lot to them, and is certainly worth some money. If we were to <a href="https://www.dursi.ca/post/research-computing-funding-to-researchers">let researchers spend their research computing and data money wherever they pleased</a>, I think we’d find that significantly less than 100% of researchers would use “lowest price possible” as their sole criterion for choosing providers. Core facilities like animal facilities, sequencing centres, and microscopy centres compete on dimensions other than being the cheapest option available.</p> + +<p>To be sure, there are process issues in academia which exacerbate the tendency to see people’s time as valueless - rules about capital vs operating costs, for instance - but those rules aren’t a law of nature. If we were paying people in academia <a href="https://www.levels.fyi/">what they pay in tech</a>, administration would suddenly discover some additional flexibility in the thresholds and criteria for considering something a capital expense if it meant we could be a bit more parsimonious with people’s time.</p> + +<p>Until then, one can’t be too surprised when the most talented and ambitious staff get routinely poached by the private sector, and when research groups start considering service providers that cost more but respect their time.</p> + + + + + Ceph Crimson 2021 Q3 Project Update + + 2021-11-22T00:00:00-07:00 + https://hpc.social/2021/ceph-crimson-2021-q3-project-update + <p>This is the first time we’re seeing Bluestore in Crimson beating Bluestore in Classic in some (low core count) tests. Starting to see lower tail latency as well which is a really good sign. Top end performance will be contingent on multi-reactor support though. Slides available <a href="https://docs.google.com/presentation/d/1eydyAFKRea8n-VniQzXKW8qkKM9GLVMJt2uDjipJjQA/edit?usp=sharing">here</a>.</p> + + + + + Weekend IT SPARC-eology + + 2021-11-03T00:49:40-06:00 + https://hpc.social/2021/weekend-it-sparc-eology + <p>Well it&rsquo;s that time of the year again in the northern hemisphere where the weather starts to change and the autumn +colours are in full swing. And the gray, rainy weekends have left me looking for - something to provide a spark. +And there&rsquo;s no better way than revisiting some of my old hobby IT projects which have been languishing in my basement. +This time I decided to turn my attention to something SPARC powered. And before you wonder which SUN Microsystems +server or desktop, this blog is actually about a laptop - an UltraSPARC IIe based laptop.</p> + +<p>In the mid-2000&rsquo;s I managed to pick up a laptop curiosity. Up to that point, I only had x86_64 and PowerPC-based +laptops. In case you&rsquo;re wondering, the PowerPC based laptops were of course from Apple. When I came across the blue and +grey coloured Naturetech 777 laptop, I simply couldn&rsquo;t resist. 
After all, my daily driver from 2000-2005 doing tech +support at Platform Computing was a SUN Microsystems Ultra 5 with a SUN PCi card. It served me well over those years, +albeit being somewhat crippled by having poor IO performance.</p> + +<p>I&rsquo;ve written about the Naturetech 777 before and even posted some rather shaky videos to YouTube. You can find all of +the links in the blog <a href="https://www.gaborsamu.com/blog/ultrasparc_laptop/">UltraSPARC powered laptop - circa 2001</a>. +There&rsquo;s not too much information available about the Naturetech SPARC laptops. <a href="https://www.cnet.com/news/pc-maker-ships-sun-based-workstation/">Here is one write-up</a> I found from early +2002 with some information. If you&rsquo;re after more details just let me know. Otherwise, your favourite search engine is +your friend.</p> + +<p>So what was the goal here with this IT archeology weekend project? Well it was to get the latest version of Solaris 9 +installed - which is Update 9. Although the system had a working copy of Solaris 9 already installed, it was an +older update. Furthermore, the system was configured to point to the now defunct Blastwave CSW open-source software +repository, and I wanted to configure the system to use <a href="https://www.opencsw.org/">OpenCSW</a>, +for which the latest Solaris 9 update is recommended.</p> + +<p>On the surface, this seemed an easy enough project. After all, I had the Solaris 9 U9 DVD media in hand. I had +previously installed an older update of Solaris 9 on the system. What could possibly go wrong? As Murphy’s law would +have it, the Solaris 9 Update 9 media simply refused to boot on the laptop.</p> + +<div class="highlight"><pre><code class="language-plaintext">GENIALstation 777S (UltraSPARC-Ile 500MHz), Keyboard Present +OpenBoot 4.0.2.12, 256 MB memory installed, Serial #12648430. +Ethernet address 8:0:20:13: de: ad, Host ID: 00c0ffee. + +ok boot cdrom + +Boot device: /pcielf,0/pci@1, 1/ide@d/cdrom@2,0: f Short disk read File and args: + +The file just loaded does not appear to be executable. </code></pre></div> + +<p>The only clue I could come up with was that the likely culprit was the old OpenBOOT version, +which I really didn&rsquo;t even want to explore updating at this stage out of fear of bricking the system. +At this point I was ready to throw in the towel. What do you do with a system from early 2000 that simply refuses +to boot the OS media? Well much like an old car, you jumpstart it!</p> + +<p>A bit of digging showed that it&rsquo;s possible to do a Jumpstart installation of Solaris from Linux or NetBSD. +There were a number of very good resources to help with the setup of this. In particular I used the +following for reference:</p> + +<ul> +<li><a href="http://www.pbandjelly.org/2005/07/solaris-10-jumpstart-from-freebsd/comment-page-1/">Solaris 10 Jumpstart from FreeBSD</a></li> +<li><a href="http://www.asgaur.com/wp/solaris-jumpstart-from-a-linux-server/">Solaris jumpstart from a Linux server</a></li> +</ul> +<p>Surprisingly, a few tweets of my efforts, significantly helped by retweets from some folks including <a href="https://twitter.com/PCzanik">Peter Czanik</a>, resulted in a lot of attention on the interwebs. 
And a fellow IBMer, <a href="https://twitter.com/CrivetiMihai">Mihai Criveti</a>, also offered some useful tips which helped me get through some of the jumpstart challenges.</p> + +<p>So following the above articles, I worked to set up all of the necessary services and configuration required. +For brevity, and to avoid repetition, here is a brief rundown. Note that the jumpstart server used is running +NetBSD 9.2/amd64.</p> + +<p><!-- raw HTML omitted -->1.<!-- raw HTML omitted --> Configure the MAC address of the jumpstart client in <em>/etc/ethers</em> on the server. +Note that in my case, because of a bad NVRAM battery, I always need to set the MAC address manually at the +OpenBOOT prompt. For this I use the procedure described <a href="http://www.alyon.org/InfosTechniques/informatique/SunHardwareReference/sun-nvram-hostid.faq">here</a>.</p> + +<div class="highlight"><pre><code class="language-plaintext"># cat /etc/ethers +08:00:20:13:de:ad sparc</code></pre></div> + +<p><!-- raw HTML omitted -->2.<!-- raw HTML omitted --> Configure an IP that will be assigned to the jumpstart client in <em>/etc/hosts</em> on the server.</p> + +<div class="highlight"><pre><code class="language-plaintext"># cat /etc/hosts +... +... +::1 localhost localhost. +127.0.0.1 localhost localhost. +192.168.1.187 sparc +... +...</code></pre></div> + +<p><!-- raw HTML omitted -->3.<!-- raw HTML omitted --> Next, we need to mount the Solaris 9 ISO. This will ultimately be NFS exported for the jumpstart client. +We also need to copy the appropriate Solaris <em>inetboot</em> file from the Solaris 9 media. This will be delivered via +tftpd to the jumpstart client.</p> + +<div class="highlight"><pre><code class="language-plaintext"># ls -la sol-9-905hw-ga-sparc-dvd.iso +-rw-r--r-- 1 root wheel 3104112640 Oct 30 00:37 sol-9-905hw-ga-sparc-dvd.iso +# vnconfig vnd0 sol-9-905hw-ga-sparc-dvd.iso +# mount -t cd9660 /dev/vnd0a /data/jumpstart/sol9u9</code></pre></div> + +<p><!-- raw HTML omitted -->4.<!-- raw HTML omitted --> The Solaris 9 media includes multiple versions of <em>inetboot</em>. You need to select the one that's appropriate for +your jumpstart client system. In this case, we use <em>inetboot</em> for the sun4u architecture as the system is UltraSPARC +IIe based. The file is copied to the <em>/tftpboot</em> directory, which will be used by <em>tftpd</em>.</p> + +<div class="highlight"><pre><code class="language-plaintext"># cd /data/jumpstart/sol9u9 +# find ./ -name inetboot -print +./Solaris_9/Tools/Boot/usr/platform/sun4m/lib/fs/nfs/inetboot +./Solaris_9/Tools/Boot/usr/platform/sun4u/lib/fs/nfs/inetboot +# cp ./Solaris_9/Tools/Boot/usr/platform/sun4u/lib/fs/nfs/inetboot /tftpboot</code></pre></div> + +<p><!-- raw HTML omitted -->5.<!-- raw HTML omitted --> The request that will be made by the jumpstart client to the tftpd will be for a filename with the IP address of +the client in hexadecimal. The IP address of the client we've specified in <em>/etc/hosts</em> is 192.168.1.187. So we convert +that to a hexadecimal string as follows:</p> + +<div class="highlight"><pre><code class="language-plaintext"># printf "%02X%02X%02X%02X\n" 192 168 1 187 +C0A801BB</code></pre></div>
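+
+<p>The same conversion can be cross-checked in Python (purely illustrative):</p>
+
+<div class="highlight"><pre><code class="language-plaintext"># tftpd will be asked for a file named after the client IP, in hex
+ip = "192.168.1.187"
+print("".join("%02X" % int(octet) for octet in ip.split(".")))   # C0A801BB</code></pre></div>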
 + +<p>And create a symbolic link C0A801BB which points to inetboot:</p> + +<div class="highlight"><pre><code class="language-plaintext"># cd /tftpboot +# ln -s ./inetboot C0A801BB +# ls -la +total 320 +drwxrwxrwx 2 root wheel 512 Nov 2 19:26 . +drwxr-xr-x 24 root wheel 1024 Nov 2 19:16 .. +lrwxr-xr-x 1 root wheel 10 Nov 2 19:26 C0A801BB -&gt; ./inetboot +-rw-r--r-- 1 root wheel 158224 Nov 2 19:13 inetboot</code></pre></div> + +<p><!-- raw HTML omitted -->6.<!-- raw HTML omitted --> The Solaris 9 ISO which was mounted previously needs to be NFS exported from the jumpstart server. The jumpstart client will NFS mount this as part of the installation process. The <em>/etc/exports</em> is configured as follows:</p> + +<div class="highlight"><pre><code class="language-plaintext"># cat /etc/exports +/data/jumpstart/sol9u9 -maproot=0 -network 192.168.1.0 -mask 255.255.255.0 +/data/jumpstart/sol9u9/Solaris_9/Tools/Boot -maproot=0 -network 192.168.1.0 -mask 255.255.255.0 </code></pre></div> + +<p><!-- raw HTML omitted -->7.<!-- raw HTML omitted --> Finally, we move on to the configuration of the boot parameter server, rpc.bootparamd. This will pass the information the jumpstart client needs in order to boot. In this case, it will specify NFS exported paths from the +jumpstart server for the Solaris 9 media. For this we configure the following in <em>/etc/bootparams</em>. +Note that 192.168.1.154 is the IP address of the jumpstart server:</p> + +<div class="highlight"><pre><code class="language-plaintext"># cat /etc/bootparams +sparc root=192.168.1.154:/data/jumpstart/sol9u9/Solaris_9/Tools/Boot install=192.168.1.154:/data/jumpstart/sol9u9 boottype=:in rootopts=:rsize=4096</code></pre></div> + +<p>After configuring steps 1-7 above, I enabled <em>tftpd</em>, and started up <em>rarpd</em>, <em>rpc.bootparamd</em>, and the NFS server. +In my case, I ran these in debug mode where possible in order to keep an eye on things during the jumpstart process. +On the jumpstart client, I ran the following command from the OpenBoot prompt:</p> + +<div class="highlight"><pre><code class="language-plaintext">boot net -v - install</code></pre></div> + +<p>And this kicked off the whole process. Note that I didn’t have the correct serial cable handy in order to be able to +capture the bootup of the jumpstart client. However a short snippet of the initial messages is provided here:</p> + +<div class="highlight"><pre><code class="language-plaintext">ok boot net -v-install +Boot device: /pcielf,0/pcie1, 1/network@c, 1 File and args: -v- install Using Onboard Transceiver - Link Up. +Timeout waiting for ARP/RARP packet +Timeout waiting for ARP/RARP packet +2aa00 +Server IP address: 192.168.1.154 +Client IP address: 192.168.1.187 Using Onboard Transceiver - Link Up. + +Using RARP/BOOTPARAMS... + +Requesting Internet address for 8:0:20:13:de:ad +Internet address is: 192.168.1.187 +hostname: sparc +Found 192.168.1.154 @ 0:1e:37:82:a6:b6 root server: 192.168.1.154 (192.168.1.154) +root directory: /data/jumpstart/so19u9/Solaris_9/Tools/Boot Size: 0x5f163+0x14c3d+0x25307 Bytes +SunOS Release 5.9 Version Generic_118558-34 64-bit. Copyright 1983-2003 Sun Microsystems, Inc. All rights reserved. +Use is subject to license terms. +os-io Ethernet address = 8:0:20:13:de:ad Using default device instance data +mem= 262144K (0x10000000) +avail mem = 245637120 +root nexus = = GENIAL station 777S (UltraSPARC-IIe 500MHz) +...</code></pre></div> + +<p>I ran the services on the jumpstart server in verbose mode where possible in order to follow along. 
+I&rsquo;ve provided some excerpts of the output from the various services below for completeness.</p> + +<!-- raw HTML omitted --> +<div class="highlight"><pre><code class="language-plaintext"># ./rarpd -a -d +rarpd: wm0: 0:1e:37:82:a6:b6 +rarpd: received packet on wm0 +rarpd: 08:00:20:13:de:ad asked; sparc replied +rarpd: received packet on wm0 +rarpd: 08:00:20:13:de:ad asked; sparc replied +rarpd: received packet on wm0 +rarpd: 08:00:20:13:de:ad asked; sparc replied +rarpd: received packet on wm0 +rarpd: 08:00:20:13:de:ad asked; sparc replied +rarpd: received packet on wm0 +rarpd: 08:00:20:13:de:ad asked; sparc replied +rarpd: received packet on wm0 +rarpd: 08:00:20:13:de:ad asked; sparc replied</code></pre></div> + +<!-- raw HTML omitted --> +<!-- raw HTML omitted --> +<div class="highlight"><pre><code class="language-plaintext">Nov 2 20:44:21 netbsd syslogd[340]: last message repeated 2 times +Nov 2 18:51:51 netbsd dhcpcd[219]: wm0: Router Advertisement from fe80::da58:d7ff:fe00:6d83 +Nov 2 18:51:51 netbsd dhcpcd[219]: wm0: Router Advertisement from fe80::da58:d7ff:fe00:6d83 +Nov 2 20:58:23 netbsd rarpd[10179]: wm0: 0:1e:37:82:a6:b6 +Nov 2 21:00:53 netbsd rarpd[10179]: received packet on wm0 +Nov 2 21:00:53 netbsd rarpd[10179]: 08:00:20:13:de:ad asked; sparc replied +Nov 2 21:00:53 netbsd tftpd[9562]: 192.168.1.187: read request for C0A801BB: success +Nov 2 21:01:03 netbsd rarpd[10179]: received packet on wm0 +Nov 2 21:01:03 netbsd rarpd[10179]: 08:00:20:13:de:ad asked; sparc replied +Nov 2 21:02:32 netbsd rarpd[10179]: received packet on wm0 +Nov 2 21:02:32 netbsd rarpd[10179]: 08:00:20:13:de:ad asked; sparc replied +Nov 2 21:02:32 netbsd tftpd[10876]: 192.168.1.187: read request for C0A801BB: success +Nov 2 21:02:41 netbsd rarpd[10179]: received packet on wm0 +Nov 2 21:02:41 netbsd rarpd[10179]: 08:00:20:13:de:ad asked; sparc replied +Nov 2 21:03:04 netbsd rarpd[10179]: received packet on wm0 +Nov 2 21:03:04 netbsd rarpd[10179]: 08:00:20:13:de:ad asked; sparc replied +Nov 2 21:03:37 netbsd rarpd[10179]: received packet on wm0 +Nov 2 21:03:37 netbsd rarpd[10179]: 08:00:20:13:de:ad asked; sparc replied</code></pre></div> + +<!-- raw HTML omitted --> +<!-- raw HTML omitted --> +<div class="highlight"><pre><code class="language-plaintext"># ./rpc.bootparamd -d -r 0.0.0.0 +rpc.bootparamd: whoami got question for 192.168.1.187 +rpc.bootparamd: This is host sparc +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: Returning sparc 192.168.1.154 +rpc.bootparamd: getfile got question for "sparc" and file "root" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: returning server:192.168.1.154 path:/data/jumpstart/sol9u9/Solaris_9/Tools/Boot address: 192.168.1.154 +rpc.bootparamd: whoami got question for 192.168.1.187 +rpc.bootparamd: This is host sparc +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: Returning sparc 192.168.1.154 +rpc.bootparamd: getfile got question for "sparc" and file "root" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: returning server:192.168.1.154 path:/data/jumpstart/sol9u9/Solaris_9/Tools/Boot address: 192.168.1.154 +rpc.bootparamd: getfile got question for "sparc" and file "rootopts" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: getfile can't resolve server for sparc +rpc.bootparamd: getfile got question for "sparc" and file "rootopts" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: getfile can't resolve 
server for sparc +rpc.bootparamd: getfile got question for "sparc" and file "rootopts" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: getfile can't resolve server for sparc +rpc.bootparamd: getfile got question for "sparc" and file "root" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: returning server:192.168.1.154 path:/data/jumpstart/sol9u9/Solaris_9/Tools/Boot address: 192.168.1.154 +rpc.bootparamd: whoami got question for 192.168.1.187 +rpc.bootparamd: This is host sparc +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: Returning sparc 192.168.1.154 +rpc.bootparamd: getfile got question for "sparc" and file "root" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: returning server:192.168.1.154 path:/data/jumpstart/sol9u9/Solaris_9/Tools/Boot address: 192.168.1.154 +rpc.bootparamd: getfile got question for "sparc" and file "install" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: returning server:192.168.1.154 path:/data/jumpstart/sol9u9 address: 192.168.1.154 +rpc.bootparamd: getfile got question for "sparc" and file "sysid_config" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: match with sparc +rpc.bootparamd: Unknown bootparams host +rpc.bootparamd: getfile lookup failed for sparc +rpc.bootparamd: getfile got question for "sparc" and file "ns" +rpc.bootparamd: match sparc with sparc +rpc.bootparamd: match with sparc +rpc.bootparamd: Unknown bootparams host +rpc.bootparamd: getfile lookup failed for sparc</code></pre></div> + +<!-- raw HTML omitted --> +<!-- raw HTML omitted --> +<div class="highlight"><pre><code class="language-plaintext"># ./mountd -d +Getting export list. +Got line +Got line /data/jumpstart/sol9u9 -maproot=0 -network 192.168.1.0 -mask 255.255.255.0 +Making new ep fs=0xe00,0x1e5c8 +doing opt -maproot=0 -network 192.168.1.0 -mask 255.255.255.0 +doing opt -network 192.168.1.0 -mask 255.255.255.0 +get_net: '192.168.1.0' v4 addr 1a8c0 +doing opt -mask 255.255.255.0 +get_net: '255.255.255.0' v4 addr ffffff +Got line /data/jumpstart/sol9u9/Solaris_9/Tools/Boot -maproot=0 -network 192.168.1.0 -mask 255.255.255.0 +Found ep fs=0xe00,0x1e5c8 +doing opt -maproot=0 -network 192.168.1.0 -mask 255.255.255.0 +doing opt -network 192.168.1.0 -mask 255.255.255.0 +get_net: '192.168.1.0' v4 addr 1a8c0 +doing opt -mask 255.255.255.0 +get_net: '255.255.255.0' v4 addr ffffff +Getting mount list. +Here we go. +got mount request from 192.168.1.187 +-&gt; rpcpath: /data/jumpstart/sol9u9/Solaris_9/Tools/Boot +-&gt; dirpath: /data/jumpstart/sol9u9/Solaris_9/Tools/Boot +comparing: +c0a801 +c0a801 +Mount successful. +got mount request from 192.168.1.187 +-&gt; rpcpath: /data/jumpstart/sol9u9/Solaris_9/Tools/Boot +-&gt; dirpath: /data/jumpstart/sol9u9/Solaris_9/Tools/Boot +comparing: +c0a801 +c0a801 +Mount successful. +got mount request from 192.168.1.187 +-&gt; rpcpath: /data/jumpstart/sol9u9/Solaris_9/Tools/Boot +-&gt; dirpath: /data/jumpstart/sol9u9/Solaris_9/Tools/Boot +comparing: +c0a801 +c0a801 +Mount successful. +got mount request from 192.168.1.187 +-&gt; rpcpath: /data/jumpstart/sol9u9 +-&gt; dirpath: /data/jumpstart/sol9u9 +comparing: +c0a801 +c0a801 +Mount successful.</code></pre></div> + +<!-- raw HTML omitted --> +<p>After a few fits and starts, I managed to get it all working. Note that the final missing piece was that I had to ping +the SPARC laptop during the bootstrap of the installation for it to continue on. 
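Thanks again to <a href="https://twitter.com/CrivetiMihai">@CrivetiMihai</a> for this tip.</p>
+
+<p>For anyone trying to reproduce this, the workaround amounted to leaving a continuous ping running from the jumpstart
+server against the client while the installer bootstrapped. The invocation below is my reconstruction rather than a
+capture from that session (ping on NetBSD runs continuously by default; 192.168.1.187 is the client address from above):</p>
+
+<div class="highlight"><pre><code class="language-plaintext"># ping 192.168.1.187</code></pre></div>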
+
+<p>With Solaris 9 U9 installed and working, I turned my attention to OpenCSW. I followed the procedure defined <a href="https://www.opencsw.org/manual/for-administrators/setup-old-versions.html#old-solaris">here</a> for older Solaris versions. The recommended
+utilities gzip, coreutils and wget all seemed to install and function without issue. However, ssh still appears to have an
+issue; I continue to see the following error when running ssh from OpenCSW (Solaris 9):</p>
+
+<div class="highlight"><pre><code class="language-plaintext">libresolv.so.2: version `SUNW_2.2.1' not found</code></pre></div>
+
+<p>The error is discussed in the following <a href="http://lists.opencsw.org/pipermail/bug-notifications/2013-April/011736.html">thread</a>. I’ll be looking at compiling both ssh and sshd using the gcc version from OpenCSW.</p>
+
+<p>With that, I’ll also be looking at the jumpstart installation of Solaris 10 on the laptop, although I know that with
+256MB of RAM, it may not be the way to go. As an aside, my initial attempts at this so far have failed, and that will
+hopefully be the subject of another writeup in the future.</p>
+
+<p>And as for that Ultra 5 that graced my desk between 2000-2005? Well, it’s sitting in my basement too, waiting for another
+rainy weekend day.</p>
+
+
+
+
+ IOPS are dumb
+
+ 2021-10-24T17:56:00-06:00
+ https://hpc.social/2021/iops-are-dumb
+ <div style="border: 1px solid black; font-size: x-small; margin-left: 2em; margin-right: 2em; padding: 1em;">This post is a long-form dump of some thoughts I've had while testing all-flash file systems this past year, and bits of this appear in a <a href="http://www.pdsw.org/index.shtml">presentation and paper I'm presenting at PDSW'21</a>&nbsp;about new benchmarking techniques for testing all-flash file systems.</div>
+<p>"How many IOPS do you need?"</p>
+<p>I'm often asked this by storage vendors, and the question drives me a little bonkers.&nbsp; I assume they ask it because their other customers bring them black-and-white IOPS requirements, but I argue that anyone would be hard-pressed to explain the scientific value of one I/O operation (versus one gigabyte) if ever called on it.&nbsp; And yet, IOPS are undeniably important; the illustrious Rob Ross devoted a whole slide to this at a <a href="https://science.osti.gov/ascr/ascac/Meetings/202109">recent ASCAC meeting</a>:</p>
+<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-uoZq9awp-3E/YVS3anWGgpI/AAAAAAABWsw/tb12XvWtTScjd42nIscFJ-6U7Dr3E_TLQCLcBGAsYHQ/s2048/rob-ross-ascac-slide.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Rob Ross' perspective on why IOPS are now important for HPC I/O" border="0" height="226" src="https://1.bp.blogspot.com/-uoZq9awp-3E/YVS3anWGgpI/AAAAAAABWsw/tb12XvWtTScjd42nIscFJ-6U7Dr3E_TLQCLcBGAsYHQ/w400-h226/rob-ross-ascac-slide.png" title="Rob Ross' perspective on why IOPS are now important for HPC I/O" width="400" /></a></div>
+<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Rob Ross' perspective on why IOPS are now important for HPC I/O</span></b></div>
+<p>I agree with all of Rob's bullets and yet I disagree with the title of his slide; IOPS are dumb, and yet ignoring them when designing a performance-optimized parallel file system is even more dumb in contemporary times.&nbsp; So let's talk about the grey area in between that creates 
this dichotomy.<span></span></p>
+<p></p>
+<h2 style="text-align: left;">First, bandwidth is pretty dumb</h2>
+<p>If there's one constant in HPC, it's that everyone hates I/O.&nbsp; And there's a good reason: it's a waste of time because every second you wait for I/O to complete is a second you aren't doing the math that led you to use a supercomputer in the first place.&nbsp; I/O is the time you are doing zero computing amidst a field called "high performance computing."</p>
+<p>That said, everyone appreciates the product of I/O--data.&nbsp; I/O is a necessary part of preserving the results of your calculation, so nobody ever says they wish there was no I/O.&nbsp; Instead, infinitely fast I/O is what people want since it implies that 100% of a scientist's time using an HPC is spent actually performing computations while still&nbsp;preserving the results of that computation after the job has completed.</p>
+<p>Peeling back another layer of that onion, the saved results of that computation--data--has intrinsic value.&nbsp; In a typical simulation or data analysis, every byte of input or output is the hard-earned product of a lot of work performed by a person or machine, and it follows that if you want to save a lot of bytes while spending as little time as possible performing I/O, the true value of a parallel storage system's performance is in how many bytes per second it can read or write.&nbsp; At a fundamental level, this is why I/O performance has long been gauged in terms of megabytes per second, gigabytes per second, and now terabytes per second.&nbsp; To the casual observer, a file system that can deliver 100 GB/s is more valuable than a file system that can deliver only 50 GB/s assuming all things are equal for this very reason.&nbsp; Easy.</p>
+<p>This singular metric of storage system "goodness" quickly breaks down once you start trying to set expectations around it though.&nbsp; For example, let's say your HPC job generates 21 TB of valuable data that must be stored, and it must be stored so frequently that we really can't tolerate more than 30 seconds writing that data out before we start feeling like "too much time" is being spent on I/O instead of computation.&nbsp; This turns out to be 700 GB/s (21 TB / 30 s = 0.7 TB/s)--a rather arbitrary choice since that 30 seconds is a matter of subjectivity, but one that reflects the value of your 21 TB and the value of your time.&nbsp; It should follow that any <a href="https://www.nersc.gov/news-publications/nersc-news/nersc-center-news/2016/cori-supercomputer-now-fully-installed-at-berkeley-lab/">file system that claims 700 GB/s of write capability</a> should meet your requirements, and any vendor who can deliver such a system should get your business, right?</p>
+<p>Of course not.&nbsp; It's no secret that obtaining those hero bandwidths, much like obtaining Linpack-level FLOPS, requires you (the end-user) to perform I/O in exactly the right way.&nbsp; In the case of the aforementioned 700 GB/s file system, this means</p>
+<p></p>
+<ol style="text-align: left;"><li>Having each MPI process write to its own file (a single shared file will get slowed down by file system lock traffic)</li><li>Writing 4 MiB at a time (to exactly match the size of the network transmission buffers, remote memory buffers, RAID alignment, ...)</li><li>Using 4 processes per node (enough parallelism to drive the NIC, but not too much to choke the node)</li><li>Using 960 nodes (enough parallelism to drive all the file system drives, but not too much to choke the 
servers)</li></ol>
+<p></p>
+<p>I've never seen a scientific application perform this exact pattern, and consequently, I don't expect that any scientific application has ever gotten that 700 GB/s of performance from a "700 GB/s file system" in practice.&nbsp; In that sense, this 700 GB/s bandwidth metric is pretty dumb since nobody actually achieves its rated performance. Of course, that hasn't prevented me from saying&nbsp;these <a href="https://storageconference.us/2019/Invited/Lockwood.slides.pdf">same</a> <a href="https://www.osti.gov/biblio/1798757">dumb</a> <a href="https://hps.vi4io.org/_media/events/2021/iodc21-lockwood.pdf">things</a>&nbsp;when I stump for file systems. &nbsp;The one saving grace of using bandwidth as a meaningful metric of I/O performance, though, is that&nbsp;<b>I/O patterns are a synthetic construct</b>&nbsp;and can be squished, stretched, and reshaped without affecting the underlying scientific data being transmitted.</p>
+<p>The value of data is in its contents, not the way it is arranged or accessed.&nbsp; There's no intrinsic scientific reason why someone should or shouldn't read their data 4 MiB at a time as long as the bits eventually get to the CPU that will perform calculations on it in the correct order.&nbsp; The only reason HPC users perform nice, 1 MiB-aligned reads and writes is because they learn (either in training or on the streets) that randomly reading a few thousand bytes at a time is very slow and works against their own interests of minimizing I/O time.&nbsp; &nbsp;This contrasts sharply with the computing side of HPC where the laws of physics generally dictate the equations that must be computed, and the order in which those computations happen dictates whether the final results accurately model some physical process or just spit out a bunch of unphysical garbage results.</p>
+<p>Because I/O patterns are not intrinsically valuable, we are free to rearrange them to best suit the strengths and weaknesses of a storage system to maximize the GB/s we can get out of it.&nbsp; This is the entire foundation of MPI-IO, which receives I/O patterns that are convenient for the physics being simulated and reorders them into patterns that are convenient for the storage system.&nbsp; So while saying a file system can deliver 700 GB/s is a bit disingenuous on an absolute scale, it does indicate what is possible if you are willing to twist your I/O pattern to exactly match the design optimum.</p>
+<h2 style="text-align: left;">But IOPS are particularly dumb</h2>
+<p>IOPS are what happen when you take the value out of a value-based performance metric like bandwidth.&nbsp; Rather than expressing how many valuable bytes a file system can move per second, IOPS express how many arbitrary I/O operations a file system can service per second.&nbsp; And since the notion of an "I/O operation" is completely synthetic and can be twisted without compromising the value of the underlying data, you might already see why IOPS are a dumb metric of performance.&nbsp; They measure how quickly a file system can do something meaningless, where that meaningless thing (an I/O operation) is itself a function of the file system.&nbsp; It's like saying you can run a marathon at five steps per second--it doesn't actually indicate how long it will take you to cover the twenty-six miles.</p>
+<p>IOPS as a performance measure was relatively unknown to HPC for most of history.&nbsp; <a href="https://www.sdsc.edu/News%20Items/PR030512_gordon.html">Until 2012</a>, HPC storage was 
dominated by hard drives, which only delivered high-value performance for large, sequential reads and writes, and the notion of an "IOP" was antithetical to performance.&nbsp; The advent of flash introduced a new dimension of performance in its ability to read and write a lot of data at discontiguous (or even random) positions within files or across entire file systems.&nbsp; Make no mistake: you still read and write more bytes per second (i.e., get more value) from flash with a contiguous I/O pattern.&nbsp; Flash just raised the bottom end of performance in the event that you are unable or unwilling to contort your application to perform I/O in a way that is convenient for your storage media.</p>
+<p>To that end, when a vendor advertises how many IOPS they can deliver, they really are advertising how many discontiguous 4 KiB reads or writes they can deliver under the worst-case I/O pattern (fully random offsets).&nbsp; You can convert a vendor's IOPS performance back into a meaningful value metric simply by multiplying it by 4 KiB; for example, I've been presenting a slide that claims I measured <a href="https://www.osti.gov/biblio/1798757">29,000 write IOPS and 1,400,000 read IOPS from a single ClusterStor E1000 OST array</a>:</p>
+<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-Aq07XkQ1A1U/YVU95I3cvwI/AAAAAAABWtA/2Z57P80DSeoWxeS2dRP42SQUlxaAjas0gCLcBGAsYHQ/s2048/Screen%2BShot%2B2021-09-29%2Bat%2B21.32.04.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Performance measurements of a single ClusterStor E1000 NVMe Lustre OST" border="0" height="206" src="https://1.bp.blogspot.com/-Aq07XkQ1A1U/YVU95I3cvwI/AAAAAAABWtA/2Z57P80DSeoWxeS2dRP42SQUlxaAjas0gCLcBGAsYHQ/w400-h206/Screen%2BShot%2B2021-09-29%2Bat%2B21.32.04.png" title="Performance measurements of a single ClusterStor E1000 NVMe Lustre OST" width="400" /></a></div>
+<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Performance measurements of a single ClusterStor E1000 NVMe Lustre OST</span></b></div>
+<p>In reality, I was able to write data at 0.12 GB/s and read data at 5.7 GB/s, and stating these performance metrics as IOPS makes it clear that these data rates reflect the worst-case scenario of tiny I/Os happening at random locations rather than the best-case scenario of sequential I/Os which can happen at 27 GB/s and 41 GB/s, respectively.</p>
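+<p>To make that unit conversion explicit, here is the back-of-the-envelope arithmetic behind those two bandwidth figures (my own check, not a number from the slide):</p>
+<div class="highlight"><pre><code class="language-plaintext">   29,000 write IOPS x 4 KiB =    29,000 x 4,096 bytes/s ~ 0.12 GB/s
+1,400,000 read IOPS  x 4 KiB = 1,400,000 x 4,096 bytes/s ~ 5.7 GB/s</code></pre></div>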
+<p>Where IOPS get particularly stupid is when we try to cast them as some sort of hero number analogous to the 700 GB/s bandwidth metric discussed above.&nbsp; Because IOPS reflect a worst-case performance scenario, no user should ever be asking "how can I get the highest IOPS" because they'd really be asking "how can I get the best, worst-case performance?"&nbsp; Relatedly, trying to measure the <i>IOPS capability</i> of a storage system gets very convoluted because it often requires twisting your I/O pattern in such unrealistic ways that heroic effort is required to get such terrible performance.&nbsp; At some point, every I/O performance engineer should find themselves questioning why they are putting so much time into defeating every optimization the file system implements to avoid this worst-case scenario.</p>
+<p>To make this a little more concrete, let's look at this <a href="https://www.lustre.org/wp-content/uploads/SC19LustreBoF_All.pdf">slide I made in 2019 to discuss the IOPS projections of this exact same ClusterStor E1000 array</a>:</p>
+<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-xpPJ4SVoNcQ/YVVGQ4qV4WI/AAAAAAABWtI/Vpl-loGSSsomakJR69dc3xReU-0D_2AzgCLcBGAsYHQ/s2048/Screen%2BShot%2B2021-09-29%2Bat%2B22.01.19.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Projected performance of a ClusterStor E1000 NVMe Lustre OST based on a PCIe Gen3 platform" border="0" height="226" src="https://1.bp.blogspot.com/-xpPJ4SVoNcQ/YVVGQ4qV4WI/AAAAAAABWtI/Vpl-loGSSsomakJR69dc3xReU-0D_2AzgCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2021-09-29%2Bat%2B22.01.19.png" title="Projected performance of a ClusterStor E1000 NVMe Lustre OST based on a PCIe Gen3 platform" width="400" /></a></div>
+<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Projected performance of a ClusterStor E1000 NVMe Lustre OST based on a PCIe Gen3 platform</span></b></div>
+<p>Somehow the random read rate went from a projected 600,000 to an astonishing 1,400,000 read IOPS--which one is the correct measure of read IOPS?</p>
+<p>It turns out that they're <i>both</i> correct; the huge difference in measured read IOPS is the result of the 600 KIOPS estimate coming from a measurement that</p>
+<ol style="text-align: left;"><li>ran for a much longer sustained period (180 seconds vs. 69 seconds)</li><li>used fewer client nodes (21 nodes vs. 32 nodes)</li><li>wrote smaller files (1,008× 8 GiB files vs. 1,024×&nbsp;384 GiB files)</li></ol>
+<p>Unlike the IOPS measurements on individual SSDs which are measured using a standard tool (<a href="https://github.com/axboe/fio">fio</a> with <a href="https://pagure.io/libaio">libaio</a> from a single node), there is no standard method for measuring the IOPS of a parallel file system.&nbsp; And just as the hero bandwidth number we discussed above is unattainable by real applications, any standardized IOPS test for a parallel file system would result in a relatively meaningless number.&nbsp; And yes, this includes IO-500; <a href="https://www.glennklockwood.com/benchmarks/io500.html#interpreting-results">its numbers have little quantitative value</a> if you want to design a parallel file system the right way.</p>
+<p>So who's to say whether a ClusterStor E1000 OST is capable of 600 kIOPS or 1,400 kIOPS?&nbsp; I argue that 1,400 kIOPS is more accurate since I/O is bursty and a three-minute-long burst of completely random reads is less likely than a one-minute long one on a production system.&nbsp; If I worked for a vendor though, I'm sure this would be taken to be a dishonest marketing number since it doesn't reflect an indefinitely sustainable level of performance.&nbsp; And perhaps courageously, the <a href="https://www.hpe.com/psnow/doc/PSN1012842049INEN.pdf">official Cray ClusterStor E1000 data sheet</a> doesn't even wade into these waters and avoids quoting any kind of IOPS performance expectation.&nbsp; Ultimately, the true value of the random read capability is the bandwidth achievable by all of the most random workloads that will realistically be run at the same time on a file system.&nbsp; Good luck figuring that out.</p>
+<h2 style="text-align: left;">Write IOPS are <i>really</i> dumb</h2>
+<p>As I said at the outset, I cannot disagree with any of the bullets in the slide Rob presented at ASCAC.&nbsp; That first one is particularly salient--there <i>is</i> a new class of HPC workloads, particularly in AI, whose primary purpose is to randomly sample large datasets to train statistical models.&nbsp; If these datasets are too large to fit into 
memory, you cannot avoid some degree of random read I/O without introducing biases into your weights.&nbsp; For this reason, there is legitimate need for HPC to demand high random read performance from their file systems.&nbsp; Casting this requirement in terms of 4 KiB random read rates to have a neat answer to the "how many IOPS do you need" question is dubious, but whatever.&nbsp; There's little room for intellectual purity in HPC.</p> +<p>The same can't be said for random write rates.&nbsp; Write IOPS are a completely worthless and misleading performance metric in parallel file systems.</p> +<p>In most cases, HPC applications approximate some aspect of the physical world, and mathematics and physics were created to describe this physical world in a structured way.&nbsp; Whether you're computing over atoms, meshes, or matrices, there is structure to the data you are writing out and the way your application traverses memory to write everything out.&nbsp; You may not write data out in a perfectly ordered way; you may have more atoms on one MPI process than another, or you may be traversing an imbalanced graph.&nbsp; But there is almost always enough structure to scientific data to squish it into a non-random I/O pattern using middleware like MPI-IO.</p> +<p>Granted, there are a few workloads where this is not true.&nbsp; <a href="https://www.sdsc.edu/Events/ipp_webinars/large_scale_genomics.pdf">Out-of-core sorting of short-read DNA sequences</a>&nbsp;and <a href="http://dx.doi.org/10.1016/j.future.2017.12.022">in-place updates of telescope mosaics</a> are two workloads that come to mind where you don't know where to write a small bit of data until you've computed on that small bit of data.&nbsp; In both these cases though, the files are never read and written at the same time, meaning that these random-ish writes can be cached in memory, reordered to be less random, and written out to the file asynchronously.&nbsp; And the effect of write-back caching on random write workloads is staggering.</p> +<p>To illustrate this, consider three different ways in which IOR can be run against an all-NVMe file system to measure random 4 KiB writes:</p> +<p></p> +<ul style="text-align: left;"><li>In the <b>naïve</b> case, we just write 4 KiB pages at random locations within a bunch of files (one file per MPI process) and report what IOR tells us the write IOPS were at the end.&nbsp; This includes only the time spent in write(2) calls.</li><li>In the case where we <b>include fsync</b>, we call fsync(2) at the end of all the writes and include the time it takes to return along with all the time spent in write(2).</li><li>In the <b>O_DIRECT</b> case, we open the file with direct I/O to completely bypass the client write-back cache and ensure that write(2) doesn't return until the data has been written to the file system servers.</li></ul> +<div>These seemingly minor changes result in write IOPS rates that differ by over 30x:</div> +<p></p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-zShKdPu53YE/YVVW2QbVRYI/AAAAAAABWtQ/mReqH6S2lsgF0nhAmqDdlCra7-FQoywWACLcBGAsYHQ/s565/download.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Random write IOPS measured using IOR on an all-NVMe parallel file system" border="0" height="280" src="https://1.bp.blogspot.com/-zShKdPu53YE/YVVW2QbVRYI/AAAAAAABWtQ/mReqH6S2lsgF0nhAmqDdlCra7-FQoywWACLcBGAsYHQ/w400-h280/download.png" title="Random write IOPS measured using IOR on an all-NVMe parallel file 
system" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Random write IOPS measured using IOR on an all-NVMe parallel file system</span></b></div> +<p>Again we ask: which one is the right value for the file system's write IOPS performance?</p> +<p>If we split apart the time spent in each phase of this I/O performance test, we immediately see that the naïve case is wildly deceptive:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-7r9NLXU8Cd8/YVVW9NcK52I/AAAAAAABWtU/hRmBYygTtDUkX1Q6an3iYdbMu68Ni4TMgCLcBGAsYHQ/s565/download-1.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Breakdown of time spent in I/O calls for 4K random write IOR workload" border="0" height="274" src="https://1.bp.blogspot.com/-7r9NLXU8Cd8/YVVW9NcK52I/AAAAAAABWtU/hRmBYygTtDUkX1Q6an3iYdbMu68Ni4TMgCLcBGAsYHQ/w400-h274/download-1.png" title="Breakdown of time spent in I/O calls for 4K random write IOR workload" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Breakdown of time spent in I/O calls for 4K random write IOR workload</span></b></div> +<p>The reason IOR reported a 2.6 million write IOPS rate is because all those random writes actually got cached in each compute node's memory, and I/O didn't actually happen until the file was closed and all cached dirty pages were flushed.&nbsp; At the point this happens, the cache flushing process doesn't result in random writes anymore; the client reordered all of those cached writes into large, 1 MiB network requests and converted our random write workload into a sequential write workload.</p> +<p>The same thing happens in the case where we include fsync; the only difference is that we're including the time required to flush caches in the denominator of our IOPS measurement.&nbsp; Rather frustratingly, we actually stopped issuing write(2) calls after 45 seconds, but so many writes were cached in memory during those 45 seconds that it took almost 15 minutes to reorder and write them all out during that final fsync and file close.&nbsp; What should've been 45 seconds of random writes to the file system turned into 45 seconds of random writes to memory and 850 seconds of sequential writes to the file system.</p> +<p>The O_DIRECT case is the most straightforward since we don't cache any writes, and every one of our random writes from the application turns into a random write out to the file system.&nbsp; This cuts our measured IOPS almost in half, but otherwise leaves no surprises when we expect to only write for 45 seconds. 
+<p>Based on all this, it's tempting to say that the O_DIRECT case is the correct way to measure random write IOPS since it avoids write-back caches--but is it really?&nbsp; In the rare case where an application intentionally does random writes (e.g., out-of-core sort or in-place updates), what are the odds that two MPI processes on different nodes will try to write to the same part of the same file at the same time and therefore trigger cache flushing?&nbsp; Perhaps more directly, what are the odds that a scientific application would be using O_DIRECT <i>and</i> random writes at the same time?&nbsp; Only the most masochistic HPC user would ever purposely do something like this since it results in worst-case I/O performance; it doesn't take long for a user to realize this I/O pattern is terrible, and that reformulating it would increase their productive use of their supercomputer.</p>
+<p>So if no user in their right mind does truly unbuffered random writes, what's the point in measuring it in the first place?&nbsp; <b>There is none.&nbsp; Measuring write IOPS is dumb</b>.&nbsp; Using O_DIRECT to measure random write performance is dumb, and measuring write IOPS through write-back cache, while representative of most users' actual workloads, isn't actually doing 4K random I/Os and therefore isn't even measuring IOPS.</p>
+<p></p>
+<h2>Not all IOPS are always dumb</h2>
+<div>This all being said, measuring IOPS can be valuable in contexts outside of parallel file systems.&nbsp; Two cases come to mind where measuring IOPS can be a rational yardstick.</div>
+<h3 style="text-align: left;">1. Serving up LUNs to containers and VMs</h3>
+<div>By definition, infrastructure providers shouldn't be responsible for the applications that run inside black-box containers and VMs because they are providing storage infrastructure (block devices) and not storage services (file systems).&nbsp; Blocks in and blocks out are measured in IOPS, so the fit is natural.&nbsp; That said, HPC users care about file systems (that is, scientific applications do not perform I/O using SCSI commands directly!), so worrying about LUN performance isn't meaningful in the HPC context.</div>
+<h3 style="text-align: left;">2. 
Measuring the effect of many users doing many things</h3>
+<div>While individual HPC workloads rarely perform random I/Os on purpose, if you have enough users doing many small tasks all at once, the file system itself sees a workload that approaches something random.&nbsp; The more small, independent tasks running in parallel, and the farther back you stand from the overall I/O load timeline, the more random it looks.&nbsp; So, I argue that it is fair to measure the IOPS of a parallel file system as a way of gauging how much abuse a file system can take before it begins to impact everybody.</div>
+<div><br /></div>
+<div>Take, for example, these IOPS scaling measurements I made on a small all-flash file system using IOR:</div>
+<div><br /></div>
+<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-TVonp3v_RWE/YW9bGX7mCrI/AAAAAAABWwQ/IWCsgpJvZYEiOAtzfntxWgnf8ZZaZyLzwCLcBGAsYHQ/s584/Unknown-1.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Scale-up IOPS benchmarking to demonstrate the saturation point of an all-flash file system" border="0" height="289" src="https://1.bp.blogspot.com/-TVonp3v_RWE/YW9bGX7mCrI/AAAAAAABWwQ/IWCsgpJvZYEiOAtzfntxWgnf8ZZaZyLzwCLcBGAsYHQ/w400-h289/Unknown-1.png" title="Scale-up IOPS benchmarking to demonstrate the saturation point of an all-flash file system" width="400" /></a></div>
+<div class="separator" style="clear: both; text-align: center;"><span style="font-size: x-small;"><b>Scale-up IOPS benchmarking to demonstrate the saturation point of an all-flash file system</b></span></div>
+<div><br /></div>
+<div>It looks like it takes about 4,096 concurrent random readers or writers to max out the file system.&nbsp; This alone isn't meaningful until you consider what this means in the context of the whole compute and storage platform.</div>
+<div><br /></div>
+<div>What fraction of the cluster's compute nodes corresponds to 4096 cores?&nbsp; If you've got, say, <a href="https://www.sdsc.edu/support/user_guides/expanse.html#tech_summary">728 dual-socket 64-core AMD Epyc processors</a>, it would only take 32 compute nodes (2 × 64 = 128 cores per node, and 4,096 ÷ 128 = 32) to max out this file system.&nbsp; And if another user wanted to use any of the remaining 696 compute nodes to, say, run a Python script that needed to read in random packages scattered across the file system, there would be no remaining IOPS capacity left at this point, and everyone would experience perceptible lag.</div>
+<div><br /></div>
+<div>Of course, this is the most extreme case--purely random IOPS--but you can measure the IOPS that a real workload does generate on the server side when, say, sampling a deep learning training dataset. With this, you can then figure out how much headroom that application leaves for every other random-ish workload that needs to run on the same system.</div>
+<div><br /></div>
+<div>Once you realize that a lot of the unglamorous parts of scientific computing--reading dotfiles when you log in, loading shared objects when you launch a dynamically linked executable, or even just editing source code--are full of random-like reads, you can establish a quantitative basis for figuring out how badly an IOPS-intensive data analysis application may affect everyone else's interactive accesses on the same file system.</div>
+<div><br /></div>
+<div>This is not to say that we can easily answer the question of "How many IOPS do you need?" 
though.&nbsp; How many IOPS a workload can drive is not how many IOPS that workload <i>needs</i>--it's really how fast it can compute before it has run out of data to process and needs to read more in.&nbsp; The faster your compute nodes, generally, the more data they can <i>consume</i>.&nbsp; They still <i>want</i> all the IOPS you can give them so they can spend as much time computing (and not waiting for I/O) as possible, and how many IOPS your application can drive is a function of how quickly it runs given the full stack between it and the storage, including CPU, memory, and networking.</div> +<h2 style="text-align: left;">If everything is dumb, now what?</h2> +<div>Give up trying to reduce I/O performance down to a single IOPS number, because it's two degrees away from being useful.&nbsp; Bandwidth is a better metric in that it's only one degree away from what actually matters, but at the end of the day, the real metric of I/O performance is how much time an application has to wait on I/O before it can resume performing meaningful computations.&nbsp; Granted, most storage vendors will give you a blank stare if you take this angle to them; telling them that your application spends 50% of its time waiting on I/O isn't going to get you a better file system from a storage company alone, so think about what the real problem could be.</div> +<div style="text-align: left;"><br /></div> +<div style="text-align: left;"><b>Is the application doing I/O in a pattern (random or otherwise) that prevents the storage system from delivering as many bytes/second as possible?</b>&nbsp; If so, ask your vendor for a storage system that delivers more bandwidth to a wider range of I/O patterns than just perfectly aligned 1 MiB reads and writes.<br /><br /></div> +<div style="text-align: left;"><b>Is the storage system already running as well as it can, but it only takes a few compute nodes to max it out?&nbsp;</b> If so, your storage system is too small relative to your compute system, and you should ask your vendor for more servers and drives to scale out.</div> +<div style="text-align: left;"><br /><b>Is the storage system running at 100% CPU even though it's not delivering full bandwidth?&nbsp;</b> Servicing a small I/O requires a lot more CPU than a large I/O since there are fixed computations that have to happen on every read or write regardless of how big it is.&nbsp; Ask your vendor for a better file system that doesn't eat up so much CPU, or ask for more capable servers.<br /></div> +<div style="text-align: left;"><br /></div> +<div style="text-align: left;">Alternatively, if you have a lot of users all doing different things and the file system is giving poor performance to everyone, ask your vendor for a file system with better quality of service.&nbsp; This will ensure that one big job doesn't starve out all the small ones.</div> +<div style="text-align: left;"><br /></div> +<div style="text-align: left;"><b>Is the storage system slow but you don't have the time to figure out why?&nbsp;</b> If so, it sounds like you work for an organization that doesn't actually value data because it's not appropriately staffed.&nbsp; This isn't a storage problem!</div> +<div style="text-align: left;"><br /></div> +<div style="text-align: left;">Ultimately, if solving I/O problems was as easy answering how many IOPS you need, storage wouldn't be the perpetual pain point in HPC that it has been.&nbsp; As with all things in computing, there is no shortcut and the proper way to approach this is by rolling up your 
sleeves and starting to rule out problems.&nbsp; You can (and should!) ask for a lot from your storage vendors--flexibility in delivering bandwidth, CPU-efficient file systems, and quality of service controls are all valid requests when buying storage.&nbsp; But IOPS are not.</div>
+
+
+
+
+ 10-4 to the Ten64 with Rockstor
+
+ 2021-09-26T17:55:00-06:00
+ https://hpc.social/2021/10-4-to-the-ten64-with-rockstor
+ <p>I love it when a plan comes together! And this time, I&rsquo;m not referring to a
+daring rescue by the A-Team, but rather something just slightly more mundane -
+network attached storage (NAS).</p>
+
+<p>I wrote back in March of this year about my experience setting up an <a href="https://www.gaborsamu.com/blog/aarch64_nas/">Arm based
+NAS</a> for home use running Rockstor on my venerable SolidRun macchiatoBIN board. Although the macchiatoBIN served
+in this role well, one limiting factor is the 3 onboard SATA ports. When used
+as a desktop, this wasn&rsquo;t an issue, but as a NAS it would limit things down
+the road in terms of adding storage. Yes, I know I could have purchased a
+PCIe SATA card to add additional ports, but decided against going this route
+given the various foibles I encountered with PCIe support over the years with
+the macchiatoBIN.</p>
+
+<p>My mind had been set a number of months earlier to purchase a <a href="https://www.crowdsupply.com/traverse-technologies/ten64">Traverse Ten64</a> <em>network appliance</em>
+and to use it primarily as a NAS. The Ten64 was attractive to me because of
+its expandability, ECC RAM support, abundance of network ports and an
+interesting capability known as DPAA2, which is thanks to the use of
+the NXP Layerscape LS1088A processor. A little bit more about DPAA2 later in
+the writeup. Furthermore, the Ten64 could stand in for home router duties should my
+(also venerable) Turris Omnia router decide to give up the ghost.</p>
+
+<p>Through all of this, I heard the chants of QNAP and Synology from my friends,
+who all thought that I was making things overly complicated for
+myself. For me, it was a no brainer. The Ten64 would provide unprecedented
+flexibility and would give me a handy appliance which could take on NAS duties
+as well as other tasks such as tinkering with K8s (k3s) clusters. And, who
+could deny the additional cool factor of the red PCB of the Ten64!
+Ultimately, I just love messing around with technology, and I&rsquo;m always looking
+for unique and flexible solutions. Plug and play? Nein Danke!</p>
+
+<p>Back in March, after assessing that an Arm based NAS was indeed a workable
+solution, I started to seek out the necessary bits and pieces in anticipation
+of the arrival of the Ten64 board. Of course, with COVID still in the air
+I was quite worried about being able to get all of the bits I needed in time.
+Over the summer, I diligently got all of the following pieces ready:</p>
+
+<ul>
+<li>1 x Kingston KSM268SE8/16ME 16GB DDR4 2666 MHz ECC SODIMM</li>
+<li>1 x IOCrest IO-M2F585-5I M.2 B/M 5-port SATA controller</li>
+<li>2 x Seagate Ironwolf 2 TB NAS drives</li>
+<li>1 x Seagate Ironwolf 240 GB NAS SSD</li>
+<li>1 x Fraktal Array R2 mini-ITX NAS case</li>
+</ul>
+<p>And the plan was slowly coming together. At this stage only the Ten64 itself
+was missing. And then, one fateful day in August the Ten64 arrived
+at long last! And it was rock n' roll time. 
The <a href="https://ten64doc.traverse.com.au/">Traverse Ten64 online
+documentation</a> and <a href="https://forum.traverse.com.au/">forum</a> turned out to be invaluable sources of information to help me
+get up and running. In fact, if you search the forum you&rsquo;ll find my name there
+in a few threads, in particular around DPAA2, which was the most thorny issue
+to resolve. Full disclosure: DPAA2 support in Linux distros is a bit hit
+and miss.</p>
+
+<p>The Ten64 shipped in its own small form factor case. I set up the Ten64 on my
+workbench and installed the RAM, M.2 SATA controller and connected the 240GB
+SATA SSD. The end game was to get the system booting the openWrt based
+<a href="https://gitlab.com/traversetech/muvirt">muvirt</a> from the 240GB SATA SSD and
+to run Rockstor as a virtual machine under muvirt, with network interfaces
+managed by DPAA2.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/ten64_collage.jpg" />
+</figure>
+
+<p>Once the software side of the house was figured out, it was time to install the
+Ten64 board into the Fraktal NAS case. This is what it looked like during
+the installation phase.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/ten64_case.jpg" />
+</figure>
+
+<p>There are tons of resources on NXP DPAA2 which can be found on the Internet.
+The Ten64 online documentation includes a useful <a href="https://ten64doc.traverse.com.au/network/dpaa2overview/">overview and details</a>.
+It&rsquo;s effectively a way that you can represent network objects on the NXP LS1088A processor of the Ten64 and pass those securely into the VM running on the
+system - which in my case was going to be Rockstor running on an OpenSUSE LEAP 15.3 VM. With DPAA2 I can avoid using virtualized networking for the VMs for
+better performance. Again, I&rsquo;m very far from being an authority on DPAA2,
+but it was definitely an important selling point for me, given my use case.</p>
+
+<p>DPAA2 took some effort to get working, but I&rsquo;m very pleased with the outcome.
+Ultimately, it required updated versions of muvirt, re-compilation of the VM
+guest kernel to include the necessary DPAA2 patches and to flash a
+<a href="https://gitlab.com/dubito/dpl-examples">new data path layout to the Ten64 board</a>. You can find all of the nitty-gritty details about this in the following
+Ten64 <a href="https://forum.traverse.com.au/t/restool-in-muvirt/63/23">forum thread</a>.</p>
+
+<p>Here is a view of the Rockstor dashboard showing activity on the system.
+I&rsquo;m a fan of the dashboard as it gives important details at a glance about
+the state of the NAS.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/rockstor_dashboard.png" />
+</figure>
+
+<p>So what does the future hold? At the moment I&rsquo;m migrating data to the
+Rockstor NAS. I&rsquo;ve not done extensive performance tests, but suffice it to
+say that the performance reading/writing to the NAS is as I would expect
+with Gigabit Ethernet. I&rsquo;ve installed both Jellyfin and Netdata rock-ons
+as well to provide media server capabilities and detailed metrics on
+the system load. I anticipate that I&rsquo;ll be looking more closely at
+k3s in the coming weeks.</p>
+
+<p>So this is a bit of a pat myself on the back moment. I&rsquo;m very pleased with
+the outcome and the capabilities of the Ten64 now and the room it will 
And what also matters to me is that in the +end, I did it my way.</p> + + + + + Uptodate + + 2021-09-19T09:30:00-06:00 + https://hpc.social/2021/uptodate + <p>I recently had an itch to scratch - and that itch was writing a library in Go. +We don’t use Go much for my work, so I figured out a compelling reason to start a new personal project - +a command line tool written in Go (and matching GitHub action) to help keep things up to +date in a repository. Appropriately, I called it <a href="https://vsoch.github.io/uptodate/docs/#/" target="_blank">uptodate</a>! +It was hugely inspired from the <a href="https://github.com/autamus/binoc" target="_blank">binoc</a> (short for “binoculars”) +library that can also perform specific kinds of updates, but I wanted more of a focus on +Docker, and to have total control so I could go wild and crazy with writing Go code +without worrying about forcing it on the owner, <a href="https://github.com/alecbcs" target="_blank">alecbcs</a>, to merge my wild ideas.</p> + +<p><br /></p> + +<div class="padding:20px"> +<img src="https://vsoch.github.io/uptodate/assets/img/uptodate.png" /> +</div> + +<h2 id="uptodate">Uptodate</h2> + +<p>Uptodate is a command line tool in Go and GitHub action that makes it easy to:</p> + +<ol class="custom-counter"> + <li> Update FROM statements in Dockerfile to have the latest shas</li> + <li> Update build arguments that are for spack versions, GitHub releases and commits, and container hashes.</li> + <li> Generate a matrix of Docker builds from a single configuration file</li> + <li> Generate a matrix of changed files in a repository.</li> + <li> List Dockerfile in a repository that have been changed.</li> +</ol> + +<p>With all of the above, you can imagine a workflow that first updates Dockerfile +FROM statements and build args, and then re-builds and deploys these containers - +the assumption being that the underlying dependency such as a GitHub commit +or spack version has an update. Uptodate also will take a nested structure +that I call a docker “build hierarchy” and add new folders and Dockerfile when +a new tag is detected. A kind of updater in uptodate is naturally called an “updater” +and this means for the docker build and docker hierarchy updaters, we can write +a yaml configuration file with our preferences for versions to be added, and +other metadata. You should check out the <a href="https://vsoch.github.io/uptodate/docs/#/user-guide/user-guide" target="_blank">user guide</a> +for detailed usage, or read about <a href="https://vsoch.github.io/uptodate/docs/#/user-guide/github-action" target="_blank">the GitHub action</a></p> + +<h2 id="how-does-it-work">How does it work?</h2> + +<p>I’ll give a brief overview of a few of the commands and then a quick example GitHub workflow, +and I’ll recommend that you read the documentation for the latest updates on uptodate, harharhar. +The examples below assumed that you’ve <a href="https://vsoch.github.io/uptodate/docs/#/user-guide/user-guide?id=install" target="_blank">installed</a> uptodate +and have the binary “uptodate” in your path.</p> + +<h3 id="dockerfile">Dockerfile</h3> + +<p>If you have one or more Dockerfile in your repository you can run uptodate to update digests. +For example:</p> + +<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ uptodate dockerfile . +</code></pre></div> +</div> + +<p>will find Dockerfile in the present working directory and subfolders and update. 
+For digests, you might see that:</p>

+<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">FROM</span><span class="s"> ubuntu:20.04</span>
+</code></pre></div>
+</div>

+<p>is updated to</p>

+<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">FROM</span><span class="s"> ubuntu:18.04@sha256:9bc830af2bef73276515a29aa896eedfa7bdf4bdbc5c1063b4c457a4bbb8cd79</span>
+</code></pre></div>
+</div>

+<p>Note in the above we still have the digest and the tag, so subsequent updates can
+further update the sha by looking up the container based on the tag.
+And we can also update build arguments that match a particular format! This one,
+specifically:</p>

+<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">ARG</span><span class="s"> uptodate_&lt;build-arg-type&gt;_&lt;build-arg-value&gt;=&lt;default&gt;</span>
+</code></pre></div>
+</div>

+<p>The above flags the build argument for uptodate to look at, using the library name as a prefix;
+the next string after the underscore is the kind of update, followed by
+specific metadata for that updater, and of course the value! A few examples are provided below.</p>

+<h4 id="spack-build-arguments">Spack Build Arguments</h4>

+<p><a href="https://github.com/spack/spack" target="_blank">Spack</a> is a package manager intended for HPC, and it’s
+huge at the lab where I work. So naturally, it made sense for uptodate to be able to
+look up the latest spack versions for some package.
+To create an argument that matches a spack package (and its version) you might see:</p>

+<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">ARG</span><span class="s"> uptodate_spack_ace=6.5.6</span>
+</code></pre></div>
+</div>

+<p>After the updater runs, if it finds a new version 6.5.12, the line will read:</p>

+<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">ARG</span><span class="s"> uptodate_spack_ace=6.5.12</span>
+</code></pre></div>
+</div>

+<p>This works by using the static API that is deployed alongside the <a href="https://spack.github.io/packages/" target="_blank">Spack Packages</a>
+repository that I designed earlier this year. 
So the updater will get the latest versions +as known within the last 24 hours.</p> + +<h4 id="github-release-build-argument">GitHub Release Build Argument</h4> + +<p>If we want an updated version from a GitHub release (let’s say the spack software itself) +we might see this:</p> + +<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">ARG</span><span class="s"> uptodate_github_release_spack__spack=v0.16.1</span> +</code></pre></div> +</div> + +<p>The above will look for new releases from spack on GitHub and update as follows:</p> + +<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">ARG</span><span class="s"> uptodate_github_release_spack__spack=v0.16.2</span> +</code></pre></div> +</div> + +<h4 id="github-commit-build-argument">GitHub Commit Build Argument</h4> + +<p>Similarity, if we want more “bleeding edge” changes we can ask for a commit +from a specific branch, following this pattern:</p> + +<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">ARG</span><span class="s"> uptodate_github_commit_&lt;org&gt;__&lt;name&gt;__&lt;branch&gt;=&lt;release-tag&gt;</span> +</code></pre></div> +</div> + +<p>Here is an example of asking for updates for the develop branch.</p> + +<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">ARG</span><span class="s"> uptodate_github_commit_spack__spack__develop=NA</span> +</code></pre></div> +</div> + +<p>which wouldn’t care about the first “commit” NA as it would update to:</p> + +<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">ARG</span><span class="s"> uptodate_github_commit_spack__spack__develop=be8e52fbbec8106150680fc628dc72e69e5a20be</span> +</code></pre></div> +</div> + +<p>And then to use it in your Dockerfile, you might pop into an environment variable:</p> + +<div class="language-dockerfile highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="k">ENV</span><span class="s"> spack_commit=${uptodate_github_commit_spack__spack__develop}</span> +</code></pre></div> +</div> + +<p>See the <a href="https://vsoch.github.io/uptodate/docs/#/user-guide/user-guide?id=dockerfile" target="_blank">docs</a> for more detailed usage and an example for the Dockerfile updater.</p> + +<h3 id="docker-build">Docker Build</h3> + +<p>The second updater that I think is pretty useful is the Docker build updater. +This updated will read a config file, an uptodate.yaml, and then follow instructions +for version regular expressoins and different kinds of builds args to generate a matrix of +builds (intended for GitHub actions). 
For example, let’s say that we start with this configuration file:</p> + +<div class="language-yaml highlighter-rouge"><div class="highlight"><pre class="highlight"><code> +<span class="na">dockerbuild</span><span class="pi">:</span> + <span class="na">build_args</span><span class="pi">:</span> + + <span class="c1"># This is an example of a manual build arg, versions are required</span> + <span class="na">llvm_version</span><span class="pi">:</span> + + <span class="c1"># The key is a shorthand used for naming (required)</span> + <span class="na">key</span><span class="pi">:</span> <span class="s">llvm</span> + <span class="na">versions</span><span class="pi">:</span> + <span class="pi">-</span> <span class="s2">"</span><span class="s">4.0.0"</span> + <span class="pi">-</span> <span class="s2">"</span><span class="s">5.0.1"</span> + <span class="pi">-</span> <span class="s2">"</span><span class="s">6.0.0"</span> + + <span class="c1"># This is an example of a spack build arg, the name is the package</span> + <span class="na">abyss_version</span><span class="pi">:</span> + <span class="na">key</span><span class="pi">:</span> <span class="s">abyss</span> + <span class="na">name</span><span class="pi">:</span> <span class="s">abyss</span> + <span class="na">type</span><span class="pi">:</span> <span class="s">spack</span> + + <span class="c1"># This will be parsed by the Dockerfile parser, name is the container name</span> + <span class="na">ubuntu_version</span><span class="pi">:</span> + + <span class="na">key</span><span class="pi">:</span> <span class="s">ubuntu</span> + <span class="na">name</span><span class="pi">:</span> <span class="s">ubuntu</span> + <span class="na">type</span><span class="pi">:</span> <span class="s">container</span> + <span class="na">startat</span><span class="pi">:</span> <span class="s2">"</span><span class="s">16.04"</span> + <span class="na">endat</span><span class="pi">:</span> <span class="s2">"</span><span class="s">20.04"</span> + <span class="na">filter</span><span class="pi">:</span> + <span class="pi">-</span> <span class="s2">"</span><span class="s">^[0-9]+[.]04$"</span> + <span class="na">skips</span><span class="pi">:</span> + <span class="pi">-</span> <span class="s2">"</span><span class="s">17.04"</span> + <span class="pi">-</span> <span class="s2">"</span><span class="s">19.04"</span> +</code></pre></div> +</div> + +<p>You’ll see the primary section of interest is under “dockerbuild” and under this +we have three build args for a manually defined set of versions, a version from +a spack package, and a container. 
You could run this in a repository root +to look for these config files (and a Dockerfile that they render with in +the same directory or below it) to generate a build matrix.</p> + +<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>uptodate dockerbuild +</code></pre></div> +</div> + +<p>Or to only include changed uptodate.yaml files:</p> + +<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>uptodate dockerbuild <span class="nt">--changes</span> +</code></pre></div> +</div> + +<p>If you provide a registry URI that the containers build to, we can actually check +these containers to look at current build args (that are saved as labels and then +viewable in the image config by uptodate) to determine if an update is needed.</p> + +<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>uptodate dockerbuild <span class="nt">--registry</span> ghcr.io/rse-radiuss +</code></pre></div> +</div> + +<p>the container. I think this is one of the neatest features - it was just added +in evenings this last week! Check out an +<a href="https://crane.ggcr.dev/config/ghcr.io/rse-radiuss/ubuntu:20.04" target="_blank">example image config</a> that has these labels! +This registry URI will also be included in the output to make it easy to build +In a GitHub action, it might be used like this:</p> + +<div class="language-yaml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="na">jobs</span><span class="pi">:</span> + <span class="na">generate</span><span class="pi">:</span> + <span class="na">name</span><span class="pi">:</span> <span class="s">Generate Build Matrix</span> + <span class="na">runs-on</span><span class="pi">:</span> <span class="s">ubuntu-latest</span> + <span class="na">outputs</span><span class="pi">:</span> + <span class="na">dockerbuild_matrix</span><span class="pi">:</span> <span class="s">$</span> + <span class="na">empty_matrix</span><span class="pi">:</span> <span class="s">$</span> + + <span class="na">steps</span><span class="pi">:</span> + <span class="pi">-</span> <span class="na">uses</span><span class="pi">:</span> <span class="s">actions/checkout@v2</span> + <span class="na">if</span><span class="pi">:</span> <span class="s">github.event_name == 'pull_request'</span> + <span class="na">with</span><span class="pi">:</span> + <span class="na">fetch-depth</span><span class="pi">:</span> <span class="m">0</span> + <span class="na">ref</span><span class="pi">:</span> <span class="s">$</span> + + <span class="pi">-</span> <span class="na">uses</span><span class="pi">:</span> <span class="s">actions/checkout@v2</span> + <span class="na">if</span><span class="pi">:</span> <span class="s">github.event_name != 'pull_request'</span> + <span class="na">with</span><span class="pi">:</span> + <span class="na">fetch-depth</span><span class="pi">:</span> <span class="m">0</span> + + <span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">Generate Build Matrix</span> + <span class="na">uses</span><span class="pi">:</span> <span class="s">vsoch/uptodate@main</span> + <span class="na">id</span><span class="pi">:</span> <span class="s">dockerbuild</span> + <span class="na">with</span><span class="pi">:</span> + <span class="na">root</span><span class="pi">:</span> <span class="s">.</span> + <span 
class="na">parser</span><span class="pi">:</span> <span class="s">dockerbuild</span> + <span class="na">flags</span><span class="pi">:</span> <span class="s2">"</span><span class="s">--registry</span><span class="nv"> </span><span class="s">ghcr.io/myreponame"</span> + + <span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">View and Check Build Matrix Result</span> + <span class="na">env</span><span class="pi">:</span> + <span class="na">result</span><span class="pi">:</span> <span class="s">$</span> + <span class="na">run</span><span class="pi">:</span> <span class="pi">|</span> + <span class="s">echo ${result}</span> + + <span class="na">build</span><span class="pi">:</span> + <span class="na">needs</span><span class="pi">:</span> + <span class="pi">-</span> <span class="s">generate</span> + <span class="na">runs-on</span><span class="pi">:</span> <span class="s">ubuntu-latest</span> + <span class="na">strategy</span><span class="pi">:</span> + <span class="na">fail-fast</span><span class="pi">:</span> <span class="no">false</span> + <span class="na">matrix</span><span class="pi">:</span> + <span class="na">result</span><span class="pi">:</span> <span class="s">$</span> + <span class="na">if</span><span class="pi">:</span> <span class="s">$</span> + + <span class="na">name</span><span class="pi">:</span> <span class="s2">"</span><span class="s">Build</span><span class="nv"> </span><span class="s">$"</span> + <span class="na">steps</span><span class="pi">:</span> + <span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">Checkout Repository</span> + <span class="na">uses</span><span class="pi">:</span> <span class="s">actions/checkout@v2</span> + + <span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">Set up Docker Buildx</span> + <span class="na">uses</span><span class="pi">:</span> <span class="s">docker/setup-buildx-action@v1</span> + + <span class="pi">-</span> <span class="na">name</span><span class="pi">:</span> <span class="s">Build $</span> + <span class="na">id</span><span class="pi">:</span> <span class="s">builder</span> + <span class="na">env</span><span class="pi">:</span> + <span class="na">container</span><span class="pi">:</span> <span class="s">$</span> + <span class="na">prefix</span><span class="pi">:</span> <span class="s">$</span> + <span class="na">filename</span><span class="pi">:</span> <span class="s">$</span> + <span class="na">run</span><span class="pi">:</span> <span class="pi">|</span> + <span class="s">basedir=$(dirname $filename)</span> + <span class="s">cd $basedir</span> + <span class="s">${prefix} -t ${container} .</span> +</code></pre></div> +</div> + +<p>Of course you’d want to login to a registry, and then also possibly calculate metrics for +the container, so consider this a very simple example. 
+The build matrix that is being passed between those steps has entries like this:</p> + +<div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">[</span><span class="w"> + </span><span class="p">{</span><span class="w"> + </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"ubuntu/clang/uptodate.yaml"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"container_name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"ghcr.io/rse-radiuss/clang-ubuntu-20.04:llvm-10.0.0"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"filename"</span><span class="p">:</span><span class="w"> </span><span class="s2">"ubuntu/clang/Dockerfile"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"parser"</span><span class="p">:</span><span class="w"> </span><span class="s2">"dockerbuild"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"buildargs"</span><span class="p">:</span><span class="w"> </span><span class="p">{</span><span class="w"> + </span><span class="nl">"llvm_version"</span><span class="p">:</span><span class="w"> </span><span class="s2">"10.0.0"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"ubuntu_version"</span><span class="p">:</span><span class="w"> </span><span class="s2">"20.04"</span><span class="w"> + </span><span class="p">},</span><span class="w"> + </span><span class="nl">"command_prefix"</span><span class="p">:</span><span class="w"> </span><span class="s2">"docker build -f Dockerfile --build-arg llvm_version=10.0.0 --build-arg ubuntu_version=20.04"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"description"</span><span class="p">:</span><span class="w"> </span><span class="s2">"ubuntu/clang llvm_version:10.0.0 ubuntu_version:20.04"</span><span class="w"> + </span><span class="p">},</span><span class="w"> + </span><span class="err">...</span><span class="w"> +</span><span class="p">]</span><span class="w"> +</span></code></pre></div> +</div> + +<h3 id="git-updater">Git Updater</h3> + +<p>I also like this updater because it easily generates for you a matrix of files +that are changed, according to git. 
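+(A quick aside of my own: both updaters emit plain JSON, so if you save a matrix -
+say the dockerbuild one above - to matrix.json, you can poke at it with jq before
+wiring it into a workflow:)</p>
+
+<div class="highlight"><pre><code class="language-bash"># list the generated build commands and the Dockerfile behind each entry
+jq -r '.[].command_prefix' matrix.json
+jq -r '.[].filename' matrix.json
+</code></pre></div>
+
+<p>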
Running locally it looks like this:</p> + +<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>./uptodate git /path/to/repo + _ _ _ + _ _ _ __ | |_ ___ __| | __ _| |_ ___ + | | | | <span class="s1">'_ \| __/ _ \ / _ |/ _ | __/ _ \ + | |_| | |_) | || (_) | (_| | (_| | || __/ + \__,_| .__/ \__\___/ \__,_|\__,_|\__\___| + |_| git + + + ⭐️ Changed Files ⭐️ + .github/workflows/build-matrices.yaml: Modify +</span></code></pre></div> +</div> + +<p>And would generate a matrix for a GitHub action too:</p> + +<div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">[</span><span class="w"> + </span><span class="p">{</span><span class="w"> + </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Modify"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"filename"</span><span class="p">:</span><span class="w"> </span><span class="s2">"cli/dockerbuild.go"</span><span class="w"> + </span><span class="p">},</span><span class="w"> + </span><span class="p">{</span><span class="w"> + </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Modify"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"filename"</span><span class="p">:</span><span class="w"> </span><span class="s2">"parsers/common.go"</span><span class="w"> + </span><span class="p">},</span><span class="w"> + </span><span class="p">{</span><span class="w"> + </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Insert"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"filename"</span><span class="p">:</span><span class="w"> </span><span class="s2">"parsers/docker/buildargs.go"</span><span class="w"> + </span><span class="p">},</span><span class="w"> + </span><span class="p">{</span><span class="w"> + </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Modify"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"filename"</span><span class="p">:</span><span class="w"> </span><span class="s2">"parsers/docker/docker.go"</span><span class="w"> + </span><span class="p">},</span><span class="w"> + </span><span class="p">{</span><span class="w"> + </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Modify"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"filename"</span><span class="p">:</span><span class="w"> </span><span class="s2">"tests/ubuntu/21.04/Dockerfile"</span><span class="w"> + </span><span class="p">},</span><span class="w"> + </span><span class="p">{</span><span class="w"> + </span><span class="nl">"name"</span><span class="p">:</span><span class="w"> </span><span class="s2">"Modify"</span><span class="p">,</span><span class="w"> + </span><span class="nl">"filename"</span><span class="p">:</span><span class="w"> </span><span class="s2">"tests/ubuntu/clang/Dockerfile"</span><span class="w"> + </span><span class="p">}</span><span class="w"> +</span><span class="p">]</span><span class="w"> +</span></code></pre></div> +</div> + +<p>And of course you can change the default “main” to another branch:</p> + +<div class="language-bash highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nv">$ </span>./uptodate git /path/to/repo <span 
class="nt">--branch</span> master +</code></pre></div> +</div> + +<p>and that also pipes into a GitHub action. I don’t want to redundantly reproduce the docs, +so if you are interested you can read more +at the <a href="https://vsoch.github.io/uptodate/docs/#/user-guide/user-guide" target="_blank">user guide</a> +or <a href="https://vsoch.github.io/uptodate/docs/#/user-guide/github-action" target="_blank">GitHub action pages</a>. +Mind you that the library is heavily under develop, so if you have a request for a new updater or want to report +a a bug, please <a href="https://github.com/vsoch/uptodate/issues" target="_blank">let me know!</a>.</p> + +<h2 id="overview">Overview</h2> + +<p>I have loved working on this library. I think it’s the first library in Go where +I’ve been proficient enough to not look everything up that I need - the code has just +flowed from my fingers! Mind you I’m still figuring out my own design preferences, +and I’m at the stage where I’ll write a new functionality, and then immediately not like +my design, and want to re-write it. But I think that means I’ll eventually get better. +But it’s always good to have one or more projects you are passionate about, because +I don’t personally see a point in being a software engineer if I don’t (yes, I know it +makes a salary, but I require more than that).</p> + + + + + HiFive Unmatched - some benchmarking results + + 2021-09-15T00:14:28-06:00 + https://hpc.social/2021/hifive-unmatched-some-benchmarking-results + <p>No sooner did I receive my SiFive HiFive Unmatched board than did the questions +about the performance of the board start to come in - from far and wide. +Prior to receiving the board, <a href="http://linuxgizmos.com/sifive-to-demo-pc-running-new-risc-v-soc-and-unveil-next-gen-ai-soc">articles</a> about the performance of the +Freedom U740 SOC compared it with Arm Cortex-A55 cores. I&rsquo;ve had a range of Arm +based systems over the years, so this helped to set my expectation ahead +of time in terms of performance. What was promising about the Unmatched +boars is the fact that it has a PCIe slot (for a GPU) and the possibility +to use NVMe storage. And as Unmatched boards started to arrive out in the +wild, posts regarding performance of the board appeared in the [SiFive forums] (<a href="https://forums.sifive.com/t/relative-cpu-performance/4373/2">https://forums.sifive.com/t/relative-cpu-performance/4373/2</a> ).</p> + +<p>Now that I&rsquo;ve had the board setup for a few weeks, I decided to run a series +of benchmarks on the board, with an HPC flavour. I&rsquo;ve setup my Unmatched +board effectively as a desktop system. I&rsquo;ve installed both a GPU as well as +and NVMe SSD in the system (details below). The system has been installed with +Ubuntu 21.04 and has been configured to boot with the CPU clock speed +at 1.4 GHz following the procedure <a href="https://forums.sifive.com/t/testing-unmatched-at-1-4-ghz/4863">here</a>.</p> + +<p>For the record, I&rsquo;ve configured a full desktop running KDE Plasma and +I&rsquo;m actually writing this post in Emacs running on the Unmatched. 
+It&rsquo;s not
+quite up to being a desktop replacement, but I can happily use it for
+less intense tasks as well as programming, etc.</p>
+
+<p><strong>NVIDIA GeForce GT 1030</strong>
+<div class="highlight"><pre><code class="language-plaintext">root@unmatched:/home/ubuntu# lshw -c display
+  *-display
+       description: VGA compatible controller
+       product: GP108 [GeForce GT 1030]
+       vendor: NVIDIA Corporation
+       physical id: 0
+       bus info: pci@0000:07:00.0
+       logical name: /dev/fb0
+       version: a1
+       width: 64 bits
+       clock: 33MHz
+       capabilities: pm msi pciexpress vga_controller bus_master cap_list rom fb
+       configuration: depth=32 driver=nouveau latency=0 mode=1920x1200 visual=truecolor xres=1920 yres=1200
+       resources: iomemory:200-1ff iomemory:200-1ff irq:93 memory:61000000-61ffffff memory:2000000000-200fffffff memory:2010000000-2011ffffff ioport:0(size=128) memory:60800000-6087ffff</code></pre></div>
+</p>
+
+<p><strong>Samsung SSD 970 EVO Plus 250GB</strong>
+<div class="highlight"><pre><code class="language-plaintext">root@unmatched:/home/ubuntu# lshw -c storage
+  *-storage
+       description: Non-Volatile memory controller
+       product: NVMe SSD Controller SM981/PM981/PM983
+       vendor: Samsung Electronics Co Ltd
+       physical id: 0
+       bus info: pci@0000:06:00.0
+       version: 00
+       width: 64 bits
+       clock: 33MHz
+       capabilities: storage pm msi pciexpress msix nvm_express bus_master cap_list
+       configuration: driver=nvme latency=0
+       resources: irq:38 memory:60500000-60503fff
+     *-nvme0
+          description: NVMe device
+          product: Samsung SSD 970 EVO Plus 250GB
+          physical id: 0
+          logical name: /dev/nvme0
+          version: 2B2QEXM7
+          serial: S59BNXXX2065XXX
+          configuration: nqn=nqn.2014.08.org.nvmexpress:144dXXXdS59BNJ0XXX6509J Samsung SSD 970 EVO Plus 250GB state=live</code></pre></div>
+</p>
+
+<p><strong>High-Performance Linpack (HPL)</strong></p>
+
+<p>Although I did previously run <a href="https://www.netlib.org/benchmark/hpl/">HPL</a> on the Unmatched, that was prior to the changes
+needed to boot the system at 1.4 GHz. Furthermore, some helpful individuals
+on Twitter pointed out an issue with my HPL run (based on the
+massive run queue lengths I observed). You can view the thread on Twitter
+below.</p>
+
+<blockquote class="twitter-tweet"><p dir="ltr" lang="en">Now running at a blistering 1.4 GHz, I'm giving Linpack another whirl on the <a href="https://twitter.com/hashtag/Unmatched?src=hash&amp;ref_src=twsrc%5Etfw">#Unmatched</a> board. Will definitely break 2 GFlops this time. <a href="https://twitter.com/hashtag/SiFive?src=hash&amp;ref_src=twsrc%5Etfw">#SiFive</a> <a href="https://twitter.com/hashtag/HiFive?src=hash&amp;ref_src=twsrc%5Etfw">#HiFive</a> <a href="https://twitter.com/hashtag/RISCV?src=hash&amp;ref_src=twsrc%5Etfw">#RISCV</a> <a href="https://t.co/GOV9ISOUCJ">https://t.co/GOV9ISOUCJ</a> <a href="https://t.co/nAwNRNXZWm">pic.twitter.com/nAwNRNXZWm</a></p>
+&mdash; Gábor SAMU (@gabor_samu) <a href="https://twitter.com/gabor_samu/status/1409633174255116293?ref_src=twsrc%5Etfw">June 28, 2021</a></blockquote>
+
+<p>With the folly of my MPI ways better understood, I ran HPL to completion
+with a result of 2.5 GFlops.
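+(As a generic aside of my own - not the exact diagnosis from that thread - the run
+queue length is the first column that vmstat reports, so it&rsquo;s an easy way to
+spot a run that is oversubscribing the four cores; note the OMP_NUM_THREADS=1 in the
+session below, which keeps each MPI rank to a single thread.)</p>
+
+<div class="highlight"><pre><code class="language-bash"># sample system state every 5 seconds while the benchmark runs; sustained values
+# in the first ("r", run queue) column well above the core count mean that MPI
+# ranks and OpenMP threads are fighting over the same cores
+vmstat 5
+</code></pre></div>
+
+<p>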
I&rsquo;ve not devoted more time to squeeze more +performance from the system yet, as this result still seems low to me.</p> + +<div class="highlight"><pre><code class="language-plaintext">ubuntu@unmatched:~/hpl-2.3/bin/Linux_RISCV$ export OMP_NUM_THREADS=1 +ubuntu@unmatched:~/hpl-2.3/bin/Linux_RISCV$ mpirun --hostfile ./hostfile ./xhpl +================================================================================ +HPLinpack 2.3 -- High-Performance Linpack benchmark -- December 2, 2018 +Written by A. Petitet and R. Clint Whaley, Innovative Computing Laboratory, UTK +Modified by Piotr Luszczek, Innovative Computing Laboratory, UTK +Modified by Julien Langou, University of Colorado Denver +================================================================================ + +An explanation of the input/output parameters follows: +T/V : Wall time / encoded variant. +N : The order of the coefficient matrix A. +NB : The partitioning blocking factor. +P : The number of process rows. +Q : The number of process columns. +Time : Time in seconds to solve the linear system. +Gflops : Rate of execution for solving the linear system. + +The following parameter values will be used: + +N : 35712 +NB : 96 +PMAP : Row-major process mapping +P : 2 +Q : 2 +PFACT : Right +NBMIN : 4 +NDIV : 2 +RFACT : Crout +BCAST : 1ringM +DEPTH : 1 +SWAP : Mix (threshold = 64) +L1 : transposed form +U : transposed form +EQUIL : yes +ALIGN : 8 double precision words + +-------------------------------------------------------------------------------- + +- The matrix A is randomly generated for each test. +- The following scaled residual check will be computed: + ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N ) +- The relative machine precision (eps) is taken to be 1.110223e-16 +- Computational tests pass if scaled residuals are less than 16.0 + +================================================================================ +T/V N NB P Q Time Gflops +-------------------------------------------------------------------------------- +WR11C2R4 35712 96 2 2 11941.62 2.5428e+00 +HPL_pdgesv() start time Thu Jul 8 16:26:11 2021 + +HPL_pdgesv() end time Thu Jul 8 19:45:12 2021 + +-------------------------------------------------------------------------------- +||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)= 4.76417669e-03 ...... PASSED +================================================================================ + +Finished 1 tests with the following results: + 1 tests completed and passed residual checks, + 0 tests completed and failed residual checks, + 0 tests skipped because of illegal input values. +-------------------------------------------------------------------------------- + +End of Tests. +================================================================================</code></pre></div> + +<p><strong>STREAM</strong></p> + +<p>Up next is <a href="https://www.cs.virginia.edu/stream/">STREAM</a>, which is used to test memory bandwidth. I ran stream using the default settings for memory +size.</p> + +<div class="highlight"><pre><code class="language-plaintext">ubuntu@unmatched:~/STREAM$ ./stream_c.exe +------------------------------------------------------------- +STREAM version $Revision: 5.10 $ +------------------------------------------------------------- +This system uses 8 bytes per array element. +------------------------------------------------------------- +Array size = 10000000 (elements), Offset = 0 (elements) +Memory per array = 76.3 MiB (= 0.1 GiB). 
+Total memory required = 228.9 MiB (= 0.2 GiB). +Each kernel will be executed 10 times. + The *best* time for each kernel (excluding the first iteration) + will be used to compute the reported bandwidth. +------------------------------------------------------------- +Number of Threads requested = 4 +Number of Threads counted = 4 +------------------------------------------------------------- +Your clock granularity/precision appears to be 1 microseconds. +Each test below will take on the order of 91364 microseconds. + (= 91364 clock ticks) +Increase the size of the arrays if this shows that +you are not getting at least 20 clock ticks per test. +------------------------------------------------------------- +WARNING -- The above is only a rough guideline. +For best results, please be sure you know the +precision of your system timer. +------------------------------------------------------------- +Function Best Rate MB/s Avg time Min time Max time +Copy: 1349.8 0.121990 0.118533 0.132665 +Scale: 1175.2 0.138145 0.136152 0.149729 +Add: 1306.3 0.186115 0.183730 0.196969 +Triad: 1315.8 0.185806 0.182400 0.193755 +------------------------------------------------------------- +Solution Validates: avg error less than 1.000000e-13 on all three arrays +-------------------------------------------------------------</code></pre></div> + +<p><strong>IOzone</strong></p> + +<p>Next, I wheeled out IOzone to benchmark the filesystem (on the NVMe SSD). +The full output from IOzone can be viewed below.</p> + +<!-- raw HTML omitted --> +<div class="highlight"><pre><code class="language-plaintext">ubuntu@unmatched:~$ time iozone -a -R -c -b result_file.xls -f ./testfile + Iozone: Performance Test of File I/O + Version $Revision: 3.489 $ + Compiled for 64 bit mode. + Build: linux + + Contributors:William Norcott, Don Capps, Isom Crawford, Kirby Collins + Al Slater, Scott Rhine, Mike Wisner, Ken Goss + Steve Landherr, Brad Smith, Mark Kelly, Dr. Alain CYR, + Randy Dunlap, Mark Montague, Dan Million, Gavin Brebner, + Jean-Marc Zucconi, Jeff Blomberg, Benny Halevy, Dave Boone, + Erik Habbinga, Kris Strecker, Walter Wong, Joshua Root, + Fabrice Bacchella, Zhenghua Xue, Qin Li, Darren Sawyer, + Vangel Bojaxhi, Ben England, Vikentsi Lapa, + Alexey Skidanov, Sudhir Kumar. + + Run began: Thu Jul 8 21:24:44 2021 + + Auto Mode + Excel chart generation enabled + Include close in write timing + Command line used: iozone -a -R -c -b result_file.xls -f ./testfile + Output is in kBytes/sec + Time Resolution = 0.000001 seconds. + Processor cache size set to 1024 kBytes. + Processor cache line size set to 32 bytes. + File stride size set to 17 * record size. 
+ random random bkwd record stride + kB reclen write rewrite read reread read write read rewrite read fwrite frewrite fread freread + 64 4 54002 117900 312204 435202 410573 129316 271200 142875 444572 106319 120331 286849 378715 + 64 8 69708 153848 357529 551422 488236 49382 321554 200653 576282 128265 167583 467002 503814 + 64 16 80398 214781 647135 571375 598110 190947 443837 248136 571375 159991 192730 503814 659861 + 64 32 77564 245190 679917 633392 633392 264258 435202 274809 492717 134769 173429 359444 385237 + 64 64 98443 280553 516414 780776 571375 279385 463774 280553 529661 266620 278516 477808 587635 + 128 4 65641 140659 488594 544601 488594 145936 365764 170879 398625 123084 138554 499970 547377 + 128 8 85860 179507 544601 597981 624409 184694 465708 231429 826202 166481 147946 408949 387686 + 128 16 93910 246632 731625 294207 612303 245616 410199 239268 419822 114917 143671 509943 537512 + 128 32 107658 274080 692017 618653 687585 290859 540217 327199 666253 200339 245168 606767 677178 + 128 64 106042 256531 606767 670413 653282 317711 533241 338761 649331 164744 195737 492178 517812 + 128 128 97563 203529 592699 695603 533241 217991 351630 198414 348661 164542 183432 430942 621518 + 256 4 25934 32372 508929 528987 512818 32245 358056 168092 428843 130406 135167 466275 519266 + 256 8 87675 196016 557844 602276 560172 186841 436870 264741 515032 178516 188514 585845 618584 + 256 16 103395 234246 576720 557554 598249 242883 531080 335564 882426 193963 223379 585845 510866 + 256 32 108845 292508 592636 560172 598249 265003 467290 357937 699594 181442 204959 474937 576411 + 256 64 104828 340027 768712 670332 817898 364622 703721 414283 828628 264481 320347 802014 792543 + 256 128 129682 291872 604991 615393 661657 335564 349206 341759 627623 182863 267512 663292 716876 + 256 256 115533 328180 666586 900936 773140 385433 585526 375459 680960 365242 374935 716876 856386 + 512 4 69857 144428 546460 582478 531055 142777 420020 177348 367291 112552 118824 355319 392678 + 512 8 83456 212373 675350 619978 688118 217978 599553 285425 503418 161710 158605 438011 466255 + 512 16 97062 246644 694797 680918 772252 277352 699322 348063 728502 230743 249858 755144 798675 + 512 32 96365 245096 451454 430978 582478 248183 433939 374269 850554 227251 268912 748563 784094 + 512 64 100864 346435 762112 706221 750656 347556 680918 357032 717069 181102 216222 435700 606667 + 512 128 126577 358283 806171 742096 861818 386947 834037 468493 912733 270914 332901 851903 855978 + 512 256 81959 223770 480227 437654 528441 194152 431671 257807 667582 145131 208558 584539 695472 + 512 512 106222 295804 618371 774480 714207 364672 538918 340448 525082 323864 312370 537838 467575 + 1024 4 65218 117835 409402 473859 473232 144876 396917 187924 466094 140814 145845 557729 374687 + 1024 8 81385 187129 525969 498788 572677 196919 466296 280867 478291 179459 185537 580181 591201 + 1024 16 103613 200392 388024 369021 452921 194089 362236 352138 456095 202403 225058 703769 768219 + 1024 32 98272 210302 471001 490192 642400 277420 605538 391883 634055 256189 277815 749583 804330 + 1024 64 113273 223326 397211 360170 523788 287254 516917 436134 872286 248899 296556 722230 793189 + 1024 128 104384 221073 377154 349019 531699 300036 581516 453734 909410 238413 282716 630518 708646 + 1024 256 117039 212718 321797 337712 437244 272129 506135 434809 874417 202891 240549 585799 679067 + 1024 512 106755 189909 310199 389078 395674 241279 344788 315671 600963 143099 158049 354815 420506 + 1024 1024 102544 206918 242204 238307 
227960 178371 227153 193896 263773 198264 206540 274372 273864 + 2048 4 67621 118586 295229 291611 300380 112720 300864 191565 295188 112893 115256 329421 352627 + 2048 8 81958 147391 335848 309504 377499 160689 370818 276498 327462 138108 144500 376259 403533 + 2048 16 95172 183562 358484 312057 338069 164470 380729 361440 410337 179192 182987 442442 480399 + 2048 32 97999 199201 428729 406512 495159 234615 467484 395907 398608 173454 184373 396602 404731 + 2048 64 109430 221980 395670 380172 442624 211393 334423 425248 333877 190722 205374 436484 476297 + 2048 128 105490 208768 414576 399926 471694 255327 458893 483020 950865 176432 186811 382151 396437 + 2048 256 112029 222129 368875 360001 375930 215487 323491 384874 716855 157092 176171 380594 400952 + 2048 512 100895 205560 350440 358918 357484 218058 343751 401646 843821 134833 138013 277248 284644 + 2048 1024 102057 171524 119222 203356 200433 168239 216584 197434 282323 116628 122092 230343 233417 + 2048 2048 91309 153096 178226 192846 190850 155070 195306 154891 187525 153075 158562 196566 192898 + 4096 4 66648 112502 290992 290353 289795 111273 271941 192725 267537 109812 110244 287843 282248 + 4096 8 78723 138038 323307 290741 310723 136125 332763 289985 335657 131361 130671 326664 326738 + 4096 16 91339 158845 351946 302890 343880 158858 342216 367689 343132 152596 147163 338067 341020 + 4096 32 100225 179615 324645 309247 364058 177072 349164 396857 335381 167334 165267 318956 325340 + 4096 64 105146 190974 331288 251890 281684 187813 365818 461938 363312 173390 171374 330561 332416 + 4096 128 108193 188945 312481 305785 339551 186819 345158 492304 336571 170652 168865 310516 317958 + 4096 256 106900 190078 299239 254932 297848 190413 338707 506365 966746 164082 150516 277278 293915 + 4096 512 104123 167531 258375 268734 250319 172827 267050 420099 827474 135544 127664 260807 261893 + 4096 1024 98620 157326 218346 213023 208841 158067 220015 221117 242124 116139 112130 209105 210126 + 4096 2048 93929 145650 165971 179879 184646 146995 181736 154455 191312 99717 104482 183070 181473 + 4096 4096 88738 142112 163735 171360 171423 140774 171551 142589 172702 143331 143891 172987 172688 + 8192 4 65841 110741 282065 282657 277459 109690 260419 193678 269801 106967 106454 280607 281455 + 8192 8 78737 132677 315256 290982 308596 131899 295965 292717 307058 127962 126110 313196 313079 + 8192 16 90528 154103 323935 298181 323103 155667 320762 371369 315841 144535 144240 319861 322836 + 8192 32 98090 171610 321998 295164 326245 170297 315682 405808 327891 159856 157632 323115 323553 + 8192 64 102402 183487 320338 294367 326542 183755 330043 462696 324783 165574 164260 319251 314342 + 8192 128 104471 186530 322979 294211 308746 182616 327785 469807 307704 160870 162323 308757 308248 + 8192 256 103528 181720 293578 274062 294857 179329 303496 508379 294136 146447 150201 280433 286644 + 8192 512 101430 168594 256594 244398 255992 168462 263349 452168 823642 128695 129724 243165 252691 + 8192 1024 95799 155619 209916 202450 199985 154110 210856 221903 333823 112200 111518 204121 203129 + 8192 2048 92977 145702 175608 171498 178584 144194 181762 159601 185932 104202 103452 176134 179028 + 8192 4096 92238 141487 166700 165515 171348 141248 169064 146623 169690 99166 102031 170414 168824 + 8192 8192 90378 139526 157680 167601 165843 138495 164806 139352 165531 141163 139726 166399 166068 + 16384 4 65537 107844 282103 280749 270242 107112 249130 191125 269594 105630 105330 278957 276503 + 16384 8 77659 132704 313095 288375 300982 130745 297485 
293336 304009 125047 125518 308525 310062 + 16384 16 89579 151110 324307 294989 318389 150588 318730 371619 318042 142514 144111 320895 319214 + 16384 32 97572 169494 320764 295858 323352 167762 318087 401183 324660 156694 157397 322360 317809 + 16384 64 101477 179036 318910 285769 320018 176881 314648 452435 323678 161742 161881 314654 315860 + 16384 128 102795 180331 310138 277826 312082 180186 293963 426964 284701 160913 161278 308800 310074 + 16384 256 104045 179146 293153 266918 291696 178797 289843 491096 299338 148743 147959 287292 281893 + 16384 512 100875 168689 252422 239039 253092 167970 254063 446721 266575 127001 129013 248796 253007 + 16384 1024 95676 153097 201314 195896 200676 154616 204118 289716 472338 111232 110130 199120 203740 + 16384 2048 92414 145259 176177 169912 177331 144759 176145 164623 188378 103403 103192 175926 176835 + 16384 4096 90982 141026 166237 161617 166524 139707 168260 147010 164545 101305 101404 168331 169053 + 16384 8192 91386 140560 164431 161942 165147 139306 163070 143700 166585 97515 101440 168344 166060 + 16384 16384 89645 137768 152920 160325 160462 135038 162230 136345 162555 135472 137670 162533 160600 + 32768 64 100518 176971 320112 294827 326020 173802 315206 469434 321128 161501 161534 318898 321566 + 32768 128 94197 177065 310461 267112 318089 174921 311148 514523 313221 159893 160154 309672 313030 + 32768 256 103097 176990 290264 270202 299582 175632 289551 523164 291906 149130 148485 289408 294488 + 32768 512 101035 167780 253540 241526 261351 168100 251639 541467 256591 129784 128800 251369 257644 + 32768 1024 95768 153873 206025 203363 213993 154402 206577 310768 206605 111571 110994 205200 213604 + 32768 2048 91379 143551 175813 169633 177196 143577 175918 164190 185869 102097 102274 175730 177497 + 32768 4096 90523 140886 165943 151238 166411 134797 165923 147228 168261 99992 100757 167505 165933 + 32768 8192 90276 139845 164569 158580 164541 138526 165586 144846 167084 99890 100325 166179 164616 + 32768 16384 90288 140233 161049 161259 164707 138540 166524 143497 166659 95763 99373 166405 164719 + 65536 64 99673 174433 320956 298810 326104 171399 318123 472110 319315 159545 159839 319251 325261 + 65536 128 102216 178852 315098 292086 317099 176802 309199 514040 314167 159211 159476 312052 319301 + 65536 256 103198 175205 295177 277020 299460 174958 293350 541848 291703 149396 148300 293892 300441 + 65536 512 100693 166563 257401 245254 262460 166880 256944 543574 251791 130994 130085 217541 261817 + 65536 1024 95156 151218 208433 201644 210890 153517 206626 314958 208027 111076 110836 209171 212611 + 65536 2048 91462 142076 174283 167215 175081 141982 174354 161699 174642 101625 101668 174694 175282 + 65536 4096 89889 138807 164119 157697 164260 138665 164529 146819 165145 99342 99703 164013 164483 + 65536 8192 89858 138926 163703 156592 164726 138468 165742 146436 165073 99717 100152 165121 164877 + 65536 16384 90040 138813 161472 158432 164698 138326 164635 143752 164450 99469 99683 164091 164078 + 131072 64 99699 173619 326411 298937 326454 172619 321262 474723 320924 159388 159492 325895 326394 + 131072 128 102334 177498 316453 292966 318546 176234 315274 517680 315511 158517 158811 318138 319006 + 131072 256 103082 174711 297416 275855 297015 174049 296870 444329 286940 149385 149204 298919 299437 + 131072 512 100339 166579 260214 240191 259430 166266 259838 555206 256165 130764 130765 260470 259510 + 131072 1024 95586 153909 212513 201096 213837 153992 211843 325153 210439 111303 111203 212527 213064 + 131072 2048 91508 
142804 173585 165672 176190 142592 176080 164782 175868 101700 101366 176033 176177 + 131072 4096 90078 139044 164617 157582 164913 138651 165129 147795 165182 99576 99553 164871 164751 + 131072 8192 90091 139102 163556 157103 163885 138561 163914 146168 164348 99535 99576 164213 163900 + 131072 16384 90186 139243 163011 157758 164480 138613 164554 146106 164729 99687 99691 164304 164366 + 262144 64 99714 173404 320209 298489 326041 172291 322327 473060 323206 158794 158988 326698 326258 + 262144 128 101915 176234 318818 291885 318585 176135 315553 519471 316594 158608 158328 318906 319312 + 262144 256 101863 174533 299916 275040 297207 174432 297790 544317 296951 149346 149257 299027 299670 + 262144 512 100486 166724 263070 244355 262700 166282 260518 556339 260549 131056 131044 262557 262988 + 262144 1024 95380 153078 211833 199591 207545 153018 210888 324728 210223 110796 110729 211268 207711 + 262144 2048 91384 142689 176506 168217 176610 142538 176400 164159 176496 101693 101779 175330 176267 + 262144 4096 89877 138476 164624 157385 164681 138474 164734 147854 165146 99457 99474 164752 164337 + 262144 8192 89777 137822 163119 156010 163126 137962 163258 146106 163272 99228 99301 163170 163176 + 262144 16384 89746 138290 163080 156668 163621 138319 163816 146164 163653 99030 99000 163817 163616 + 524288 64 99570 170849 326940 298947 326141 172346 321165 462987 324978 159041 157407 325485 325701 + 524288 128 101686 175674 320133 292759 319437 175953 316947 520033 318289 158680 155071 319095 319083 + 524288 256 102589 173927 299620 275856 298894 173725 297501 546112 298252 149043 147016 299433 299173 + 524288 512 100349 165718 260204 241624 260026 164597 259583 557023 258697 129011 130355 259933 260064 + 524288 1024 95445 153632 212357 200030 212384 153414 211822 324332 211673 111001 110483 212147 212352 + 524288 2048 91204 142710 175569 168424 176892 142584 176579 164609 176568 101606 101617 176806 176594 + 524288 4096 89824 138544 164376 157141 164100 138492 164385 152223 164688 99467 99468 164713 164703 + 524288 8192 89831 138452 163568 156551 163682 138203 163535 148260 163866 99301 99360 163519 163467 + 524288 16384 89779 138244 163085 156086 162949 137936 162678 146894 162898 98388 98517 163326 163262 + +iozone test complete. 
+Excel output is below: + +"Writer report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 54002 69708 80398 77564 98443 +"128" 65641 85860 93910 107658 106042 97563 +"256" 25934 87675 103395 108845 104828 129682 115533 +"512" 69857 83456 97062 96365 100864 126577 81959 106222 +"1024" 65218 81385 103613 98272 113273 104384 117039 106755 102544 +"2048" 67621 81958 95172 97999 109430 105490 112029 100895 102057 91309 +"4096" 66648 78723 91339 100225 105146 108193 106900 104123 98620 93929 88738 +"8192" 65841 78737 90528 98090 102402 104471 103528 101430 95799 92977 92238 90378 +"16384" 65537 77659 89579 97572 101477 102795 104045 100875 95676 92414 90982 91386 89645 +"32768" 0 0 0 0 100518 94197 103097 101035 95768 91379 90523 90276 90288 +"65536" 0 0 0 0 99673 102216 103198 100693 95156 91462 89889 89858 90040 +"131072" 0 0 0 0 99699 102334 103082 100339 95586 91508 90078 90091 90186 +"262144" 0 0 0 0 99714 101915 101863 100486 95380 91384 89877 89777 89746 +"524288" 0 0 0 0 99570 101686 102589 100349 95445 91204 89824 89831 89779 + +"Re-writer report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 117900 153848 214781 245190 280553 +"128" 140659 179507 246632 274080 256531 203529 +"256" 32372 196016 234246 292508 340027 291872 328180 +"512" 144428 212373 246644 245096 346435 358283 223770 295804 +"1024" 117835 187129 200392 210302 223326 221073 212718 189909 206918 +"2048" 118586 147391 183562 199201 221980 208768 222129 205560 171524 153096 +"4096" 112502 138038 158845 179615 190974 188945 190078 167531 157326 145650 142112 +"8192" 110741 132677 154103 171610 183487 186530 181720 168594 155619 145702 141487 139526 +"16384" 107844 132704 151110 169494 179036 180331 179146 168689 153097 145259 141026 140560 137768 +"32768" 0 0 0 0 176971 177065 176990 167780 153873 143551 140886 139845 140233 +"65536" 0 0 0 0 174433 178852 175205 166563 151218 142076 138807 138926 138813 +"131072" 0 0 0 0 173619 177498 174711 166579 153909 142804 139044 139102 139243 +"262144" 0 0 0 0 173404 176234 174533 166724 153078 142689 138476 137822 138290 +"524288" 0 0 0 0 170849 175674 173927 165718 153632 142710 138544 138452 138244 + +"Reader report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 312204 357529 647135 679917 516414 +"128" 488594 544601 731625 692017 606767 592699 +"256" 508929 557844 576720 592636 768712 604991 666586 +"512" 546460 675350 694797 451454 762112 806171 480227 618371 +"1024" 409402 525969 388024 471001 397211 377154 321797 310199 242204 +"2048" 295229 335848 358484 428729 395670 414576 368875 350440 119222 178226 +"4096" 290992 323307 351946 324645 331288 312481 299239 258375 218346 165971 163735 +"8192" 282065 315256 323935 321998 320338 322979 293578 256594 209916 175608 166700 157680 +"16384" 282103 313095 324307 320764 318910 310138 293153 252422 201314 176177 166237 164431 152920 +"32768" 0 0 0 0 320112 310461 290264 253540 206025 175813 165943 164569 161049 +"65536" 0 0 0 0 320956 315098 295177 257401 208433 174283 164119 163703 161472 +"131072" 0 0 0 0 326411 316453 297416 260214 212513 173585 164617 163556 163011 +"262144" 0 0 0 0 320209 318818 299916 263070 211833 176506 164624 163119 163080 +"524288" 0 0 0 0 326940 320133 299620 260204 212357 175569 164376 163568 163085 + +"Re-Reader report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 435202 551422 571375 633392 780776 +"128" 544601 597981 294207 618653 670413 695603 +"256" 
528987 602276 557554 560172 670332 615393 900936 +"512" 582478 619978 680918 430978 706221 742096 437654 774480 +"1024" 473859 498788 369021 490192 360170 349019 337712 389078 238307 +"2048" 291611 309504 312057 406512 380172 399926 360001 358918 203356 192846 +"4096" 290353 290741 302890 309247 251890 305785 254932 268734 213023 179879 171360 +"8192" 282657 290982 298181 295164 294367 294211 274062 244398 202450 171498 165515 167601 +"16384" 280749 288375 294989 295858 285769 277826 266918 239039 195896 169912 161617 161942 160325 +"32768" 0 0 0 0 294827 267112 270202 241526 203363 169633 151238 158580 161259 +"65536" 0 0 0 0 298810 292086 277020 245254 201644 167215 157697 156592 158432 +"131072" 0 0 0 0 298937 292966 275855 240191 201096 165672 157582 157103 157758 +"262144" 0 0 0 0 298489 291885 275040 244355 199591 168217 157385 156010 156668 +"524288" 0 0 0 0 298947 292759 275856 241624 200030 168424 157141 156551 156086 + +"Random read report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 410573 488236 598110 633392 571375 +"128" 488594 624409 612303 687585 653282 533241 +"256" 512818 560172 598249 598249 817898 661657 773140 +"512" 531055 688118 772252 582478 750656 861818 528441 714207 +"1024" 473232 572677 452921 642400 523788 531699 437244 395674 227960 +"2048" 300380 377499 338069 495159 442624 471694 375930 357484 200433 190850 +"4096" 289795 310723 343880 364058 281684 339551 297848 250319 208841 184646 171423 +"8192" 277459 308596 323103 326245 326542 308746 294857 255992 199985 178584 171348 165843 +"16384" 270242 300982 318389 323352 320018 312082 291696 253092 200676 177331 166524 165147 160462 +"32768" 0 0 0 0 326020 318089 299582 261351 213993 177196 166411 164541 164707 +"65536" 0 0 0 0 326104 317099 299460 262460 210890 175081 164260 164726 164698 +"131072" 0 0 0 0 326454 318546 297015 259430 213837 176190 164913 163885 164480 +"262144" 0 0 0 0 326041 318585 297207 262700 207545 176610 164681 163126 163621 +"524288" 0 0 0 0 326141 319437 298894 260026 212384 176892 164100 163682 162949 + +"Random write report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 129316 49382 190947 264258 279385 +"128" 145936 184694 245616 290859 317711 217991 +"256" 32245 186841 242883 265003 364622 335564 385433 +"512" 142777 217978 277352 248183 347556 386947 194152 364672 +"1024" 144876 196919 194089 277420 287254 300036 272129 241279 178371 +"2048" 112720 160689 164470 234615 211393 255327 215487 218058 168239 155070 +"4096" 111273 136125 158858 177072 187813 186819 190413 172827 158067 146995 140774 +"8192" 109690 131899 155667 170297 183755 182616 179329 168462 154110 144194 141248 138495 +"16384" 107112 130745 150588 167762 176881 180186 178797 167970 154616 144759 139707 139306 135038 +"32768" 0 0 0 0 173802 174921 175632 168100 154402 143577 134797 138526 138540 +"65536" 0 0 0 0 171399 176802 174958 166880 153517 141982 138665 138468 138326 +"131072" 0 0 0 0 172619 176234 174049 166266 153992 142592 138651 138561 138613 +"262144" 0 0 0 0 172291 176135 174432 166282 153018 142538 138474 137962 138319 +"524288" 0 0 0 0 172346 175953 173725 164597 153414 142584 138492 138203 137936 + +"Backward read report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 271200 321554 443837 435202 463774 +"128" 365764 465708 410199 540217 533241 351630 +"256" 358056 436870 531080 467290 703721 349206 585526 +"512" 420020 599553 699322 433939 680918 834037 431671 538918 +"1024" 396917 
466296 362236 605538 516917 581516 506135 344788 227153 +"2048" 300864 370818 380729 467484 334423 458893 323491 343751 216584 195306 +"4096" 271941 332763 342216 349164 365818 345158 338707 267050 220015 181736 171551 +"8192" 260419 295965 320762 315682 330043 327785 303496 263349 210856 181762 169064 164806 +"16384" 249130 297485 318730 318087 314648 293963 289843 254063 204118 176145 168260 163070 162230 +"32768" 0 0 0 0 315206 311148 289551 251639 206577 175918 165923 165586 166524 +"65536" 0 0 0 0 318123 309199 293350 256944 206626 174354 164529 165742 164635 +"131072" 0 0 0 0 321262 315274 296870 259838 211843 176080 165129 163914 164554 +"262144" 0 0 0 0 322327 315553 297790 260518 210888 176400 164734 163258 163816 +"524288" 0 0 0 0 321165 316947 297501 259583 211822 176579 164385 163535 162678 + +"Record rewrite report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 142875 200653 248136 274809 280553 +"128" 170879 231429 239268 327199 338761 198414 +"256" 168092 264741 335564 357937 414283 341759 375459 +"512" 177348 285425 348063 374269 357032 468493 257807 340448 +"1024" 187924 280867 352138 391883 436134 453734 434809 315671 193896 +"2048" 191565 276498 361440 395907 425248 483020 384874 401646 197434 154891 +"4096" 192725 289985 367689 396857 461938 492304 506365 420099 221117 154455 142589 +"8192" 193678 292717 371369 405808 462696 469807 508379 452168 221903 159601 146623 139352 +"16384" 191125 293336 371619 401183 452435 426964 491096 446721 289716 164623 147010 143700 136345 +"32768" 0 0 0 0 469434 514523 523164 541467 310768 164190 147228 144846 143497 +"65536" 0 0 0 0 472110 514040 541848 543574 314958 161699 146819 146436 143752 +"131072" 0 0 0 0 474723 517680 444329 555206 325153 164782 147795 146168 146106 +"262144" 0 0 0 0 473060 519471 544317 556339 324728 164159 147854 146106 146164 +"524288" 0 0 0 0 462987 520033 546112 557023 324332 164609 152223 148260 146894 + +"Stride read report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 444572 576282 571375 492717 529661 +"128" 398625 826202 419822 666253 649331 348661 +"256" 428843 515032 882426 699594 828628 627623 680960 +"512" 367291 503418 728502 850554 717069 912733 667582 525082 +"1024" 466094 478291 456095 634055 872286 909410 874417 600963 263773 +"2048" 295188 327462 410337 398608 333877 950865 716855 843821 282323 187525 +"4096" 267537 335657 343132 335381 363312 336571 966746 827474 242124 191312 172702 +"8192" 269801 307058 315841 327891 324783 307704 294136 823642 333823 185932 169690 165531 +"16384" 269594 304009 318042 324660 323678 284701 299338 266575 472338 188378 164545 166585 162555 +"32768" 0 0 0 0 321128 313221 291906 256591 206605 185869 168261 167084 166659 +"65536" 0 0 0 0 319315 314167 291703 251791 208027 174642 165145 165073 164450 +"131072" 0 0 0 0 320924 315511 286940 256165 210439 175868 165182 164348 164729 +"262144" 0 0 0 0 323206 316594 296951 260549 210223 176496 165146 163272 163653 +"524288" 0 0 0 0 324978 318289 298252 258697 211673 176568 164688 163866 162898 + +"Fwrite report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 106319 128265 159991 134769 266620 +"128" 123084 166481 114917 200339 164744 164542 +"256" 130406 178516 193963 181442 264481 182863 365242 +"512" 112552 161710 230743 227251 181102 270914 145131 323864 +"1024" 140814 179459 202403 256189 248899 238413 202891 143099 198264 +"2048" 112893 138108 179192 173454 190722 176432 157092 134833 116628 153075 
+"4096" 109812 131361 152596 167334 173390 170652 164082 135544 116139 99717 143331 +"8192" 106967 127962 144535 159856 165574 160870 146447 128695 112200 104202 99166 141163 +"16384" 105630 125047 142514 156694 161742 160913 148743 127001 111232 103403 101305 97515 135472 +"32768" 0 0 0 0 161501 159893 149130 129784 111571 102097 99992 99890 95763 +"65536" 0 0 0 0 159545 159211 149396 130994 111076 101625 99342 99717 99469 +"131072" 0 0 0 0 159388 158517 149385 130764 111303 101700 99576 99535 99687 +"262144" 0 0 0 0 158794 158608 149346 131056 110796 101693 99457 99228 99030 +"524288" 0 0 0 0 159041 158680 149043 129011 111001 101606 99467 99301 98388 + +"Re-Fwrite report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 120331 167583 192730 173429 278516 +"128" 138554 147946 143671 245168 195737 183432 +"256" 135167 188514 223379 204959 320347 267512 374935 +"512" 118824 158605 249858 268912 216222 332901 208558 312370 +"1024" 145845 185537 225058 277815 296556 282716 240549 158049 206540 +"2048" 115256 144500 182987 184373 205374 186811 176171 138013 122092 158562 +"4096" 110244 130671 147163 165267 171374 168865 150516 127664 112130 104482 143891 +"8192" 106454 126110 144240 157632 164260 162323 150201 129724 111518 103452 102031 139726 +"16384" 105330 125518 144111 157397 161881 161278 147959 129013 110130 103192 101404 101440 137670 +"32768" 0 0 0 0 161534 160154 148485 128800 110994 102274 100757 100325 99373 +"65536" 0 0 0 0 159839 159476 148300 130085 110836 101668 99703 100152 99683 +"131072" 0 0 0 0 159492 158811 149204 130765 111203 101366 99553 99576 99691 +"262144" 0 0 0 0 158988 158328 149257 131044 110729 101779 99474 99301 99000 +"524288" 0 0 0 0 157407 155071 147016 130355 110483 101617 99468 99360 98517 + +"Fread report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 286849 467002 503814 359444 477808 +"128" 499970 408949 509943 606767 492178 430942 +"256" 466275 585845 585845 474937 802014 663292 716876 +"512" 355319 438011 755144 748563 435700 851903 584539 537838 +"1024" 557729 580181 703769 749583 722230 630518 585799 354815 274372 +"2048" 329421 376259 442442 396602 436484 382151 380594 277248 230343 196566 +"4096" 287843 326664 338067 318956 330561 310516 277278 260807 209105 183070 172987 +"8192" 280607 313196 319861 323115 319251 308757 280433 243165 204121 176134 170414 166399 +"16384" 278957 308525 320895 322360 314654 308800 287292 248796 199120 175926 168331 168344 162533 +"32768" 0 0 0 0 318898 309672 289408 251369 205200 175730 167505 166179 166405 +"65536" 0 0 0 0 319251 312052 293892 217541 209171 174694 164013 165121 164091 +"131072" 0 0 0 0 325895 318138 298919 260470 212527 176033 164871 164213 164304 +"262144" 0 0 0 0 326698 318906 299027 262557 211268 175330 164752 163170 163817 +"524288" 0 0 0 0 325485 319095 299433 259933 212147 176806 164713 163519 163326 + +"Re-Fread report" + "4" "8" "16" "32" "64" "128" "256" "512" "1024" "2048" "4096" "8192" "16384" +"64" 378715 503814 659861 385237 587635 +"128" 547377 387686 537512 677178 517812 621518 +"256" 519266 618584 510866 576411 792543 716876 856386 +"512" 392678 466255 798675 784094 606667 855978 695472 467575 +"1024" 374687 591201 768219 804330 793189 708646 679067 420506 273864 +"2048" 352627 403533 480399 404731 476297 396437 400952 284644 233417 192898 +"4096" 282248 326738 341020 325340 332416 317958 293915 261893 210126 181473 172688 +"8192" 281455 313079 322836 323553 314342 308248 286644 252691 203129 179028 168824 
166068
+"16384" 276503 310062 319214 317809 315860 310074 281893 253007 203740 176835 169053 166060 160600
+"32768" 0 0 0 0 321566 313030 294488 257644 213604 177497 165933 164616 164719
+"65536" 0 0 0 0 325261 319301 300441 261817 212611 175282 164483 164877 164078
+"131072" 0 0 0 0 326394 319006 299437 259510 213064 176177 164751 163900 164366
+"262144" 0 0 0 0 326258 319312 299670 262988 207711 176267 164337 163176 163616
+"524288" 0 0 0 0 325701 319083 299173 260064 212352 176594 164703 163467 163262
+
+real    14m34.837s
+user    0m47.578s
+sys     13m17.443s</code></pre></div>
+
+<!-- raw HTML omitted -->
+<p>IOzone <em>writer report</em> plotted with LibreOffice Calc.
+<figure><img src="https://www.gaborsamu.com/images/unmatched_writer.png" />
+</figure>
+</p>
+
+<p>IOzone <em>reader report</em> plotted with LibreOffice Calc.
+<figure><img src="https://www.gaborsamu.com/images/unmatched_reader.png" />
+</figure>
+</p>
+
+<p><strong>High Performance Conjugate Gradient Benchmark (HPCG)</strong></p>
+
+<p><a href="http://www.hpcg-benchmark.org/">HPCG</a> is another benchmark for ranking HPC
+systems. As per the <a href="https://www.hpcg-benchmark.org/">HPCG page</a> it&rsquo;s <em>&hellip;designed to exercise computational and
+data access patterns that more closely match a different and broad set of
+important applications, and to give incentive to computer system designers
+to invest in capabilities that will have impact on the collective performance
+of these applications.</em> In other words, compared to HPL it&rsquo;s designed to give
+a more rounded assessment of the capabilities of a system, and of how it will
+perform with real-world applications.</p>
+
+<p>Like HPL, I built HPCG using the standard Ubuntu-supplied compilers, OpenMPI
+and other libraries.
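+A rough
+sketch of that kind of stock build (reconstructed from the HPCG README and the
+Ubuntu packages involved, not a transcript of my exact session) would be:</p>
+
+<div class="highlight"><pre><code class="language-bash"># stock toolchain and MPI from the Ubuntu repositories
+sudo apt-get install -y build-essential openmpi-bin libopenmpi-dev
+# out-of-tree build using the generic Linux_MPI setup file
+git clone https://github.com/hpcg-benchmark/hpcg.git
+cd hpcg &amp;&amp; mkdir build &amp;&amp; cd build
+../configure Linux_MPI
+make
+# run with one rank per core, matching the four distributed processes reported below
+cd bin &amp;&amp; mpirun -np 4 ./xhpcg
+</code></pre></div>
+
+<p>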
So this is not really considered to be an optimized run.</p> + +<p>The result from the HPCG run follows:</p> + +<div class="highlight"><pre><code class="language-plaintext">Final Summary= +Final Summary::HPCG result is VALID with a GFLOP/s rating of=0.167441 +Final Summary::HPCG 2.4 rating for historical reasons is=0.168344 +Final Summary::Reference version of ComputeDotProduct used=Performance results a +re most likely suboptimal +Final Summary::Reference version of ComputeSPMV used=Performance results are mos +t likely suboptimal +Final Summary::Reference version of ComputeMG used=Performance results are most +likely suboptimal +Final Summary::Reference version of ComputeWAXPBY used=Performance results are m +ost likely suboptimal +Final Summary::Results are valid but execution time (sec) is=761.216 +Final Summary::Official results execution time (sec) must be at least=1800</code></pre></div> + +<!-- raw HTML omitted --> +<div class="highlight"><pre><code class="language-plaintext">ubuntu@unmatched:~/hpcg/bin$ more HPCG-Benchmark_3.1_2021-07-08_19-18-43.txt +HPCG-Benchmark +version=3.1 +Release date=March 28, 2019 +Machine Summary= +Machine Summary::Distributed Processes=4 +Machine Summary::Threads per processes=1 +Global Problem Dimensions= +Global Problem Dimensions::Global nx=240 +Global Problem Dimensions::Global ny=240 +Global Problem Dimensions::Global nz=120 +Processor Dimensions= +Processor Dimensions::npx=2 +Processor Dimensions::npy=2 +Processor Dimensions::npz=1 +Local Domain Dimensions= +Local Domain Dimensions::nx=120 +Local Domain Dimensions::ny=120 +Local Domain Dimensions::Lower ipz=0 +Local Domain Dimensions::Upper ipz=0 +Local Domain Dimensions::nz=120 +########## Problem Summary ##########= +Setup Information= +Setup Information::Setup Time=41.0377 +Linear System Information= +Linear System Information::Number of Equations=6912000 +Linear System Information::Number of Nonzero Terms=184557592 +Multigrid Information= +Multigrid Information::Number of coarse grid levels=3 +Multigrid Information::Coarse Grids= +Multigrid Information::Coarse Grids::Grid Level=1 +Multigrid Information::Coarse Grids::Number of Equations=864000 +Multigrid Information::Coarse Grids::Number of Nonzero Terms=22813192 +Multigrid Information::Coarse Grids::Number of Presmoother Steps=1 +Multigrid Information::Coarse Grids::Number of Postsmoother Steps=1 +Multigrid Information::Coarse Grids::Grid Level=2 +Multigrid Information::Coarse Grids::Number of Equations=108000 +Multigrid Information::Coarse Grids::Number of Nonzero Terms=2788192 +Multigrid Information::Coarse Grids::Number of Presmoother Steps=1 +Multigrid Information::Coarse Grids::Number of Postsmoother Steps=1 +Multigrid Information::Coarse Grids::Grid Level=3 +Multigrid Information::Coarse Grids::Number of Equations=13500 +Multigrid Information::Coarse Grids::Number of Nonzero Terms=332992 +Multigrid Information::Coarse Grids::Number of Presmoother Steps=1 +Multigrid Information::Coarse Grids::Number of Postsmoother Steps=1 +########## Memory Use Summary ##########= +Memory Use Information= +Memory Use Information::Total memory used for data (Gbytes)=4.94251 +Memory Use Information::Memory used for OptimizeProblem data (Gbytes)=0 +Memory Use Information::Bytes per equation (Total memory / Number of Equations)= +715.063 +Memory Use Information::Memory used for linear system and CG (Gbytes)=4.3495 +Memory Use Information::Coarse Grids= +Memory Use Information::Coarse Grids::Grid Level=1 +Memory Use Information::Coarse Grids::Memory 
used=0.519816 +Memory Use Information::Coarse Grids::Grid Level=2 +Memory Use Information::Coarse Grids::Memory used=0.0650476 +Memory Use Information::Coarse Grids::Grid Level=3 +Memory Use Information::Coarse Grids::Memory used=0.00814937 +########## V&amp;V Testing Summary ##########= +Spectral Convergence Tests= +Spectral Convergence Tests::Result=PASSED +Spectral Convergence Tests::Unpreconditioned= +Spectral Convergence Tests::Unpreconditioned::Maximum iteration count=11 +Spectral Convergence Tests::Unpreconditioned::Expected iteration count=12 +Spectral Convergence Tests::Preconditioned= +Spectral Convergence Tests::Preconditioned::Maximum iteration count=2 +Spectral Convergence Tests::Preconditioned::Expected iteration count=2 +Departure from Symmetry |x'Ay-y'Ax|/(2*||x||*||A||*||y||)/epsilon= +Departure from Symmetry |x'Ay-y'Ax|/(2*||x||*||A||*||y||)/epsilon::Result=PASSED +Departure from Symmetry |x'Ay-y'Ax|/(2*||x||*||A||*||y||)/epsilon::Departure for + SpMV=2.57379e-08 +Departure from Symmetry |x'Ay-y'Ax|/(2*||x||*||A||*||y||)/epsilon::Departure for + MG=1.89158e-08 +########## Iterations Summary ##########= +Iteration Count Information= +Iteration Count Information::Result=PASSED +Iteration Count Information::Reference CG iterations per set=50 +Iteration Count Information::Optimized CG iterations per set=50 +Iteration Count Information::Total number of reference iterations=50 +Iteration Count Information::Total number of optimized iterations=50 +########## Reproducibility Summary ##########= +Reproducibility Information= +Reproducibility Information::Result=PASSED +Reproducibility Information::Scaled residual mean=0.000139647 +Reproducibility Information::Scaled residual variance=0 +########## Performance Summary (times in sec) ##########= +Benchmark Time Summary= +Benchmark Time Summary::Optimization phase=1e-06 +Benchmark Time Summary::DDOT=20.1473 +Benchmark Time Summary::WAXPBY=14.5966 +Benchmark Time Summary::SpMV=103.923 +Benchmark Time Summary::MG=622.465 +Benchmark Time Summary::Total=761.216 +Floating Point Operations Summary= +Floating Point Operations Summary::Raw DDOT=2.08742e+09 +Floating Point Operations Summary::Raw WAXPBY=2.08742e+09 +Floating Point Operations Summary::Raw SpMV=1.88249e+10 +Floating Point Operations Summary::Raw MG=1.05146e+11 +Floating Point Operations Summary::Total=1.28146e+11 +Floating Point Operations Summary::Total with convergence overhead=1.28146e+11 +GB/s Summary= +GB/s Summary::Raw Read B/W=1.03717 +GB/s Summary::Raw Write B/W=0.23969 +GB/s Summary::Raw Total B/W=1.27686 +GB/s Summary::Total with convergence and optimization phase overhead=1.27001 +GFLOP/s Summary= +GFLOP/s Summary::Raw DDOT=0.103608 +GFLOP/s Summary::Raw WAXPBY=0.143007 +GFLOP/s Summary::Raw SpMV=0.181142 +GFLOP/s Summary::Raw MG=0.168919 +GFLOP/s Summary::Raw Total=0.168344 +GFLOP/s Summary::Total with convergence overhead=0.168344 +GFLOP/s Summary::Total with convergence and optimization phase overhead=0.167441 +User Optimization Overheads= +User Optimization Overheads::Optimization phase time (sec)=1e-06 +User Optimization Overheads::Optimization phase time vs reference SpMV+MG time=6 +.85121e-08 +DDOT Timing Variations= +DDOT Timing Variations::Min DDOT MPI_Allreduce time=6.31348 +DDOT Timing Variations::Max DDOT MPI_Allreduce time=10.9565 +DDOT Timing Variations::Avg DDOT MPI_Allreduce time=8.88789 +Final Summary= +Final Summary::HPCG result is VALID with a GFLOP/s rating of=0.167441 +Final Summary::HPCG 2.4 rating for historical reasons is=0.168344 +Final 
+<p><strong>syslog-ng</strong></p>
+
+<p>And now for something different to close out. Peter Czanik of One Identity ran
+through some benchmarks of syslog-ng on the Unmatched. Peter noted that the
+Unmatched sits between the Raspberry Pi 3 and the SolidRun MACCHIATObin in
+performance. The boards were tested using a simple script generating various
+synthetic loads for stress testing of syslog-ng. The Raspberry Pi 3 was faster
+only with a low number of concurrent connections and a complex configuration.
+For all other tests of syslog-ng, the Unmatched board was faster. For higher
+loads, performance of the Unmatched board was close to the MACCHIATObin, which
+is a quad-core Cortex-A72 based board. For the sake of comparison, the same
+syslog-ng benchmarks ran more than 15x faster on an entry level AMD Ryzen
+desktop CPU.</p>
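+
+<p>I don&rsquo;t have Peter&rsquo;s exact script, but syslog-ng ships a load generator,
+<code>loggen</code>, which produces exactly this sort of synthetic traffic. A rough
+sketch of one such run (the target address, port, rate and connection count below
+are placeholders, not Peter&rsquo;s settings):</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ # push synthetic messages at the syslog-ng box over TCP for 60 seconds
+$ loggen --inet --stream --rate 1000 --active-connections 10 --interval 60 192.168.1.20 514</code></pre></div>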
+
+<figure><img src="https://www.gaborsamu.com/images/syslog-ng.jpg" />
+<figcaption>syslog-ng benchmark results</figcaption>
+</figure>
+
+<p>For more details, feel free to reach out to Peter on Twitter.</p>
+
+<blockquote class="twitter-tweet"><p dir="ltr" lang="en">Thanks to <a href="https://twitter.com/gabor_samu?ref_src=twsrc%5Etfw">@gabor_samu</a> I had some remote access to a <a href="https://twitter.com/SiFive?ref_src=twsrc%5Etfw">@SiFive</a> Unmatched <a href="https://twitter.com/hashtag/RiscV?src=hash&amp;ref_src=twsrc%5Etfw">#RiscV</a> board. Of course I tested it with syslog-ng. It's performance seems to be better than <a href="https://twitter.com/Raspberry_Pi?ref_src=twsrc%5Etfw">@Raspberry_Pi</a> 3 but slightly slower than a <a href="https://twitter.com/solid_run?ref_src=twsrc%5Etfw">@solid_run</a> MacchiatoBIN.<a href="https://t.co/ObM9LTgtq1">https://t.co/ObM9LTgtq1</a></p>
+&mdash; Peter Czanik (@PCzanik) <a href="https://twitter.com/PCzanik/status/1403019715375665156?ref_src=twsrc%5Etfw">June 10, 2021</a></blockquote>
+
+<p>That concludes this post. What struck me was how easily I could compile
+things such as HPL and HPCG on the system. Certainly, there are no
+record-breaking results to report here, but that was anticipated. Personally, I&rsquo;m
+really keen to see how RISC-V develops over time. Indeed, it&rsquo;s certainly
+something which is being eyed for HPC. Take for example the <a href="https://www.european-processor-initiative.eu/">European Processor
+Initiative (EPI)</a> where the
+(vector) accelerator will rely upon RISC-V. So I&rsquo;d wager that the future is
+bright for RISC-V in HPC - and as the saying goes, <em>variety is the
+spice of life</em>.</p>
+
+
+
+
+ To Compete, Your Team Needs a Specialty
+
+ 2021-09-11T01:00:00-06:00
+ https://hpc.social/2021/to-compete-your-team-needs-a-specialty
+ <h2 id="and-hpc-or-research-software-development-isnt-a-specialty">And ‘HPC’ or ‘Research Software Development’ isn’t a specialty</h2>
+
+<p>(Note: This post is adapted from <a href="https://www.researchcomputingteams.org/newsletter_issues/0090">#90</a> of the <a href="https://www.researchcomputingteams.org">Research Computing Teams Newsletter</a>)</p>
+
+<p>Quick: what’s your team’s specialty?</p>
+
+<p>Your team’s specialty is its reputation for what it’s good at. Not what <em>you</em> think your team is good at; what matters is what specific thing your stakeholders (funders, clients, institutional decision makers) think your specialty is. What they recommend you for to peers, what they recommend funding you for to decision makers.</p>
+
+<p>In the post-pandemic world, researchers are used to getting their support remotely from anywhere. To compete, your team will need well-defined specialties; and “HPC” or “research software development” isn’t a specialty.</p>
+
+<figure style="width: 45%; float: right;">
+  <img alt="Standout from the crowd by choosing a specific path." src="https://www.dursi.ca/assets/imgs/standout_sm.jpg" />
+  <figcaption><i>Stand out from the crowd by having your team choose a specific path and owning it.</i></figcaption>
+</figure>
+
+<p>The pandemic isn’t over, but the end of this phase has begun, and with September (“academic new years”) here, it’s a good time to think about the future. Last October <a href="https://www.dursi.ca/post/research-computing-in-the-aftertimes">I wrote about</a> what post-pandemic research computing is going to look like, and it’s holding up pretty well. With researchers now very comfortable getting research computing and data support virtually and with budgets under pressure, there is going to be a lot more competition for research computing and data teams. Research collaborations are going to be looking elsewhere more and more often - academic teams at other institutions, or with commercial companies (either commercial cloud vendors for compute, or emerging collaborations between well-known names, like <a href="https://www.nag.com/news/machine-learning-expertise-new-azure-hpc-ai-collaboration-centre">NAG and Azure</a>, for services).</p>
+
+<p>This is an opportunity for well-run, focussed teams to grow and prosper. But it’s going to take more planning and forethought than decades past, where one could count on having a near monopoly, being the only available seller of services to local researchers. It’s going to take developing and maintaining a strong reputation for a small set of specialties.</p>
+
+<p>“HPC” may sound and feel like a specialty within the community, but to researchers and decision makers it’s incredibly generic and so meaningless. It’s not a technical term, but a term of advocacy and marketing which has come to mean resources for anything from high throughput batch services to huge tightly coupled simulations to single-node multi-GPU code runs. Even <em>advocates</em> for the term define it as “anything bigger than what a researcher could provide on their own”, which is incredibly generic, and so necessarily meaningless. How can your team’s <em>specialty</em> be “anything”?
A team is expecting researchers to recommend them for “anything?” There’s a reason why VPRs would be just as happy contracting it out (<em>e.g.</em> see table 2 <a href="https://www.srainternational.org/blogs/srai-jra1/2019/12/09/operational-fiscal-management-of-core-facilities">here</a>).</p> + +<p>“Services and expertise for quickly analyzing public-health bioinformatics data”, “a platform for firing off and monitoring aerospace CFD calculations”, “a centre of excellence for digital humanities data curation and archiving”: these are examples of specialities - products, services - that researchers and institutional decision makers can see the value of and be willing to put money into, services and products and teams that researchers can recommend to each other. They are areas where a team could build a strong reputation - they could be the group that researchers recommend to collaborators when they chat about research needs.</p> + +<p>“Research Software Development” at least, to its credit, doesn’t pretend to be a narrow specialty - it’s a broad area which can encompass any area of software development in support of research work. As a result, a team can’t have a specialty in “Research Software Development”; it can have a specialty in “web applications and mobile apps for data collection”, or “GIS analysis tools” or “agent-based simulations for social sciences modelling”. But almost certainly not all three at the same time.</p> + +<p>Even so, research software development is too specific in one unhelpful sense. It could be that researchers are just looking for your team to write some software for them, hand it over, and be done. But increasingly, researchers are looking not just to be delivered some software, but for a team to host the software, run it, operate it - and/or collect and curate data to be used with the tool, for tests or otherwise. Focusing solely on research software development, as a separate activity from systems operation or data analysis and management, can be overly limiting.</p> + +<p>Ok, so what does all of this have to do with competition?</p> + +<p>One of my venial weaknesses is spending too much time on twitter. I’m seeing increasing concern there from research computing teams that cloud vendors or teams using cloud vendors are coming into their institutions and winning or trying to win contracts for projects that “should” have gone to the in-house teams. I’m hearing complaints that the external bids are for amounts of money 2x or more what the in-house team says they could do it for. Incredibly (and almost certainly incorrectly) I’ve even heard 10x.</p> + +<p>Reader, as hard as it is to believe, those complaining see this as an affront<sup id="fnref:1"><a class="footnote" href="https://www.dursi.ca/feed.xml#fn:1" rel="footnote">1</a></sup>, and a threat, rather than the enormous opportunity it is.</p> + +<p>If a contract at your institution is won - or even in serious contention - that is 2x what you estimate you could have provided the services for, that’s <strong>not</strong> evidence that the external contractor is overcharging. It’s evidence that your team is <em>undercharging</em>, that you could have proposed doing more to support that project and the researchers, and that you’re leaving money on the table. 
It’s also evidence that you haven’t fully convinced the relevant
+decision makers that you can provide that service; they don’t see it as being part of your specialty.</p>
+
+<p>Clearly your institution found it worthwhile to spend or consider spending that 2x, because they understood that it was worth at least that much to them to have those services. A bid for half that amount having failed or being questioned means that they really didn’t believe the in-house team could do it as well. That’s revealed-preferences data that you can use. (And if I truly believed someone at my institution was seriously considering spending 10x (1000%!) to work with an outside company rather than work with my team, well, that would occasion some serious soul searching.)</p>
+
+<p>Cloud providers and other external contractors do have advantages. They have a library of reference architectures they can deploy, so they can pitch (say) CFD solutions to the mech eng department, and bioinformatics pipeline solutions to the biology department. They can pull from a library of testimonials to demonstrate that they can do the work.</p>
+
+<p>But so can you. You have access to all the literature to search for how others have deployed such solutions. You have (or should have) testimonials from the people that matter - researchers at that very institution. And you have a network of deep relationships in the institution, relationships based on collaboration on research problems. Those relationships and shared expertise and history of collaboration are something the external contractors have no chance of matching.</p>
+
+<p>If you’re in danger of losing out on these sorts of competitions, it’s because you’re not communicating your specialities in a way that matters, in a way that’s convincing, to the people who could pay for your services. They can’t see how your “HPC batch services” connect with “a digital twinning platform for building simulation”. They don’t see “GIS exploration for private social sciences data” as being an obvious part of your “Research Software Development” effort - where’s the data part? If there’s a miscommunication there about what your team can provide, that’s on you and your team, not on the researchers or other decision makers.</p>
+
+<p>You have specialities - if you don’t know what they are, ask the researchers who keep coming back. How do they describe what you do? What would they say your speciality is, how do they talk about you to their colleagues? What would you have to demonstrate to them to have them recommend their colleagues to you?</p>
+
+<p>Similarly, you already have a million things you <em>don’t</em> do. You won’t fix a researcher’s printer, you don’t help them do graphic design for their posters, my guess is you don’t help them set up spreadsheets in OneDrive or set up lab webpages. So it’s not like declaring that there’s computing stuff you do and don’t help researchers with is some completely new thing, previously utterly unknown to your organization.</p>
+
+<p>Once you make explicit your specialties, you can start playing to your strengths, and communicating them endlessly. You can make a point of reaching out, having your team talk at conferences in the specialties, and at departmental colloquia. You can be well-regarded enough in your institution for those specialties that external contractors pitching work within your speciality never get in the door. You can start more easily hiring people that are interested in that specialty. A specialty builds on itself, snowballs.
You can start steering future work towards that specialty to build on it, and start directing work well outside the specialty to somewhere else - where it does fit inside their specialty.</p>
+
+<p>Yeah, that last part is scary. Sticking to this path isn’t easy. It means turning down opportunities that aren’t in or adjacent to your specialities. Especially for new teams, eager to please, this can be daunting.</p>
+
+<p>But as anywhere in research, your team’s reputation is all that matters. Your team <em>has</em> a reputation, has stuff it does and doesn’t do. Did you choose it, did you shape it, or are you content to just let it happen?</p>
+
+<p>Your team can be extremely strong in, specialize in, develop a reputation in, any of a number of things. But not all of the things. Being a manager or leader means choosing.</p>
+
+<hr />
+
+<div class="footnotes">
+  <ol>
+    <li id="fn:1">
+      <p>And affront was taken. There were lots of dark murmurings about slick sales teams trying to fool gullible senior administrators. And, you know, I’m sure it’s comforting for the teams that might lose out on these contracts to think that the vendor mesmerized the simpleton decision makers with their entrancing slide decks, and so hoodwinked them into considering an overpriced contract. But (a) have they never <em>seen</em> a vendor pitch? and (b) it’s self-serving twaddle to imagine that just because someone higher up made a decision to work with someone else they must clearly be dumb. Dismissing out of hand the possibility that there might be valid reasons to direct work elsewhere means they’re going to end up making a lot of poor and uninformed decisions. <a class="reversefootnote" href="https://www.dursi.ca/feed.xml#fnref:1">&#8617;</a></p>
+
+    </li>
+  </ol>
+</div>
+
+
+
+
+ Crimson vs Classic 1 NVMe Multi-OSD Analysis
+
+ 2021-08-30T01:00:00-06:00
+ https://hpc.social/2021/crimson-vs-classic-1-nvme-multi-osd-analysis
+ <p><a href="https://docs.google.com/spreadsheets/d/14HMaGxstvWSjobdyAlTdTG_yqhJh7K71Q5rSaxb1S6M/edit?usp=sharing">Spreadsheet</a> looking at Crimson vs Classic performance when scaling multiple OSDs on one NVMe drive. Done to simulate what we can hopefully expect from multi-reactor down the road. Includes cycles/OP comparisons as well.</p>
+
+
+
+
+ Booting HiFive Unmatched
+
+ 2021-08-08T11:57:08-06:00
+ https://hpc.social/2021/booting-hifive-unmatched
+ <p>For those of you who like system bootup videos, here is the latest addition
+to my collection. Here I&rsquo;ve captured the bootup of Ubuntu 21.04 on a
+SiFive HiFive Unmatched developer board. This is a capture of the bootup
+messages over the serial console using minicom and the appropriate USB
+serial cable.</p>
+
+<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;">
+
+</div>
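+
+<p>For anyone who wants to set up a similar capture: it&rsquo;s just a serial terminal
+session against the board&rsquo;s USB console adapter. A minimal sketch - the device
+name is an assumption and will vary by machine; 115200 baud is the Unmatched
+console speed:</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ # open the serial console at 115200 8N1 and log everything to a file
+$ minicom -D /dev/ttyUSB1 -b 115200 -C unmatched-boot.log</code></pre></div>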
+
+
+
+
+ Ceph Crimson 2021 Q2 Project Update
+
+ 2021-07-29T01:00:00-06:00
+ https://hpc.social/2021/ceph-crimson-2021-q2-project-update
+ <p>Slides are available <a href="https://docs.google.com/presentation/d/1S7QRmN9n7E6ffDdAIVibJipO0prd2bD2I5PFfzXMf0Y/edit?usp=sharing">here</a>.</p>
+
+
+
+
+ Very risqué computing
+
+ 2021-06-16T00:57:31-06:00
+ https://hpc.social/2021/very-risqué-computing
+ <p>This spring, we’ve been blessed with fantastic and almost tropical weather here
+in Southern Ontario, Canada. Normally at this time, after a long winter, the last
+thing on my mind is indoor activities. However, on June 3rd, I was greeted one
+morning by an email about an incoming delivery. It turns out it was one of the
+items I&rsquo;ve been waiting patiently for from <a href="https://www.crowdsupply.com/">Crowd Supply</a>
+in the hopes of keeping me busy during what I thought would be a cold
+spring season.</p>
+
+<p><strong>Christmas in June</strong></p>
+
+<p>As I have multiple things from Crowd Supply on order (don&rsquo;t ask!) I didn&rsquo;t quite
+know which item was arriving. It turns out it was the long-awaited <a href="https://www.sifive.com/boards/hifive-unmatched">SiFive
+HiFive Unmatched</a> RISC-V powered
+board. Those who know me (and I&rsquo;ve said this many times) understand that I don&rsquo;t
+like mainstream anything. And that also applies to computers. My interest in Arm
+based systems dates from the 1990&rsquo;s with the venerable Acorn Archimedes
+computers. However, all of the news around the RISC-V community has really piqued
+my interest. I passed on the SiFive Unleashed primarily because it didn&rsquo;t have a
+PCIe slot - although this was remedied with an optional, but costly add-on
+board.</p>
+
+<p>So when the SiFive Unmatched was announced with a competitive price and a bump
+to 16GB I jumped at the opportunity to purchase one. And it turned out to be a
+great decision.</p>
+
+<p>The HiFive Unmatched is based on the SiFive Freedom U740 SoC with four U74 cores
+and one S7 core and features an all-important PCIe slot. With 16GB of onboard
+RAM and an M.2 Key M slot for an SSD, my goal was to get the Unmatched set up as a
+desktop. For those looking to learn more about RISC-V, I&rsquo;d recommend starting with
+the RISC-V International foundation <a href="https://riscv.org/">site</a>. As per the RISC-V
+site, &ldquo;RISC-V is a free and open ISA enabling a new era of processor innovation
+through open standard collaboration.&rdquo; In simple terms, the ISA or instruction
+set architecture defines the set of instructions that are supported by the
+processor – so things like arithmetic, logic, and branch instructions to name
+a few. So it’s the way that programmers can issue commands to the processor to
+do “things”.</p>
+
+<p><strong>First impressions</strong></p>
+
+<p>I’ve become accustomed to developer boards being packaged in rather non-descript
+packaging. The first impression of the Unmatched board could not be further from
+this. The board was shipped in a lovely box and included an SD card with a
+bootable Freedom U SDK image, an I/O shield and a USB cable. So the first
+impression for me was quite positive.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/unmatched_collage.jpg" />
+</figure>
+
+<p><strong>Bootstrapping</strong></p>
+
+<p>I mounted the Unmatched board to my Streacom BC1 benchmark table and installed
+an XFX Radeon 2GB Heatsink edition to the PCIe slot. It’s an old GPU, but fanless
+– which I always appreciate. Plus, I’m not looking to do any serious gaming on
+the system.</p>
+
+<p>The first boot of the system from the SD card was a success (albeit a bit slow).
+I monitored the boot over the serial console (minicom) from another system. The
+Unmatched sprang to life and eventually booted up to a fully working XFCE
+desktop. This was actually a lot smoother than what I anticipated. Once I
+confirmed that everything was working as expected, I installed a Samsung 780
+NVMe SSD to the M.2 Key M slot and turned my focus to Ubuntu 21.04.</p>
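+
+<p>I won&rsquo;t reproduce the forum threads here, but the broad strokes of getting the
+Ubuntu image onto boot media look like the sketch below. The image filename is the
+one Canonical published for 21.04 (check cdimage.ubuntu.com for the current name),
+and <code>/dev/sdX</code> is a placeholder for the SD card device - double check it
+before writing:</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ # decompress the preinstalled riscv64 image and write it to the SD card in one go
+$ xz -dc ubuntu-21.04-preinstalled-server-riscv64+unmatched.img.xz | \
+    sudo dd of=/dev/sdX bs=4M status=progress conv=fsync</code></pre></div>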
+<p>The <a href="https://forums.sifive.com/">SiFive
+Forums</a> have proven an invaluable resource to help
+me get Ubuntu up and running on the system and to make sure the board was booting
+Ubuntu with a clock of 1.2 GHz. Of course, I followed the steps to install
+Ubuntu to the onboard NVMe, so I/O performance is naturally much better now.</p>
+
+<p><strong>Burning in</strong></p>
+
+<p>Does it run Linpack? Of course it does :) As with any new board I receive,
+running a High Performance Linpack benchmark is often one of the first things I
+do. It&rsquo;s a well-known benchmark which provides data for the Top500 ranking of
+supercomputers.</p>
+
+<p>I used the current <a href="https://www.netlib.org/benchmark/hpl/">HPL v2.3</a> and
+compiled it using the Ubuntu-supplied gcc, openmpi and math libraries.
+A few runs of HPL yielded a result of <em>2 GFlops</em> (see screenshots below).
+Although I&rsquo;ve not looked closely at what the theoretical peak of the U740 SoC
+is, the result is roughly what I expected given what I&rsquo;ve been reading up on
+the board. Ultimately, I was pleased that HPL compiled and ran to completion and it was a great way to stress the board.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/linpack_collage.jpg" />
+</figure>
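+
+<p>For anyone who wants to repeat this, the build really is as vanilla as it sounds.
+Roughly - and this is a sketch rather than my exact session - using Ubuntu&rsquo;s packaged
+toolchain, MPI and BLAS, with a <code>Make.riscv64</code> created by copying and editing
+one of the templates under <code>setup/</code> in the HPL tarball:</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ sudo apt install build-essential libopenmpi-dev libopenblas-dev
+$ wget https://www.netlib.org/benchmark/hpl/hpl-2.3.tar.gz
+$ tar xf hpl-2.3.tar.gz &amp;&amp; cd hpl-2.3
+$ # copy setup/Make.Linux_PII_CBLAS to Make.riscv64, point MPdir/LAdir at OpenMPI/OpenBLAS
+$ make arch=riscv64
+$ cd bin/riscv64 &amp;&amp; mpirun -np 4 ./xhpl</code></pre></div>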
+<p>Stay tuned to this channel for more risqué computing escapades&hellip;</p>
+
+
+
+
+ Research Computing Funding Should Mostly Just Go To Researchers
+
+ 2021-06-08T01:00:00-06:00
+ https://hpc.social/2021/research-computing-funding-should-mostly-just-go-to-researchers
+ <p>Research computing and data — supporting research efforts with
+software, computer and data expertise and resources — is fundamentally
+all of a piece. Today there are fewer and fewer hard boundaries
+between where the system requirements end and where the software
+or data resource requirements begin; and teams supporting researchers
+must have expertise across the stack.</p>
+
+<p>This convergence is a huge opportunity for research computing, but
+it’s also a challenge for funders. How to know how much to allocate to software,
+and how much to hardware? Within software, how many resources
+should go to new software development or procurement, and how much
+to maintenance? In hardware, what is the right balance between
+GPUs and CPUs or FPGAs, and within data, how much should we support
+curation efforts vs discovery, or archival vs near-line storage?</p>
+
+<p>Luckily, there is a simple, robust, time-tested mechanism research
+computing funders can easily take advantage of, and they should do so.
+Funders for research computing and data efforts can manage their portfolio
+effortlessly — in exactly the same way health funders
+know how to balance spending between reagents and lab staff, or the
+same way physical science funders know how much to allocate to
+trainee salaries vs tabletop equipment.</p>
+
+<p>Most research computing funding should go directly to researchers,
+via traditional funding councils, and the researchers should spend
+that research computing and data portion of their grants as and where
+they see fit.</p>
+
+<p>With research computing and data funding as an integral component
+of project funding, the same research review process that adjudicates
+the research proposal would weigh in on the computing and data
+resources requested to conduct it. This eliminates nonsensical but
+all-too-common situations where a researcher successfully wins computing
+cycles for a non-funded project, or gets funding for a postdoc for
+a project but doesn’t get enough compute or storage resources for
+the trainee to perform the project. It would also allow the
+researcher to adjust how they were using resources mid-stream; if
+after initial efforts it turned out that software development effort
+to improve the code was a better use of funding than throwing
+hardware at the problem, the money could be spent that way, rather
+than applying ahead of time for people time and computing resources
+separately and hoping that it all works out in the end.</p>
+
+<figure style="width: 50%; float: left;">
+  <img alt="A technician validates genetic variants identified through whole-exome sequencing at the Cancer Genomics Research Laboratory, part of the National Cancer Institute's Division of Cancer Epidemiology and Genetics (DCEG)." src="https://www.dursi.ca/assets/research_computing_funding_to_researchers/national-cancer-institute-LxPrHCm8-TI-unsplash.jpg" />
+  <figcaption>We fund researchers to buy all kinds of complex equipment; they can handle buying research computing services.</figcaption>
+</figure>
+<p>In this model, a researcher would include in their grant proposal
+a research computing and data component where necessary. As with
+purchasing wet lab equipment, animal experiments, or large
+physical apparatus — undertakings which are no less technical or
+complex than research computing — research grants would include
+cost justifications for the proposed research computing services
+or equipment, and funding agencies would rate the quality of the
+justification and the worthiness of the proposed goals versus the
+cost.</p>
+
+<p>A researcher whose proposal was successful would then, as with other
+line items, be free to spend that research computing and data
+component of their grant where they wish: on software development,
+data management and analysis, or access to storage and compute
+resources. Obviously as known entities with existing working
+relationships, local research computing centres — now working in a
+familiar core facility model — would have a huge advantage. But
+the researcher would not be limited to working with those centres,
+nor to working with only one service provider.</p>
+
+<p>This approach will work well for capacity computing, data, and
+expertise — those needs where there are many possible service
+providers. And in those areas, having the researcher in control
+of what services they can use where will help drive those vendors
+to provide the kinds and quality of services that researchers
+need. But not every kind of computing or expertise capability is
+available widely enough for researchers to easily buy in the
+quantities they need. Researchers can’t conjure into existence a (say)
+quantum computing shared facility one investigator-led grant at a
+time. Those new and emerging capabilities have to be handled
+separately, with existing funding councils setting priorities. Once
+those new capabilities are operational, they can and should be
+sustained with the same core-facility portable-funding model; if
+they can’t, maybe they didn’t need to be built.
Other needs like
+foundational infrastructures — research and education networks,
+advisory bodies — will also need to be handled separately by funders.</p>
+
+<p>But for the bulk of research computing, for capacity support of
+research using computing, data and related expertise, there’s no
+longer any need for endless surveys and consultations and projections
+to indirectly inform decision making. Parallel competitions for
+different kinds of support for a research project have long since
+stopped making sense. Internal computing organization debates about
+what kinds of services to offer should make way for researchers
+allocating the funds themselves. Let researchers decide what works
+best for advancing their research.</p>
+
+
+
+
+ Nobody Else Cares About Your Tech Stack
+
+ 2021-06-06T01:00:00-06:00
+ https://hpc.social/2021/nobody-cares-tech-stack
+ <h2 id="focus-on-your-researchers-and-funders-problems-not-your-technical-solution">Focus on your researchers’ and funders’ problems, not your technical solution</h2>
+
+<p>(Note: This post is adapted from <a href="https://newsletter.researchcomputingteams.org/archive/research-computing-teams-link-roundup-22-may-2021/">#75</a> of the <a href="https://www.researchcomputingteams.org">Research Computing Teams Newsletter</a>)</p>
+
+<p>Many of us who are managing research computing and data teams come up through the ranks doing research ourselves, and have
+experience in grantwriting for open research calls. That can actually <em>hold us back</em> from succeeding with getting grants
+for “digital research infrastructure” — building teams and infrastructure to support research.</p>
+
+<p>The thing is, digital research infrastructure calls, the sort that support research computing and data teams and tools,
+are more like applying to grants as a nonprofit than as a researcher. And we can learn a lot from how the nonprofit
+community writes funding proposals.</p>
+
+<p><img alt="We're not proposing a research project, we're proposing to solve problems a funder sees for a research community." src="https://www.dursi.ca/assets/nobody_tech_stack/nonprofit_not_researcher.png" style="float: right; width: 50%;" /></p>
+
+<p>Any funder has things they want to accomplish, and the goal as a potential fundee is to find something in the intersection of
+“work that helps the funder accomplish their goals” and “work that we are able to do and that is aligned
+with our goals”. Excellent work that isn’t in that first set won’t get funding. Money attached to work that isn’t
+in the second set is at best a distraction, at worst drains your team’s credibility.</p>
+
+<p>Most of us in research got our experience in grants from open disciplinary competitions where the funders’ and fundees’ goals
+are aligned — be seen to be funding/doing the best research. That means you don’t have to think about the distinction
+very much. The funder wants a portfolio of projects that are promising and could have impact - some will pan out and some
+won’t, but such is research. So everyone is focussed on “the best” work. There’s a lot of focus on methods and technology
+used, because those are relevant for assessing the best work. A new technology or method might be why it’s important to
+fund this work now - some key observation wasn’t possible before, but now it is, and the funder and team who makes the
+observation now will get the impact.
And methods can sabotage a project - a team that does great work with the wrong
+methods won’t get the best results.</p>
+
+<p>Special digital research infrastructure calls — like those that research computing projects typically fall under —
+and calls by nonprofit funders, are different. The funder has some particular change they want to see in the world;
+some community they want to see better served. They are generally much less willing to take a flyer on projects with
+only a modest chance of success, because failures won’t serve the community they want to see served. Something that
+successfully serves the community can always be improved in future iterations; something that fails to meet the community’s
+needs may well be unsalvageable.</p>
+
+<p>Methods and technology matter much less to these funders. They want to know that you can credibly deliver on the proposal,
+and that you have a plan, but the nuts and bolts typically are much less interesting.</p>
+
+<p>A nonprofit funder absolutely wants to understand how the after-school homework tutoring program you’re proposing will
+interact with the community — how it will find underserved students, how the tutoring will be delivered to the
+students, what indicators will be used to measure success — but the behind the scenes tech stack like what task
+management and tutor booking software you’ll use is completely irrelevant unless it’s to justify that you’ll
+be able to deliver the program. (And if you are in a position where you need details like that to justify your
+credibility for delivering the program, you are probably not in serious contention for the funding). Every paragraph
+you spend talking about the cool new tutor booking software you’re going to use is a paragraph that doesn’t get spent
+highlighting the funder’s goals being achieved — more underserved students doing better in school.</p>
+
+<p>A research computing funder who’s receptive to a “we’ll run a new research data management platform specifically
+aimed at [discipline X]” proposal absolutely wants to know that you’re familiar with the underserved area, that
+you’ve been successful delivering similar things before, and what metrics you’ll use for success. They do not care
+that your roadmap includes Kubernetes and some exciting new operators. Would they be disappointed if mid-stream, you
+pivoted to running the tasks on bare metal with Ansible? If not, why draw their attention and yours to obscure and
+uncertain details rather than to how your work will best advance their goals?</p>
+
+<p>The thing is, this same approach applies not just to research funders, but to anyone you plan to work with; any research
+group that contacts your team looking for something. They have a problem; the greater the up-front focus on understanding
+ and solving the researcher’s problem, the better the chance of success.</p>
+
+<p>How will you know what the funder’s or researcher’s problems and goals are? In the funder’s case, the call will sometimes
+spell it out; in the researcher’s case, they’ll usually say something. In both cases, it may require some question-asking
+and digging deeper; the researcher’s or even the funder’s “presenting problem” may not be the underlying issue,
+and the funder’s call may focus on one particular aspect rather than the overarching goals.
But the solution is the same;
+just ask a bunch of questions.</p>
+
+<p>“Do you mean they’ll just tell you?” I know a team in a Hackathon who went to an open pre-hackathon info
+session, and approached the organizer and sponsor in a gaggle afterwards. They asked the sponsor — the lead judge — what
+a successful Hackathon would be from their point of view. The sponsor — who, again, was the <em>lead judge</em> — answered with
+a particular problem they’d like solved as an example. That team, and mystifyingly only that team, delivered a partial but
+promising solution to the exact problem described in detail and in public, and they of course won first prize. How could
+they not? People organize special funding calls and hackathons <em>because</em> <em>they</em> <em>want</em> <em>other</em> <em>people</em> <em>to</em> <em>help</em> <em>them</em>
+<em>achieve</em> <em>their</em> <em>goals</em>. Yes, they’ll tell you, and if you keep asking questions they’ll keep talking about it until you politely explain
+that you have to leave for the evening. They put that contact information there and run informational sessions for a reason.</p>
+
+<p>The stakeholder side of research computing isn’t rocket surgery. But listening, digging in, and focussing on their goals
+is still rare enough that doing it well is almost an unfair advantage.</p>
+
+
+
+
+ Fun with an AArch64 NAS
+
+ 2021-03-05T20:57:31-07:00
+ https://hpc.social/2021/fun-with-an-aarch64-nas
+ <p>If you&rsquo;re anything like me, managing the data that is produced by our modern
+lifestyles is a chore. I&rsquo;m the designated archival person in the family and as
+such I&rsquo;m always looking for better ways to manage the huge volumes of data,
+from family photos and video to all of my music which I&rsquo;ve digitized from my
+huge personal CD library.</p>
+
+<p>My NAS over approximately the last 4 years has been my Turris Omnia router,
+which I&rsquo;ve equipped with both an external USB disk and an internal mSATA drive.
+The <a href="https://www.turris.com/en/omnia/overview/">Turris Omnia</a>, for those who
+aren&rsquo;t familiar with it, is a high performance router that runs open source
+software based on OpenWrt. It&rsquo;s served me very well without ever missing a beat and
+will continue as my home router for the foreseeable future. Although a NAS
+enclosure is available for the Turris Omnia, I never purchased one. That&rsquo;s
+because the Turris Omnia NAS option only has 2 drive bays. About a week ago,
+I started to ponder building a NAS which would take me to the next level of
+capacity and performance.</p>
+
+<p>As luck would have it, some acquaintances have also been looking at
+modernizing their NAS setups. So this fact has also provided some motivation.
+Most are looking at turnkey NAS solutions though, of which many shapes and
+sizes exist. When it comes to IT, I&rsquo;m never one to take the easy path. I like
+to push the envelope and try to build using non-X86 architectures whenever
+possible. And it&rsquo;s no different when it comes to a NAS for my household.</p>
+
+<p>A device which recently caught my attention is the <a href="https://www.crowdsupply.com/traverse-technologies/ten64">Traverse Ten64</a>, which is a high-performance Arm
+based networking platform. Certainly the fact that it&rsquo;s Arm based got my
+attention. Furthermore, it looks quite similar to the Turris Omnia but more
+powerful.
What really piqued my interest was the writeup <a href="https://www.crowdsupply.com/traverse-technologies/ten64/updates/building-a-nas-with-ten64-and-rockstor-and-new-turnkey-nas-bundle">Building a NAS with Ten64 and Rockstor</a>. Certainly I&rsquo;ve come across Rockstor before, which is a well-known NAS and cloud solution.
+But I was not aware of the beta <a href="https://forum.rockstor.com/t/rockstor-4-installer-recipe-call-for-beta-testers/7237">Rockstor 4 recipe for aarch64</a>. This
+really got me thinking! As the Traverse Ten64 is not shipping yet, how could I
+try out Rockstor on aarch64? Of course, there was the teasing of my
+acquaintances that I should just get a brand X NAS and be done with it. With
+that, the (NAS) gauntlet was laid down.</p>
+
+<p>Lurking in my basement is a very well used <a href="https://www.solid-run.com/arm-servers-networking-platforms/macchiatobin/">SolidRun MACCHIATObin</a> system. It ticked
+a number of important boxes: it&rsquo;s Arm (aarch64) based, has 3 SATA ports,
+doesn&rsquo;t consume tons of power and is quiet. It struck me that this would be a
+great platform to try out Rockstor on aarch64.</p>
+
+<p><strong>The plan</strong></p>
+
+<p>Much like what was described in the <em>Building a NAS with Ten64 and Rockstor</em>
+article, I decided to run Rockstor on my MACCHIATObin system under KVM. This
+would save me from having to change the host OS to OpenSUSE (from Fedora), as
+the Rockstor 4 ARM64EFI installation recipe is based on OpenSUSE. As a first
+step, I had to make certain that it was possible to use KVM on MACCHIATObin.
+After a bit of reading I had my answer and proceeded to get OpenSUSE Leap 15.2
+running under KVM with minimal fuss.</p>
+
+<p>Next, I followed the detailed instructions at the Rockstor site for setting up
+the version 4 beta on top of an existing OpenSUSE 15.2 installation. For this,
+I used the following pages:</p>
+
+<p><a href="https://forum.rockstor.com/t/rockstor-4-installer-recipe-call-for-beta-testers/7237">https://forum.rockstor.com/t/rockstor-4-installer-recipe-call-for-beta-testers/7237</a></p>
+
+<p><a href="https://github.com/rockstor/rockstor-installer/blob/master/README.md">https://github.com/rockstor/rockstor-installer/blob/master/README.md</a></p>
+
+<p><a href="https://forum.rockstor.com/t/rockstor-4-pi-build-first-observations/7367/2">https://forum.rockstor.com/t/rockstor-4-pi-build-first-observations/7367/2</a></p>
+
+<p>For storage, I relied on spinning rust. I attached 2 x 4TB Seagate IronWolf
+SATA drives to the MACCHIATObin. Overall the installation of Rockstor went very
+smoothly and with no real hiccups to speak of. At the end of the installation
+I simply pointed my web browser to the Rockstor node and logged in. Via the
+Rockstor UI, I prepared the 2 Seagate disks and created a storage pool
+configured with RAID1 (mirroring). Finally, I defined NFS, SMB and SFTP shares
+against the storage pool in order to start using the system - in anger.</p>
+
+<p>Below are a few photos of what the rig looks like.</p>
+
+<p><figure><img src="https://www.gaborsamu.com/images/nas1.jpg" />
+</figure>
+
+<figure><img src="https://www.gaborsamu.com/images/nas2.jpg" />
+</figure>
+
+<figure><img src="https://www.gaborsamu.com/images/nas3.jpg" />
+</figure>
+</p>
+
+<p>For performance testing, I relied upon some tried and trusted tools: <em>iperf</em>,
+<em>dd</em> and <em>iozone</em>. I NFS mounted the Rockstor server from a dual-socket POWER9
+system running CentOS Stream 8.</p>
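+
+<p>For completeness, the client-side mount was nothing exotic. A minimal sketch -
+the hostname and export path below are placeholders for whatever the Rockstor UI
+reports for your share:</p>
+
+<div class="highlight"><pre><code class="language-plaintext">$ # on the CentOS Stream 8 client
+$ sudo mkdir -p /mnt/raktar
+$ sudo mount -t nfs rockstor:/export/raktar /mnt/raktar
+$ df -h /mnt/raktar</code></pre></div>
+
+<p>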
And of course, my switch in this case +connecting it all together is the Turris Omnia (Gigabit Ethernet throughout)</p> + +<p>Some data from the sample runs follows:</p> + +<p><strong>A quick bandwidth test with iperf</strong></p> + +<p>a. Client side output +<div class="highlight"><pre><code class="language-bash"><span style="color: #f92672;">]</span>$ ./iperf3 -c 192.168.1.YYY +Connecting to host 192.168.1.YYY, port <span style="color: #ae81ff;">5201</span> +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> local 192.168.1.XXX port <span style="color: #ae81ff;">33738</span> connected to 192.168.1.YYY port <span style="color: #ae81ff;">5201</span> +<span style="color: #f92672;">[</span> ID<span style="color: #f92672;">]</span> Interval Transfer Bandwidth Retr Cwnd +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 0.00-1.00 sec <span style="color: #ae81ff;">114</span> MBytes <span style="color: #ae81ff;">959</span> Mbits/sec <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">444</span> KBytes +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 1.00-2.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">942</span> Mbits/sec <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">465</span> KBytes +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 2.00-3.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">488</span> KBytes +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 3.00-4.00 sec <span style="color: #ae81ff;">113</span> MBytes <span style="color: #ae81ff;">948</span> Mbits/sec <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">667</span> KBytes +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 4.00-5.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">942</span> Mbits/sec <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">700</span> KBytes +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 5.00-6.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">940</span> Mbits/sec <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">732</span> KBytes +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 6.00-7.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">937</span> Mbits/sec <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">882</span> KBytes +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 7.00-8.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">926</span> KBytes +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 8.00-9.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">970</span> KBytes +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 9.00-10.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec <span style="color: 
#ae81ff;">0</span> <span style="color: #ae81ff;">1017</span> KBytes +- - - - - - - - - - - - - - - - - - - - - - - - - +<span style="color: #f92672;">[</span> ID<span style="color: #f92672;">]</span> Interval Transfer Bandwidth Retr +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 0.00-10.00 sec 1.10 GBytes <span style="color: #ae81ff;">943</span> Mbits/sec <span style="color: #ae81ff;">0</span> sender +<span style="color: #f92672;">[</span> 4<span style="color: #f92672;">]</span> 0.00-10.00 sec 1.09 GBytes <span style="color: #ae81ff;">940</span> Mbits/sec receiver + +iperf Done.</code></pre></div> +</p> + +<p>b. Server side output +<div class="highlight"><pre><code class="language-bash"> <span style="color: #75715e;"># ./iperf3 -s</span> +warning: this system does not seem to support IPv6 - trying IPv4 +----------------------------------------------------------- +Server listening on <span style="color: #ae81ff;">5201</span> +----------------------------------------------------------- +Accepted connection from 192.168.1.XXX, port <span style="color: #ae81ff;">33736</span> +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> local 192.168.1.YYY port <span style="color: #ae81ff;">5201</span> connected to 192.168.1.XXX port <span style="color: #ae81ff;">33738</span> +<span style="color: #f92672;">[</span> ID<span style="color: #f92672;">]</span> Interval Transfer Bandwidth +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 0.00-1.00 sec <span style="color: #ae81ff;">108</span> MBytes <span style="color: #ae81ff;">901</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 1.00-2.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">938</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 2.00-3.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 3.00-4.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">938</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 4.00-5.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 5.00-6.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 6.00-7.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">936</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 7.00-8.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 8.00-9.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 9.00-10.00 sec <span style="color: #ae81ff;">112</span> MBytes <span style="color: #ae81ff;">941</span> Mbits/sec +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 10.00-10.05 sec 4.96 MBytes <span style="color: #ae81ff;">941</span> Mbits/sec +- - - - - - 
- - - - - - - - - - - - - - - - - - - +<span style="color: #f92672;">[</span> ID<span style="color: #f92672;">]</span> Interval Transfer Bandwidth +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 0.00-10.05 sec 0.00 Bytes 0.00 bits/sec sender +<span style="color: #f92672;">[</span> 5<span style="color: #f92672;">]</span> 0.00-10.05 sec 1.09 GBytes <span style="color: #ae81ff;">936</span> Mbits/sec receiver +----------------------------------------------------------- +Server listening on <span style="color: #ae81ff;">5201</span> +-----------------------------------------------------------</code></pre></div> +</p> + +<p><strong>Write 8GB to mountpoint with dd</strong></p> + +<div class="highlight"><pre><code class="language-bash">$ time dd <span style="color: #66d9ef;">if</span><span style="color: #f92672;">=</span>/dev/zero of<span style="color: #f92672;">=</span>/mnt/raktar/test_dd bs<span style="color: #f92672;">=</span>8K count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1024000</span> +1024000+0 records in +1024000+0 records out +<span style="color: #ae81ff;">8388608000</span> bytes <span style="color: #f92672;">(</span>8.4 GB, 7.8 GiB<span style="color: #f92672;">)</span> copied, 76.8675 s, <span style="color: #ae81ff;">109</span> MB/s + +real 1m16.872s +user 0m0.925s +sys 0m2.584s</code></pre></div> + +<p><strong>Write with 4GB to mountpoint with dd</strong></p> + +<div class="highlight"><pre><code class="language-bash">$ time dd <span style="color: #66d9ef;">if</span><span style="color: #f92672;">=</span>/dev/zero of<span style="color: #f92672;">=</span>/mnt/raktar/test_dd bs<span style="color: #f92672;">=</span>4K count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1024000</span> +1024000+0 records in +1024000+0 records out +<span style="color: #ae81ff;">4194304000</span> bytes <span style="color: #f92672;">(</span>4.2 GB, 3.9 GiB<span style="color: #f92672;">)</span> copied, 39.5062 s, <span style="color: #ae81ff;">106</span> MB/s + +real 0m39.509s +user 0m0.815s +sys 0m2.093s</code></pre></div> + +<p><strong>Thrashing the spinning rust with iozone</strong></p> + +<div class="highlight"><pre><code class="language-bash">$ time ./iozone -a -R -c -f /mnt/raktar/testfile + Iozone: Performance Test of File I/O + Version $Revision: 3.489 $ + Compiled <span style="color: #66d9ef;">for</span> <span style="color: #ae81ff;">64</span> bit mode. + Build: linux-powerpc64 + + Contributors:William Norcott, Don Capps, Isom Crawford, Kirby Collins + Al Slater, Scott Rhine, Mike Wisner, Ken Goss + Steve Landherr, Brad Smith, Mark Kelly, Dr. Alain CYR, + Randy Dunlap, Mark Montague, Dan Million, Gavin Brebner, + Jean-Marc Zucconi, Jeff Blomberg, Benny Halevy, Dave Boone, + Erik Habbinga, Kris Strecker, Walter Wong, Joshua Root, + Fabrice Bacchella, Zhenghua Xue, Qin Li, Darren Sawyer, + Vangel Bojaxhi, Ben England, Vikentsi Lapa, + Alexey Skidanov, Sudhir Kumar. + + Run began: Sat Mar <span style="color: #ae81ff;">6</span> 15:06:52 <span style="color: #ae81ff;">2021</span> + + Auto Mode + Excel chart generation enabled + Include close in write timing + Command line used: ./iozone -a -R -c -f /mnt/raktar/testfile + Output is in kBytes/sec + Time Resolution <span style="color: #f92672;">=</span> 0.000001 seconds. + Processor cache size set to <span style="color: #ae81ff;">1024</span> kBytes. + Processor cache line size set to <span style="color: #ae81ff;">32</span> bytes. 
+ File stride size set to 17 * record size.
+                                                             random    random      bkwd    record    stride
+      kB  reclen     write   rewrite      read    reread      read     write      read   rewrite      read    fwrite  frewrite     fread   freread
+      64       4       766       512    161629    160374    177677      1239    161629       417     59867      1201       512    163699    170672
+      64       8      1318       665    162019    168847    165823      1612    111531       473    112892      1028       551    102074    103891
+      64      16      1425       558     87901    104742    100620      1271    101764       557    102229      1556       550    177324    168002
+      64      32       601       602    171108    180788    175355      1272    101113       563    111300      1224       604     91684     94960
+      64      64      1480       601    163301    172982    173429      1551     94692       565    105941      1542       548    103053    181276
+     128       4      3441      1191    297797    295665    302495      3197    188520      1236    186555      2446      1337    347758    199668
+     128       8      2800       392    193899    342216    268999      1559    302495      1123    198708      3049      1101    199372    288205
+     128      16      1246      1224    219148    333293    338761      2072    222971      1123    211966      3126      1201    326403    350711
+     128      32      3066      1192    200939    356533    340264      3225    207141      1348    202913      2506      1324    228961    349797
+     128      64      2307      1192    231529    344190    334331      3208    229353      1112    226164      3069      1198    242839    320748
+     128     128      1021      1103    330828    343091    329812      3123    227796      1127    224932      3033      1198    343091    259633
+     256       4      5550      2182    561637    580461    552960      5128    381597      2449    323630      3618      2407    367241    587127
+     256       8      2831      2038    375328    582983    621448      4151    585845      2472    641497      6170      1371    603630    609455
+     256      16      5321      2419    735537    750970    561343      6295    707430      2225    624339      6049      2382    632429    688381
+     256      32      4422      2200    658411    668246    718796      6395    688381      2413    595594      4987      2400    773140    807442
+     256      64      6770      2399    785010    800221    686620      5007    717355      2429    609455      4842       995    654798    738065
+     256     128      6178      2180    663292    654399    684432      5100    658008      2247    668246      5947      2047    635047    678807
+     256     256      4612      2035    670332    748875    644965      6361    727070      2051    810490      7763      2052    686182    697776
+     512       4      8478      4432   1155924   1160923    911184     10157    966113      2508    936209      5425      3468    982467    994294
+     512       8     12369      4387   1099128   1169140   1135150     10039   1073309      3805   1053821      9847      4367   1140576   1152822
+     512      16     10894      4782   1355045   1380303   1166599     10031   1276891      4449   1227269      7313      4324   1383861   1518938
+     512      32      7773      3488   1248678   1329871   1308798     12665   1391032      5389   1199171      7036      4065   1422357   1471074
+     512      64     10716      4810   1375881   1458090   1276891      9979   1286067      4868   1216149      7310      3990   1537424   1528669
+     512     128     13507      4776   1417662   1429934   1242896      8310   1347393      4543   1115688      7989      3484   1127995   1289930
+     512     256      5408      4314   1369738   1382969   1296159     12632   1333173      4149   1458090      7946      4821   1245780   1368865
+     512     512      5106      3765   1163439   1225169   1296159     10144   1387437      4428   1273861      9715      3742   1355045   1413929
+    1024       4     11637      8251   1600327   1609925   1340164     14150   1326505      8366   1220575     13853      7632   1434138   1594978
+    1024       8     13016      8906   1874871   1875690   1762539     14240   1633188      8954   1695065     16636      7555   1790460   2004366
+    1024      16     15004      8720   2102484   2120127   1976691     12715   1968537      7704   1614767     19160      6546   2052253   2206161
+    1024      32     14118      9301   2236023   2360124   2321847     13136   2106609      9533   2169388     19593      8130   2198257   2338280
+    1024      64     15127      7499   2326879   2409105   2155236     14981   2286009      8050   2391665     19566      9624   2120127   2516377
+    1024     128     13110      8832   2360124   2409105   2503178     14925   2265510      8879   2321847     15910      7608   2165014   2478620
+    1024     256     14024      9611   2432298   2450338   2485792     12700   2286009      8327   2316837     15960      8814   2514903   2546213
+    1024     512     15271      8842   1783768   2169388   2290886     12649   2337008      5856   2003431     12456      8124   2573677   2632033
+    1024    1024     14946      9055   2592318   2751762   2767722     13950   2426801      8293   2503178     13786      8807   2516377   2111788
+    2048       4     20893     14967   2214011   2240576   1747411     22968   1739978     16843   1762469     27893     13966   2124220   2194777
+    2048       8     23866     15022   2752473   2592166   2386884     22881   2359349     16556   2434922     20044     14155   2760434   2809182
+    2048      16     15582     16221   3346546   3384788   3061485     22716   2938934     16490   3074635     28567     12449   3374151   3229534
+    2048      32     22680     14979   3777778   3841986   3717289     25448   3561619     18210   3334853     27811     16144   3901313   3929871
+    2048      64     21076     15019   3999400   3922692   3806235     20393   3749743     15463   3799501     32083     14991   4038890   4240255
+    2048     128     19838     16169   4229816   4250747   4164199     16931   3864455     17908   4162181     22159     15187   4248644   4453499
+    2048     256     12745      9084   4259178   4196751   4293237     20211   3812993     17887   4054139     28122     13249   4154130   4441984
+    2048     512     26016     15202   4267642   4229816   4453499     24891   4154130     16492   4444282     28216     15017   4188565   4405533
+    2048    1024     23457     19229   3917325   4130162   4154130     20590   4196751     11737   3650932     24566     15136   2482779   2659582
+    2048    2048     16566     16249   2494314   2635106   2653011     23273   1933883     15579   2176427     25411     16236   2133188   1906837
+    4096       4     38048     24611   2298370   2162402   1788674     35265   1640426     32323   1618939     40241     24720   2269226   2391139
+    4096       8     39802     26380   3503374   3519162   3327622     37629   2294686     34059   2233238     46115     19740   2363506   2585847
+    4096      16     23628     27804   2970339   3013586   2756362     35645   3821310     33360   3817065     45585     28058   4149921   4280194
+    4096      32     38047     26565   3290024   3543113   3117487     38176   5761565     32992   5818150     44147     22494   4934183   6847827
+    4096      64     34621     26347    114155   5007531   4791076     33686   4270618     33444   3706707     45574     26252   3558525   3621537
+    4096     128     29910     27839   3533639   3580776   3586757     38303   3489144     40310   4735608     37164     18835   3717133   3680502
+    4096     256     33432     26663   4934183   5101205   5424983     40428   3531460     22529   4906002     40634     20792   3343814   3433360
+    4096     512     32295     20977   5883908   5424983   5468151     37088   3664018     27316   5332372     36376     15273   5217395   5558378
+    4096    1024     40430     20977   3682869   3385325   3647681     41777   3771811     31407   3792628     36666     27891   4157956   3706707
+    4096    2048     21802     26450   3486312   3911800   3719547     38135   4982841     33183   5375755     36398     23427   4904602   5020702
+    4096    4096     32822     26575   5382492   5190597   5263753     37140   3404782     27528   4965558     39529     26352  14427096  14271297
+    8192       4     41208     34571   2528346   2205119   1642685     37514   1790147     61064   1945315     49482     29458   3030545   3136499
+    8192       8     45073     31839   4171488   4498640   3852936     49560   3365696     63898   3840876     42399     31969   3944043   4164914
+    8192      16     47033     29557   4500997   4373801   3995873     42362   3912159     63290   4067778     26672     44661    111474   5271672
+    8192      32     53750     34202   2863357   5287898   2123356     30104   4662231     62863   5000831     58402     30695   6480761   6809564
+    8192      64     42802     34921   5901904   5971654   5602613     37601   5313247     67584   5723946     40963     38040   5712526   5871647
+    8192     128     43970     34463   5817959   5988306   6071906     46729   5454094     67699   6140266     42889     37885   5785631   5975808
+    8192     256     43887     33123   5910025   5940679   6054786     43865   5745001     68143   5988306     43016     29500   5814021   6196743
+    8192     512     41983     34669   5944791   5940679   6239504     44618   5696426     66238   6947247     38716     31906   6145757   5997714
+    8192    1024     39435     37870   6399884   5830794   6067617     41948   6094523     62955   7937463     40642     28386   6924845   7735533
+    8192    2048     39275     25258   6072979   7427838   7440706     43609   6374949     35618   8743368     40711     31933   5336352   5142283
+    8192    4096     46473     28679   4946832   5126937   4876623     33948   5097274     47802   8086916     39041     37522   5010310   4876623
+    8192    8192     40918     29668   4998648   5056765   4979812     40625   5088216     29293   5688881     51283     33280   4929091   4710810
+   16384       4     58065     45957   3120803   3071013   2228850     55784   2116542    103626   2184496     57936     42028   2900803   3032391
+   16384       8     57669     47322   4914218   4948188   2688155     51293   3717820    131949   3654745     51302     43187   4729877   4761338
+   16384      16     52989     44720   5923681   5845080   4901250     60184   4996397    139152   5012066     58686     47250   5811959   5964297
+   16384      32     59565     55140   7817198   7717122   6889995     41780   6452650    125794   6537361     60210     39198   8233027   7481050
+   16384      64     48498     47273   7724930   7853828   7350615     59972   8312700    147326   8484089     60358     47194   7765083   7827884
+   16384     128     52363     46150   8257760   7933620   7692072     51845   7877236    126376   9277883     58286     50090   7926299   8270680
+   16384     256     57480     47199   9199633   8225143   8003847     53723   7406867    151085   8459025     54447     36649   9729770   9786581
+   16384     512     56439     58500   8533606   8240925   8673628     53771     94848     49413   9267872     60466     51694   8623560   8351087
+   16384    1024     56339     45925   7182365   8291637   8291637     67496  10304615     85962  19068585     46184     53250   8351087   8659420
+   16384    2048     69858     50186  11009550  10290727  10272267     55174   8168437     91274  12859651     45328     45915   8286637   8467363
+   16384    4096     57811     48674   8919040   8696680   8692280     67235   9334595     81668  10659461     60520     47228   8359214   8346016
+   16384    8192     54791     39197   8240925   9984224   9056567     58229   7628034     63197   8440323     56615     46231   7460745   7904418
+   16384   16384     52309     48775   7239873   7606082   7692072     59469   6798654     47149   7010184     66211     50302   7135381   7331011
+   32768      64     68452     63438  10767866  11145983  10587860     66188   8553152    196836  10262914     70797     63662  10445431  12477655
+   32768     128     74330     56587  10937536  11203222  11976409     68123  10425622    270919  12794771     69441     63307  10560200  11665355
+   32768     256     73302     59555  10796626  11473510  12383220     65037  10701629    164278  12277028     68121     58698  10958466  12068958
+   32768     512     72445     60667  11631788  12359834  13567777     72396  10879527    171307  12724879     69853     61717   9849567  11800570
+   32768    1024     71159     67179   9858045  10186091  11123431     65485  11148695    275460  12961285     69705     61881  10151482  10709134
+   32768    2048     73047     60915  11582774  12755585  13128427     72514  11222432    219878  20532259     68973     62478  10807663  12337643
+   32768    4096     68780     64220  10252961  10835782  11594500     62992  10227021    132334  18702216     69636     63082   9683705  12305608
+   32768    8192     70705     63127   9825626  10875222  11311097     70012  10246845    118346  10694135     64358     64404   9902793  12516290
+   32768   16384     72368     59947   9375862   9728947   9881434     67853   8419541     89947   8839196     60092     58554    112629   9533907
+   65536      64     82509     79055  10401087  11421453  11023864     77645   9652016    440101  10654675     77948     77407  10149535  11630216
+   65536     128     84080     78150  10842965  12055622  12293394     82366  10710305    535691  12327576     79992     77452  11395884  13320434
+   65536     256     80498     72047  10597577  12559643  12197385     80466  11233809    290491  11908856     81132     76306  10447339  12629467
+   65536     512     81152     74496  10827162  12076278  13295950     75815  11061126    460655  12509344     78097     76545  11636124  14444598
+   65536    1024     83013     71312  11366197  12867728  13396389     80359  11770154    247265  12900943     81124     76239  11403448  13896193
+   65536    2048     83236     76272    119806  11270196  10942676     82650  11677648    247548  13306893     82129     78141  12022929  13579712
+   65536    4096     82357     76272  11948197  12920955  14283968     82385  11696531    258841  22230669     79791     78366  12113531  14039564
+   65536    8192     85060     77279  13008396  13655947  14105847     78461  12654468    222259  14807195     78973     77165  11548130  12850283
+   65536   16384     82554     77219  11929012  11872336  11831964     79076  11770154    176494  12284603     81136     76313  11644504  11700514
+  131072      64     88544     83386  11835991  12145325  11670659     87840  11391587    419979  11686040     88820     82872  11811578  12481740
+  131072     128     88630     81787  12860445  13091354  13302644     86476  12881840    467063  13230293     88445     79831  12765186  13511230
+  131072     256     90497     84498  12881840  13012339  12985292     87772  13108523    414247  13319081     89824     83386  12600753  13263809
+  131072     512     88596     82448  12769930  12958357  13536514     85905  13670482    416191  14118093     83436     84523  13137656  13840848
+  131072    1024     89407     78835  13194095  13263809  13855499     84908  13827619    442800  13946536     82538     78850  13371887  14115193
+  131072    2048     88707     84477  13946536  14042363  14785216     86420  14674318    457513  14565072     86827     83928  12639283  13414303
+  131072    4096     89182     83355  13417250  13516877  13986987     85565  13788430    497857  14016943     83531     83394  13105710  13740526
+  131072    8192     88133     82921  13332647  13313597  13712081     86749  13703194    415595   1591452     90140     82009  13259651  13324246
+  131072   16384     88919     83384  12569350  12383894  12445289     87123  12239423    273752  12745062     87518     81834  11830642  11918875
+  262144      64     92046     89238  12412246  12322798  11471432     90962  11729024    910557  11846678     92929     85955  12535646  12852148
+  262144     128     93163     87728  13213567  13137311  12926184     90851  12826360    900170  13110368     90907     83813    106392  11100705
+  262144     256     89530     87436  13288465  13291356  13391725     89797  12979437    924856  13410345     91811     88369  13264418  13639078
+  262144     512     88940     83024  13982505  13753728  14127128     91268  13858259    863287  13773196     91523     87486  13691728  14047711
+  262144    1024     94344     87151  14090737  13934836  14349852     91634  14453412    884871  14068741     87858     87861  14205245  14639112
+ <span style="color:
#ae81ff;">262144</span> <span style="color: #ae81ff;">2048</span> <span style="color: #ae81ff;">93323</span> <span style="color: #ae81ff;">83784</span> <span style="color: #ae81ff;">14630931</span> <span style="color: #ae81ff;">14471865</span> <span style="color: #ae81ff;">15098659</span> <span style="color: #ae81ff;">89320</span> <span style="color: #ae81ff;">14816258</span> <span style="color: #ae81ff;">837400</span> <span style="color: #ae81ff;">14496670</span> <span style="color: #ae81ff;">91384</span> <span style="color: #ae81ff;">87290</span> <span style="color: #ae81ff;">14246105</span> <span style="color: #ae81ff;">14644962</span> + <span style="color: #ae81ff;">262144</span> <span style="color: #ae81ff;">4096</span> <span style="color: #ae81ff;">91123</span> <span style="color: #ae81ff;">85736</span> <span style="color: #ae81ff;">14333390</span> <span style="color: #ae81ff;">14121866</span> <span style="color: #ae81ff;">14464820</span> <span style="color: #ae81ff;">86524</span> <span style="color: #ae81ff;">14602367</span> <span style="color: #ae81ff;">831004</span> <span style="color: #ae81ff;">14610711</span> <span style="color: #ae81ff;">92440</span> <span style="color: #ae81ff;">87452</span> <span style="color: #ae81ff;">14302812</span> <span style="color: #ae81ff;">14770479</span> + <span style="color: #ae81ff;">262144</span> <span style="color: #ae81ff;">8192</span> <span style="color: #ae81ff;">93882</span> <span style="color: #ae81ff;">88322</span> <span style="color: #ae81ff;">14379692</span> <span style="color: #ae81ff;">14047711</span> <span style="color: #ae81ff;">14591902</span> <span style="color: #ae81ff;">89510</span> <span style="color: #ae81ff;">14455122</span> <span style="color: #ae81ff;">681831</span> <span style="color: #ae81ff;">14176855</span> <span style="color: #ae81ff;">89511</span> <span style="color: #ae81ff;">87001</span> <span style="color: #ae81ff;">13901010</span> <span style="color: #ae81ff;">14244628</span> + <span style="color: #ae81ff;">262144</span> <span style="color: #ae81ff;">16384</span> <span style="color: #ae81ff;">89497</span> <span style="color: #ae81ff;">85428</span> <span style="color: #ae81ff;">12697054</span> <span style="color: #ae81ff;">12364092</span> <span style="color: #ae81ff;">12420940</span> <span style="color: #ae81ff;">91501</span> <span style="color: #ae81ff;">12849594</span> <span style="color: #ae81ff;">568459</span> <span style="color: #ae81ff;">13704015</span> <span style="color: #ae81ff;">90625</span> <span style="color: #ae81ff;">80203</span> <span style="color: #ae81ff;">12796951</span> <span style="color: #ae81ff;">12740160</span> + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">64</span> <span style="color: #ae81ff;">94358</span> <span style="color: #ae81ff;">91054</span> <span style="color: #ae81ff;">13009600</span> <span style="color: #ae81ff;">12589457</span> <span style="color: #ae81ff;">11779147</span> <span style="color: #ae81ff;">91311</span> <span style="color: #ae81ff;">11947459</span> <span style="color: #ae81ff;">1255265</span> <span style="color: #ae81ff;">11876481</span> <span style="color: #ae81ff;">89799</span> <span style="color: #ae81ff;">87753</span> <span style="color: #ae81ff;">12691250</span> <span style="color: #ae81ff;">12857393</span> + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">128</span> <span style="color: #ae81ff;">92317</span> <span style="color: #ae81ff;">88896</span> <span style="color: #ae81ff;">13905388</span> <span 
style="color: #ae81ff;">13609686</span> <span style="color: #ae81ff;">13317259</span> <span style="color: #ae81ff;">92937</span> <span style="color: #ae81ff;">13277137</span> <span style="color: #ae81ff;">1300817</span> <span style="color: #ae81ff;">13449123</span> <span style="color: #ae81ff;">92321</span> <span style="color: #ae81ff;">86529</span> <span style="color: #ae81ff;">13869692</span> <span style="color: #ae81ff;">14155027</span> + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">256</span> <span style="color: #ae81ff;">95697</span> <span style="color: #ae81ff;">90554</span> <span style="color: #ae81ff;">13770592</span> <span style="color: #ae81ff;">13504875</span> <span style="color: #ae81ff;">13470626</span> <span style="color: #ae81ff;">92289</span> <span style="color: #ae81ff;">13213552</span> <span style="color: #ae81ff;">1065322</span> <span style="color: #ae81ff;">13137375</span> <span style="color: #ae81ff;">93384</span> <span style="color: #ae81ff;">92184</span> <span style="color: #ae81ff;">13450851</span> <span style="color: #ae81ff;">13637369</span> + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">512</span> <span style="color: #ae81ff;">92883</span> <span style="color: #ae81ff;">88220</span> <span style="color: #ae81ff;">14063505</span> <span style="color: #ae81ff;">13708355</span> <span style="color: #ae81ff;">13953121</span> <span style="color: #ae81ff;">92841</span> <span style="color: #ae81ff;">14061617</span> <span style="color: #ae81ff;">1341596</span> <span style="color: #ae81ff;">14121485</span> <span style="color: #ae81ff;">91142</span> <span style="color: #ae81ff;">90098</span> <span style="color: #ae81ff;">14090720</span> <span style="color: #ae81ff;">14200640</span> + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">1024</span> <span style="color: #ae81ff;">95296</span> <span style="color: #ae81ff;">89887</span> <span style="color: #ae81ff;">14497129</span> <span style="color: #ae81ff;">14125204</span> <span style="color: #ae81ff;">14520774</span> <span style="color: #ae81ff;">91158</span> <span style="color: #ae81ff;">14661640</span> <span style="color: #ae81ff;">1315825</span> <span style="color: #ae81ff;">14669954</span> <span style="color: #ae81ff;">93129</span> <span style="color: #ae81ff;">88311</span> <span style="color: #ae81ff;">14578534</span> <span style="color: #ae81ff;">14875572</span> + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">2048</span> <span style="color: #ae81ff;">95723</span> <span style="color: #ae81ff;">88312</span> <span style="color: #ae81ff;">14628576</span> <span style="color: #ae81ff;">14222223</span> <span style="color: #ae81ff;">14656851</span> <span style="color: #ae81ff;">90120</span> <span style="color: #ae81ff;">14689160</span> <span style="color: #ae81ff;">1008900</span> <span style="color: #ae81ff;">14199907</span> <span style="color: #ae81ff;">93524</span> <span style="color: #ae81ff;">90571</span> <span style="color: #ae81ff;">14814641</span> <span style="color: #ae81ff;">15050689</span> + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">4096</span> <span style="color: #ae81ff;">96008</span> <span style="color: #ae81ff;">89645</span> <span style="color: #ae81ff;">14907236</span> <span style="color: #ae81ff;">14518761</span> <span style="color: #ae81ff;">15025186</span> <span style="color: #ae81ff;">93152</span> <span style="color: #ae81ff;">14806661</span> <span style="color: 
#ae81ff;">1239205</span> <span style="color: #ae81ff;">14519144</span> <span style="color: #ae81ff;">92925</span> <span style="color: #ae81ff;">90417</span> <span style="color: #ae81ff;">14841838</span> <span style="color: #ae81ff;">15099157</span> + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">8192</span> <span style="color: #ae81ff;">94085</span> <span style="color: #ae81ff;">90818</span> <span style="color: #ae81ff;">14516748</span> <span style="color: #ae81ff;">14098760</span> <span style="color: #ae81ff;">14655484</span> <span style="color: #ae81ff;">93151</span> <span style="color: #ae81ff;">14690436</span> <span style="color: #ae81ff;">888697</span> <span style="color: #ae81ff;">14513490</span> <span style="color: #ae81ff;">88415</span> <span style="color: #ae81ff;">88908</span> <span style="color: #ae81ff;">14614285</span> <span style="color: #ae81ff;">14856578</span> + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">16384</span> <span style="color: #ae81ff;">93354</span> <span style="color: #ae81ff;">90226</span> <span style="color: #ae81ff;">12920169</span> <span style="color: #ae81ff;">12536489</span> <span style="color: #ae81ff;">12795299</span> <span style="color: #ae81ff;">92156</span> <span style="color: #ae81ff;">12896679</span> <span style="color: #ae81ff;">798495</span> <span style="color: #ae81ff;">12860025</span> <span style="color: #ae81ff;">92265</span> <span style="color: #ae81ff;">89190</span> <span style="color: #ae81ff;">12914478</span> <span style="color: #ae81ff;">13190646</span> + +iozone test complete. +Excel output is below: + +<span style="color: #e6db74;">"Writer report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">766</span> <span style="color: #ae81ff;">1318</span> <span style="color: #ae81ff;">1425</span> <span style="color: #ae81ff;">601</span> <span style="color: #ae81ff;">1480</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">3441</span> <span style="color: #ae81ff;">2800</span> <span style="color: #ae81ff;">1246</span> <span style="color: #ae81ff;">3066</span> <span style="color: #ae81ff;">2307</span> <span style="color: #ae81ff;">1021</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">5550</span> <span style="color: #ae81ff;">2831</span> <span style="color: #ae81ff;">5321</span> <span style="color: #ae81ff;">4422</span> <span style="color: #ae81ff;">6770</span> <span style="color: #ae81ff;">6178</span> <span style="color: #ae81ff;">4612</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">8478</span> <span style="color: #ae81ff;">12369</span> <span style="color: #ae81ff;">10894</span> <span style="color: #ae81ff;">7773</span> <span style="color: #ae81ff;">10716</span> <span style="color: #ae81ff;">13507</span> <span style="color: #ae81ff;">5408</span> <span style="color: #ae81ff;">5106</span> +<span style="color: 
#e6db74;">"1024"</span> <span style="color: #ae81ff;">11637</span> <span style="color: #ae81ff;">13016</span> <span style="color: #ae81ff;">15004</span> <span style="color: #ae81ff;">14118</span> <span style="color: #ae81ff;">15127</span> <span style="color: #ae81ff;">13110</span> <span style="color: #ae81ff;">14024</span> <span style="color: #ae81ff;">15271</span> <span style="color: #ae81ff;">14946</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">20893</span> <span style="color: #ae81ff;">23866</span> <span style="color: #ae81ff;">15582</span> <span style="color: #ae81ff;">22680</span> <span style="color: #ae81ff;">21076</span> <span style="color: #ae81ff;">19838</span> <span style="color: #ae81ff;">12745</span> <span style="color: #ae81ff;">26016</span> <span style="color: #ae81ff;">23457</span> <span style="color: #ae81ff;">16566</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">38048</span> <span style="color: #ae81ff;">39802</span> <span style="color: #ae81ff;">23628</span> <span style="color: #ae81ff;">38047</span> <span style="color: #ae81ff;">34621</span> <span style="color: #ae81ff;">29910</span> <span style="color: #ae81ff;">33432</span> <span style="color: #ae81ff;">32295</span> <span style="color: #ae81ff;">40430</span> <span style="color: #ae81ff;">21802</span> <span style="color: #ae81ff;">32822</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">41208</span> <span style="color: #ae81ff;">45073</span> <span style="color: #ae81ff;">47033</span> <span style="color: #ae81ff;">53750</span> <span style="color: #ae81ff;">42802</span> <span style="color: #ae81ff;">43970</span> <span style="color: #ae81ff;">43887</span> <span style="color: #ae81ff;">41983</span> <span style="color: #ae81ff;">39435</span> <span style="color: #ae81ff;">39275</span> <span style="color: #ae81ff;">46473</span> <span style="color: #ae81ff;">40918</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">58065</span> <span style="color: #ae81ff;">57669</span> <span style="color: #ae81ff;">52989</span> <span style="color: #ae81ff;">59565</span> <span style="color: #ae81ff;">48498</span> <span style="color: #ae81ff;">52363</span> <span style="color: #ae81ff;">57480</span> <span style="color: #ae81ff;">56439</span> <span style="color: #ae81ff;">56339</span> <span style="color: #ae81ff;">69858</span> <span style="color: #ae81ff;">57811</span> <span style="color: #ae81ff;">54791</span> <span style="color: #ae81ff;">52309</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">68452</span> <span style="color: #ae81ff;">74330</span> <span style="color: #ae81ff;">73302</span> <span style="color: #ae81ff;">72445</span> <span style="color: #ae81ff;">71159</span> <span style="color: #ae81ff;">73047</span> <span style="color: #ae81ff;">68780</span> <span style="color: #ae81ff;">70705</span> <span style="color: #ae81ff;">72368</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">82509</span> <span style="color: #ae81ff;">84080</span> <span style="color: #ae81ff;">80498</span> <span style="color: #ae81ff;">81152</span> <span style="color: 
#ae81ff;">83013</span> <span style="color: #ae81ff;">83236</span> <span style="color: #ae81ff;">82357</span> <span style="color: #ae81ff;">85060</span> <span style="color: #ae81ff;">82554</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">88544</span> <span style="color: #ae81ff;">88630</span> <span style="color: #ae81ff;">90497</span> <span style="color: #ae81ff;">88596</span> <span style="color: #ae81ff;">89407</span> <span style="color: #ae81ff;">88707</span> <span style="color: #ae81ff;">89182</span> <span style="color: #ae81ff;">88133</span> <span style="color: #ae81ff;">88919</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">92046</span> <span style="color: #ae81ff;">93163</span> <span style="color: #ae81ff;">89530</span> <span style="color: #ae81ff;">88940</span> <span style="color: #ae81ff;">94344</span> <span style="color: #ae81ff;">93323</span> <span style="color: #ae81ff;">91123</span> <span style="color: #ae81ff;">93882</span> <span style="color: #ae81ff;">89497</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">94358</span> <span style="color: #ae81ff;">92317</span> <span style="color: #ae81ff;">95697</span> <span style="color: #ae81ff;">92883</span> <span style="color: #ae81ff;">95296</span> <span style="color: #ae81ff;">95723</span> <span style="color: #ae81ff;">96008</span> <span style="color: #ae81ff;">94085</span> <span style="color: #ae81ff;">93354</span> + +<span style="color: #e6db74;">"Re-writer report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">512</span> <span style="color: #ae81ff;">665</span> <span style="color: #ae81ff;">558</span> <span style="color: #ae81ff;">602</span> <span style="color: #ae81ff;">601</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">1191</span> <span style="color: #ae81ff;">392</span> <span style="color: #ae81ff;">1224</span> <span style="color: #ae81ff;">1192</span> <span style="color: #ae81ff;">1192</span> <span style="color: #ae81ff;">1103</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">2182</span> <span style="color: #ae81ff;">2038</span> <span style="color: #ae81ff;">2419</span> <span style="color: #ae81ff;">2200</span> <span style="color: #ae81ff;">2399</span> <span style="color: #ae81ff;">2180</span> <span style="color: #ae81ff;">2035</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">4432</span> 
<span style="color: #ae81ff;">4387</span> <span style="color: #ae81ff;">4782</span> <span style="color: #ae81ff;">3488</span> <span style="color: #ae81ff;">4810</span> <span style="color: #ae81ff;">4776</span> <span style="color: #ae81ff;">4314</span> <span style="color: #ae81ff;">3765</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">8251</span> <span style="color: #ae81ff;">8906</span> <span style="color: #ae81ff;">8720</span> <span style="color: #ae81ff;">9301</span> <span style="color: #ae81ff;">7499</span> <span style="color: #ae81ff;">8832</span> <span style="color: #ae81ff;">9611</span> <span style="color: #ae81ff;">8842</span> <span style="color: #ae81ff;">9055</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">14967</span> <span style="color: #ae81ff;">15022</span> <span style="color: #ae81ff;">16221</span> <span style="color: #ae81ff;">14979</span> <span style="color: #ae81ff;">15019</span> <span style="color: #ae81ff;">16169</span> <span style="color: #ae81ff;">9084</span> <span style="color: #ae81ff;">15202</span> <span style="color: #ae81ff;">19229</span> <span style="color: #ae81ff;">16249</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">24611</span> <span style="color: #ae81ff;">26380</span> <span style="color: #ae81ff;">27804</span> <span style="color: #ae81ff;">26565</span> <span style="color: #ae81ff;">26347</span> <span style="color: #ae81ff;">27839</span> <span style="color: #ae81ff;">26663</span> <span style="color: #ae81ff;">20977</span> <span style="color: #ae81ff;">20977</span> <span style="color: #ae81ff;">26450</span> <span style="color: #ae81ff;">26575</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">34571</span> <span style="color: #ae81ff;">31839</span> <span style="color: #ae81ff;">29557</span> <span style="color: #ae81ff;">34202</span> <span style="color: #ae81ff;">34921</span> <span style="color: #ae81ff;">34463</span> <span style="color: #ae81ff;">33123</span> <span style="color: #ae81ff;">34669</span> <span style="color: #ae81ff;">37870</span> <span style="color: #ae81ff;">25258</span> <span style="color: #ae81ff;">28679</span> <span style="color: #ae81ff;">29668</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">45957</span> <span style="color: #ae81ff;">47322</span> <span style="color: #ae81ff;">44720</span> <span style="color: #ae81ff;">55140</span> <span style="color: #ae81ff;">47273</span> <span style="color: #ae81ff;">46150</span> <span style="color: #ae81ff;">47199</span> <span style="color: #ae81ff;">58500</span> <span style="color: #ae81ff;">45925</span> <span style="color: #ae81ff;">50186</span> <span style="color: #ae81ff;">48674</span> <span style="color: #ae81ff;">39197</span> <span style="color: #ae81ff;">48775</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">63438</span> <span style="color: #ae81ff;">56587</span> <span style="color: #ae81ff;">59555</span> <span style="color: #ae81ff;">60667</span> <span style="color: #ae81ff;">67179</span> <span style="color: #ae81ff;">60915</span> <span style="color: #ae81ff;">64220</span> <span style="color: #ae81ff;">63127</span> <span style="color: #ae81ff;">59947</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span 
style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">79055</span> <span style="color: #ae81ff;">78150</span> <span style="color: #ae81ff;">72047</span> <span style="color: #ae81ff;">74496</span> <span style="color: #ae81ff;">71312</span> <span style="color: #ae81ff;">76272</span> <span style="color: #ae81ff;">76272</span> <span style="color: #ae81ff;">77279</span> <span style="color: #ae81ff;">77219</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">83386</span> <span style="color: #ae81ff;">81787</span> <span style="color: #ae81ff;">84498</span> <span style="color: #ae81ff;">82448</span> <span style="color: #ae81ff;">78835</span> <span style="color: #ae81ff;">84477</span> <span style="color: #ae81ff;">83355</span> <span style="color: #ae81ff;">82921</span> <span style="color: #ae81ff;">83384</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">89238</span> <span style="color: #ae81ff;">87728</span> <span style="color: #ae81ff;">87436</span> <span style="color: #ae81ff;">83024</span> <span style="color: #ae81ff;">87151</span> <span style="color: #ae81ff;">83784</span> <span style="color: #ae81ff;">85736</span> <span style="color: #ae81ff;">88322</span> <span style="color: #ae81ff;">85428</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">91054</span> <span style="color: #ae81ff;">88896</span> <span style="color: #ae81ff;">90554</span> <span style="color: #ae81ff;">88220</span> <span style="color: #ae81ff;">89887</span> <span style="color: #ae81ff;">88312</span> <span style="color: #ae81ff;">89645</span> <span style="color: #ae81ff;">90818</span> <span style="color: #ae81ff;">90226</span> + +<span style="color: #e6db74;">"Reader report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">161629</span> <span style="color: #ae81ff;">162019</span> <span style="color: #ae81ff;">87901</span> <span style="color: #ae81ff;">171108</span> <span style="color: #ae81ff;">163301</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">297797</span> <span style="color: #ae81ff;">193899</span> <span style="color: #ae81ff;">219148</span> <span style="color: #ae81ff;">200939</span> <span style="color: #ae81ff;">231529</span> <span style="color: #ae81ff;">330828</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">561637</span> <span 
style="color: #ae81ff;">375328</span> <span style="color: #ae81ff;">735537</span> <span style="color: #ae81ff;">658411</span> <span style="color: #ae81ff;">785010</span> <span style="color: #ae81ff;">663292</span> <span style="color: #ae81ff;">670332</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">1155924</span> <span style="color: #ae81ff;">1099128</span> <span style="color: #ae81ff;">1355045</span> <span style="color: #ae81ff;">1248678</span> <span style="color: #ae81ff;">1375881</span> <span style="color: #ae81ff;">1417662</span> <span style="color: #ae81ff;">1369738</span> <span style="color: #ae81ff;">1163439</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">1600327</span> <span style="color: #ae81ff;">1874871</span> <span style="color: #ae81ff;">2102484</span> <span style="color: #ae81ff;">2236023</span> <span style="color: #ae81ff;">2326879</span> <span style="color: #ae81ff;">2360124</span> <span style="color: #ae81ff;">2432298</span> <span style="color: #ae81ff;">1783768</span> <span style="color: #ae81ff;">2592318</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">2214011</span> <span style="color: #ae81ff;">2752473</span> <span style="color: #ae81ff;">3346546</span> <span style="color: #ae81ff;">3777778</span> <span style="color: #ae81ff;">3999400</span> <span style="color: #ae81ff;">4229816</span> <span style="color: #ae81ff;">4259178</span> <span style="color: #ae81ff;">4267642</span> <span style="color: #ae81ff;">3917325</span> <span style="color: #ae81ff;">2494314</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">2298370</span> <span style="color: #ae81ff;">3503374</span> <span style="color: #ae81ff;">2970339</span> <span style="color: #ae81ff;">3290024</span> <span style="color: #ae81ff;">114155</span> <span style="color: #ae81ff;">3533639</span> <span style="color: #ae81ff;">4934183</span> <span style="color: #ae81ff;">5883908</span> <span style="color: #ae81ff;">3682869</span> <span style="color: #ae81ff;">3486312</span> <span style="color: #ae81ff;">5382492</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">2528346</span> <span style="color: #ae81ff;">4171488</span> <span style="color: #ae81ff;">4500997</span> <span style="color: #ae81ff;">2863357</span> <span style="color: #ae81ff;">5901904</span> <span style="color: #ae81ff;">5817959</span> <span style="color: #ae81ff;">5910025</span> <span style="color: #ae81ff;">5944791</span> <span style="color: #ae81ff;">6399884</span> <span style="color: #ae81ff;">6072979</span> <span style="color: #ae81ff;">4946832</span> <span style="color: #ae81ff;">4998648</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">3120803</span> <span style="color: #ae81ff;">4914218</span> <span style="color: #ae81ff;">5923681</span> <span style="color: #ae81ff;">7817198</span> <span style="color: #ae81ff;">7724930</span> <span style="color: #ae81ff;">8257760</span> <span style="color: #ae81ff;">9199633</span> <span style="color: #ae81ff;">8533606</span> <span style="color: #ae81ff;">7182365</span> <span style="color: #ae81ff;">11009550</span> <span style="color: #ae81ff;">8919040</span> <span style="color: #ae81ff;">8240925</span> <span style="color: #ae81ff;">7239873</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: 
#ae81ff;">0</span> <span style="color: #ae81ff;">10767866</span> <span style="color: #ae81ff;">10937536</span> <span style="color: #ae81ff;">10796626</span> <span style="color: #ae81ff;">11631788</span> <span style="color: #ae81ff;">9858045</span> <span style="color: #ae81ff;">11582774</span> <span style="color: #ae81ff;">10252961</span> <span style="color: #ae81ff;">9825626</span> <span style="color: #ae81ff;">9375862</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">10401087</span> <span style="color: #ae81ff;">10842965</span> <span style="color: #ae81ff;">10597577</span> <span style="color: #ae81ff;">10827162</span> <span style="color: #ae81ff;">11366197</span> <span style="color: #ae81ff;">119806</span> <span style="color: #ae81ff;">11948197</span> <span style="color: #ae81ff;">13008396</span> <span style="color: #ae81ff;">11929012</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11835991</span> <span style="color: #ae81ff;">12860445</span> <span style="color: #ae81ff;">12881840</span> <span style="color: #ae81ff;">12769930</span> <span style="color: #ae81ff;">13194095</span> <span style="color: #ae81ff;">13946536</span> <span style="color: #ae81ff;">13417250</span> <span style="color: #ae81ff;">13332647</span> <span style="color: #ae81ff;">12569350</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">12412246</span> <span style="color: #ae81ff;">13213567</span> <span style="color: #ae81ff;">13288465</span> <span style="color: #ae81ff;">13982505</span> <span style="color: #ae81ff;">14090737</span> <span style="color: #ae81ff;">14630931</span> <span style="color: #ae81ff;">14333390</span> <span style="color: #ae81ff;">14379692</span> <span style="color: #ae81ff;">12697054</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">13009600</span> <span style="color: #ae81ff;">13905388</span> <span style="color: #ae81ff;">13770592</span> <span style="color: #ae81ff;">14063505</span> <span style="color: #ae81ff;">14497129</span> <span style="color: #ae81ff;">14628576</span> <span style="color: #ae81ff;">14907236</span> <span style="color: #ae81ff;">14516748</span> <span style="color: #ae81ff;">12920169</span> + +<span style="color: #e6db74;">"Re-Reader report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: 
#e6db74;">"64"</span> <span style="color: #ae81ff;">160374</span> <span style="color: #ae81ff;">168847</span> <span style="color: #ae81ff;">104742</span> <span style="color: #ae81ff;">180788</span> <span style="color: #ae81ff;">172982</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">295665</span> <span style="color: #ae81ff;">342216</span> <span style="color: #ae81ff;">333293</span> <span style="color: #ae81ff;">356533</span> <span style="color: #ae81ff;">344190</span> <span style="color: #ae81ff;">343091</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">580461</span> <span style="color: #ae81ff;">582983</span> <span style="color: #ae81ff;">750970</span> <span style="color: #ae81ff;">668246</span> <span style="color: #ae81ff;">800221</span> <span style="color: #ae81ff;">654399</span> <span style="color: #ae81ff;">748875</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">1160923</span> <span style="color: #ae81ff;">1169140</span> <span style="color: #ae81ff;">1380303</span> <span style="color: #ae81ff;">1329871</span> <span style="color: #ae81ff;">1458090</span> <span style="color: #ae81ff;">1429934</span> <span style="color: #ae81ff;">1382969</span> <span style="color: #ae81ff;">1225169</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">1609925</span> <span style="color: #ae81ff;">1875690</span> <span style="color: #ae81ff;">2120127</span> <span style="color: #ae81ff;">2360124</span> <span style="color: #ae81ff;">2409105</span> <span style="color: #ae81ff;">2409105</span> <span style="color: #ae81ff;">2450338</span> <span style="color: #ae81ff;">2169388</span> <span style="color: #ae81ff;">2751762</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">2240576</span> <span style="color: #ae81ff;">2592166</span> <span style="color: #ae81ff;">3384788</span> <span style="color: #ae81ff;">3841986</span> <span style="color: #ae81ff;">3922692</span> <span style="color: #ae81ff;">4250747</span> <span style="color: #ae81ff;">4196751</span> <span style="color: #ae81ff;">4229816</span> <span style="color: #ae81ff;">4130162</span> <span style="color: #ae81ff;">2635106</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">2162402</span> <span style="color: #ae81ff;">3519162</span> <span style="color: #ae81ff;">3013586</span> <span style="color: #ae81ff;">3543113</span> <span style="color: #ae81ff;">5007531</span> <span style="color: #ae81ff;">3580776</span> <span style="color: #ae81ff;">5101205</span> <span style="color: #ae81ff;">5424983</span> <span style="color: #ae81ff;">3385325</span> <span style="color: #ae81ff;">3911800</span> <span style="color: #ae81ff;">5190597</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">2205119</span> <span style="color: #ae81ff;">4498640</span> <span style="color: #ae81ff;">4373801</span> <span style="color: #ae81ff;">5287898</span> <span style="color: #ae81ff;">5971654</span> <span style="color: #ae81ff;">5988306</span> <span style="color: #ae81ff;">5940679</span> <span style="color: #ae81ff;">5940679</span> <span style="color: #ae81ff;">5830794</span> <span style="color: #ae81ff;">7427838</span> <span style="color: #ae81ff;">5126937</span> <span style="color: #ae81ff;">5056765</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">3071013</span> <span style="color: #ae81ff;">4948188</span> <span style="color: 
#ae81ff;">5845080</span> <span style="color: #ae81ff;">7717122</span> <span style="color: #ae81ff;">7853828</span> <span style="color: #ae81ff;">7933620</span> <span style="color: #ae81ff;">8225143</span> <span style="color: #ae81ff;">8240925</span> <span style="color: #ae81ff;">8291637</span> <span style="color: #ae81ff;">10290727</span> <span style="color: #ae81ff;">8696680</span> <span style="color: #ae81ff;">9984224</span> <span style="color: #ae81ff;">7606082</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11145983</span> <span style="color: #ae81ff;">11203222</span> <span style="color: #ae81ff;">11473510</span> <span style="color: #ae81ff;">12359834</span> <span style="color: #ae81ff;">10186091</span> <span style="color: #ae81ff;">12755585</span> <span style="color: #ae81ff;">10835782</span> <span style="color: #ae81ff;">10875222</span> <span style="color: #ae81ff;">9728947</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11421453</span> <span style="color: #ae81ff;">12055622</span> <span style="color: #ae81ff;">12559643</span> <span style="color: #ae81ff;">12076278</span> <span style="color: #ae81ff;">12867728</span> <span style="color: #ae81ff;">11270196</span> <span style="color: #ae81ff;">12920955</span> <span style="color: #ae81ff;">13655947</span> <span style="color: #ae81ff;">11872336</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">12145325</span> <span style="color: #ae81ff;">13091354</span> <span style="color: #ae81ff;">13012339</span> <span style="color: #ae81ff;">12958357</span> <span style="color: #ae81ff;">13263809</span> <span style="color: #ae81ff;">14042363</span> <span style="color: #ae81ff;">13516877</span> <span style="color: #ae81ff;">13313597</span> <span style="color: #ae81ff;">12383894</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">12322798</span> <span style="color: #ae81ff;">13137311</span> <span style="color: #ae81ff;">13291356</span> <span style="color: #ae81ff;">13753728</span> <span style="color: #ae81ff;">13934836</span> <span style="color: #ae81ff;">14471865</span> <span style="color: #ae81ff;">14121866</span> <span style="color: #ae81ff;">14047711</span> <span style="color: #ae81ff;">12364092</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">12589457</span> <span style="color: #ae81ff;">13609686</span> <span style="color: #ae81ff;">13504875</span> <span style="color: #ae81ff;">13708355</span> <span style="color: #ae81ff;">14125204</span> <span style="color: #ae81ff;">14222223</span> <span style="color: #ae81ff;">14518761</span> <span style="color: #ae81ff;">14098760</span> <span style="color: 
#ae81ff;">12536489</span> + +<span style="color: #e6db74;">"Random read report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">177677</span> <span style="color: #ae81ff;">165823</span> <span style="color: #ae81ff;">100620</span> <span style="color: #ae81ff;">175355</span> <span style="color: #ae81ff;">173429</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">302495</span> <span style="color: #ae81ff;">268999</span> <span style="color: #ae81ff;">338761</span> <span style="color: #ae81ff;">340264</span> <span style="color: #ae81ff;">334331</span> <span style="color: #ae81ff;">329812</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">552960</span> <span style="color: #ae81ff;">621448</span> <span style="color: #ae81ff;">561343</span> <span style="color: #ae81ff;">718796</span> <span style="color: #ae81ff;">686620</span> <span style="color: #ae81ff;">684432</span> <span style="color: #ae81ff;">644965</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">911184</span> <span style="color: #ae81ff;">1135150</span> <span style="color: #ae81ff;">1166599</span> <span style="color: #ae81ff;">1308798</span> <span style="color: #ae81ff;">1276891</span> <span style="color: #ae81ff;">1242896</span> <span style="color: #ae81ff;">1296159</span> <span style="color: #ae81ff;">1296159</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">1340164</span> <span style="color: #ae81ff;">1762539</span> <span style="color: #ae81ff;">1976691</span> <span style="color: #ae81ff;">2321847</span> <span style="color: #ae81ff;">2155236</span> <span style="color: #ae81ff;">2503178</span> <span style="color: #ae81ff;">2485792</span> <span style="color: #ae81ff;">2290886</span> <span style="color: #ae81ff;">2767722</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">1747411</span> <span style="color: #ae81ff;">2386884</span> <span style="color: #ae81ff;">3061485</span> <span style="color: #ae81ff;">3717289</span> <span style="color: #ae81ff;">3806235</span> <span style="color: #ae81ff;">4164199</span> <span style="color: #ae81ff;">4293237</span> <span style="color: #ae81ff;">4453499</span> <span style="color: #ae81ff;">4154130</span> <span style="color: #ae81ff;">2653011</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">1788674</span> <span style="color: #ae81ff;">3327622</span> <span style="color: #ae81ff;">2756362</span> <span style="color: #ae81ff;">3117487</span> <span style="color: #ae81ff;">4791076</span> <span style="color: #ae81ff;">3586757</span> <span style="color: #ae81ff;">5424983</span> <span style="color: #ae81ff;">5468151</span> <span style="color: #ae81ff;">3647681</span> <span style="color: #ae81ff;">3719547</span> <span style="color: #ae81ff;">5263753</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">1642685</span> <span 
style="color: #ae81ff;">3852936</span> <span style="color: #ae81ff;">3995873</span> <span style="color: #ae81ff;">2123356</span> <span style="color: #ae81ff;">5602613</span> <span style="color: #ae81ff;">6071906</span> <span style="color: #ae81ff;">6054786</span> <span style="color: #ae81ff;">6239504</span> <span style="color: #ae81ff;">6067617</span> <span style="color: #ae81ff;">7440706</span> <span style="color: #ae81ff;">4876623</span> <span style="color: #ae81ff;">4979812</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">2228850</span> <span style="color: #ae81ff;">2688155</span> <span style="color: #ae81ff;">4901250</span> <span style="color: #ae81ff;">6889995</span> <span style="color: #ae81ff;">7350615</span> <span style="color: #ae81ff;">7692072</span> <span style="color: #ae81ff;">8003847</span> <span style="color: #ae81ff;">8673628</span> <span style="color: #ae81ff;">8291637</span> <span style="color: #ae81ff;">10272267</span> <span style="color: #ae81ff;">8692280</span> <span style="color: #ae81ff;">9056567</span> <span style="color: #ae81ff;">7692072</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">10587860</span> <span style="color: #ae81ff;">11976409</span> <span style="color: #ae81ff;">12383220</span> <span style="color: #ae81ff;">13567777</span> <span style="color: #ae81ff;">11123431</span> <span style="color: #ae81ff;">13128427</span> <span style="color: #ae81ff;">11594500</span> <span style="color: #ae81ff;">11311097</span> <span style="color: #ae81ff;">9881434</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11023864</span> <span style="color: #ae81ff;">12293394</span> <span style="color: #ae81ff;">12197385</span> <span style="color: #ae81ff;">13295950</span> <span style="color: #ae81ff;">13396389</span> <span style="color: #ae81ff;">10942676</span> <span style="color: #ae81ff;">14283968</span> <span style="color: #ae81ff;">14105847</span> <span style="color: #ae81ff;">11831964</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11670659</span> <span style="color: #ae81ff;">13302644</span> <span style="color: #ae81ff;">12985292</span> <span style="color: #ae81ff;">13536514</span> <span style="color: #ae81ff;">13855499</span> <span style="color: #ae81ff;">14785216</span> <span style="color: #ae81ff;">13986987</span> <span style="color: #ae81ff;">13712081</span> <span style="color: #ae81ff;">12445289</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11471432</span> <span style="color: #ae81ff;">12926184</span> <span style="color: #ae81ff;">13391725</span> <span style="color: #ae81ff;">14127128</span> <span style="color: #ae81ff;">14349852</span> <span style="color: #ae81ff;">15098659</span> <span style="color: #ae81ff;">14464820</span> <span style="color: #ae81ff;">14591902</span> <span 
style="color: #ae81ff;">12420940</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11779147</span> <span style="color: #ae81ff;">13317259</span> <span style="color: #ae81ff;">13470626</span> <span style="color: #ae81ff;">13953121</span> <span style="color: #ae81ff;">14520774</span> <span style="color: #ae81ff;">14656851</span> <span style="color: #ae81ff;">15025186</span> <span style="color: #ae81ff;">14655484</span> <span style="color: #ae81ff;">12795299</span> + +<span style="color: #e6db74;">"Random write report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">1239</span> <span style="color: #ae81ff;">1612</span> <span style="color: #ae81ff;">1271</span> <span style="color: #ae81ff;">1272</span> <span style="color: #ae81ff;">1551</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">3197</span> <span style="color: #ae81ff;">1559</span> <span style="color: #ae81ff;">2072</span> <span style="color: #ae81ff;">3225</span> <span style="color: #ae81ff;">3208</span> <span style="color: #ae81ff;">3123</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">5128</span> <span style="color: #ae81ff;">4151</span> <span style="color: #ae81ff;">6295</span> <span style="color: #ae81ff;">6395</span> <span style="color: #ae81ff;">5007</span> <span style="color: #ae81ff;">5100</span> <span style="color: #ae81ff;">6361</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">10157</span> <span style="color: #ae81ff;">10039</span> <span style="color: #ae81ff;">10031</span> <span style="color: #ae81ff;">12665</span> <span style="color: #ae81ff;">9979</span> <span style="color: #ae81ff;">8310</span> <span style="color: #ae81ff;">12632</span> <span style="color: #ae81ff;">10144</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">14150</span> <span style="color: #ae81ff;">14240</span> <span style="color: #ae81ff;">12715</span> <span style="color: #ae81ff;">13136</span> <span style="color: #ae81ff;">14981</span> <span style="color: #ae81ff;">14925</span> <span style="color: #ae81ff;">12700</span> <span style="color: #ae81ff;">12649</span> <span style="color: #ae81ff;">13950</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">22968</span> <span style="color: #ae81ff;">22881</span> <span style="color: #ae81ff;">22716</span> <span style="color: #ae81ff;">25448</span> <span style="color: #ae81ff;">20393</span> <span style="color: #ae81ff;">16931</span> <span style="color: #ae81ff;">20211</span> <span style="color: #ae81ff;">24891</span> <span style="color: #ae81ff;">20590</span> <span style="color: #ae81ff;">23273</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">35265</span> <span 
style="color: #ae81ff;">37629</span> <span style="color: #ae81ff;">35645</span> <span style="color: #ae81ff;">38176</span> <span style="color: #ae81ff;">33686</span> <span style="color: #ae81ff;">38303</span> <span style="color: #ae81ff;">40428</span> <span style="color: #ae81ff;">37088</span> <span style="color: #ae81ff;">41777</span> <span style="color: #ae81ff;">38135</span> <span style="color: #ae81ff;">37140</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">37514</span> <span style="color: #ae81ff;">49560</span> <span style="color: #ae81ff;">42362</span> <span style="color: #ae81ff;">30104</span> <span style="color: #ae81ff;">37601</span> <span style="color: #ae81ff;">46729</span> <span style="color: #ae81ff;">43865</span> <span style="color: #ae81ff;">44618</span> <span style="color: #ae81ff;">41948</span> <span style="color: #ae81ff;">43609</span> <span style="color: #ae81ff;">33948</span> <span style="color: #ae81ff;">40625</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">55784</span> <span style="color: #ae81ff;">51293</span> <span style="color: #ae81ff;">60184</span> <span style="color: #ae81ff;">41780</span> <span style="color: #ae81ff;">59972</span> <span style="color: #ae81ff;">51845</span> <span style="color: #ae81ff;">53723</span> <span style="color: #ae81ff;">53771</span> <span style="color: #ae81ff;">67496</span> <span style="color: #ae81ff;">55174</span> <span style="color: #ae81ff;">67235</span> <span style="color: #ae81ff;">58229</span> <span style="color: #ae81ff;">59469</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">66188</span> <span style="color: #ae81ff;">68123</span> <span style="color: #ae81ff;">65037</span> <span style="color: #ae81ff;">72396</span> <span style="color: #ae81ff;">65485</span> <span style="color: #ae81ff;">72514</span> <span style="color: #ae81ff;">62992</span> <span style="color: #ae81ff;">70012</span> <span style="color: #ae81ff;">67853</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">77645</span> <span style="color: #ae81ff;">82366</span> <span style="color: #ae81ff;">80466</span> <span style="color: #ae81ff;">75815</span> <span style="color: #ae81ff;">80359</span> <span style="color: #ae81ff;">82650</span> <span style="color: #ae81ff;">82385</span> <span style="color: #ae81ff;">78461</span> <span style="color: #ae81ff;">79076</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">87840</span> <span style="color: #ae81ff;">86476</span> <span style="color: #ae81ff;">87772</span> <span style="color: #ae81ff;">85905</span> <span style="color: #ae81ff;">84908</span> <span style="color: #ae81ff;">86420</span> <span style="color: #ae81ff;">85565</span> <span style="color: #ae81ff;">86749</span> <span style="color: #ae81ff;">87123</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: 
#ae81ff;">0</span> <span style="color: #ae81ff;">90962</span> <span style="color: #ae81ff;">90851</span> <span style="color: #ae81ff;">89797</span> <span style="color: #ae81ff;">91268</span> <span style="color: #ae81ff;">91634</span> <span style="color: #ae81ff;">89320</span> <span style="color: #ae81ff;">86524</span> <span style="color: #ae81ff;">89510</span> <span style="color: #ae81ff;">91501</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">91311</span> <span style="color: #ae81ff;">92937</span> <span style="color: #ae81ff;">92289</span> <span style="color: #ae81ff;">92841</span> <span style="color: #ae81ff;">91158</span> <span style="color: #ae81ff;">90120</span> <span style="color: #ae81ff;">93152</span> <span style="color: #ae81ff;">93151</span> <span style="color: #ae81ff;">92156</span> + +<span style="color: #e6db74;">"Backward read report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">161629</span> <span style="color: #ae81ff;">111531</span> <span style="color: #ae81ff;">101764</span> <span style="color: #ae81ff;">101113</span> <span style="color: #ae81ff;">94692</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">188520</span> <span style="color: #ae81ff;">302495</span> <span style="color: #ae81ff;">222971</span> <span style="color: #ae81ff;">207141</span> <span style="color: #ae81ff;">229353</span> <span style="color: #ae81ff;">227796</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">381597</span> <span style="color: #ae81ff;">585845</span> <span style="color: #ae81ff;">707430</span> <span style="color: #ae81ff;">688381</span> <span style="color: #ae81ff;">717355</span> <span style="color: #ae81ff;">658008</span> <span style="color: #ae81ff;">727070</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">966113</span> <span style="color: #ae81ff;">1073309</span> <span style="color: #ae81ff;">1276891</span> <span style="color: #ae81ff;">1391032</span> <span style="color: #ae81ff;">1286067</span> <span style="color: #ae81ff;">1347393</span> <span style="color: #ae81ff;">1333173</span> <span style="color: #ae81ff;">1387437</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">1326505</span> <span style="color: #ae81ff;">1633188</span> <span style="color: #ae81ff;">1968537</span> <span style="color: #ae81ff;">2106609</span> <span style="color: #ae81ff;">2286009</span> <span style="color: #ae81ff;">2265510</span> <span style="color: #ae81ff;">2286009</span> <span style="color: #ae81ff;">2337008</span> <span style="color: #ae81ff;">2426801</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">1739978</span> <span style="color: #ae81ff;">2359349</span> <span style="color: 
#ae81ff;">2938934</span> <span style="color: #ae81ff;">3561619</span> <span style="color: #ae81ff;">3749743</span> <span style="color: #ae81ff;">3864455</span> <span style="color: #ae81ff;">3812993</span> <span style="color: #ae81ff;">4154130</span> <span style="color: #ae81ff;">4196751</span> <span style="color: #ae81ff;">1933883</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">1640426</span> <span style="color: #ae81ff;">2294686</span> <span style="color: #ae81ff;">3821310</span> <span style="color: #ae81ff;">5761565</span> <span style="color: #ae81ff;">4270618</span> <span style="color: #ae81ff;">3489144</span> <span style="color: #ae81ff;">3531460</span> <span style="color: #ae81ff;">3664018</span> <span style="color: #ae81ff;">3771811</span> <span style="color: #ae81ff;">4982841</span> <span style="color: #ae81ff;">3404782</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">1790147</span> <span style="color: #ae81ff;">3365696</span> <span style="color: #ae81ff;">3912159</span> <span style="color: #ae81ff;">4662231</span> <span style="color: #ae81ff;">5313247</span> <span style="color: #ae81ff;">5454094</span> <span style="color: #ae81ff;">5745001</span> <span style="color: #ae81ff;">5696426</span> <span style="color: #ae81ff;">6094523</span> <span style="color: #ae81ff;">6374949</span> <span style="color: #ae81ff;">5097274</span> <span style="color: #ae81ff;">5088216</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">2116542</span> <span style="color: #ae81ff;">3717820</span> <span style="color: #ae81ff;">4996397</span> <span style="color: #ae81ff;">6452650</span> <span style="color: #ae81ff;">8312700</span> <span style="color: #ae81ff;">7877236</span> <span style="color: #ae81ff;">7406867</span> <span style="color: #ae81ff;">94848</span> <span style="color: #ae81ff;">10304615</span> <span style="color: #ae81ff;">8168437</span> <span style="color: #ae81ff;">9334595</span> <span style="color: #ae81ff;">7628034</span> <span style="color: #ae81ff;">6798654</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">8553152</span> <span style="color: #ae81ff;">10425622</span> <span style="color: #ae81ff;">10701629</span> <span style="color: #ae81ff;">10879527</span> <span style="color: #ae81ff;">11148695</span> <span style="color: #ae81ff;">11222432</span> <span style="color: #ae81ff;">10227021</span> <span style="color: #ae81ff;">10246845</span> <span style="color: #ae81ff;">8419541</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">9652016</span> <span style="color: #ae81ff;">10710305</span> <span style="color: #ae81ff;">11233809</span> <span style="color: #ae81ff;">11061126</span> <span style="color: #ae81ff;">11770154</span> <span style="color: #ae81ff;">11677648</span> <span style="color: #ae81ff;">11696531</span> <span style="color: #ae81ff;">12654468</span> <span style="color: #ae81ff;">11770154</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: 
#ae81ff;">11391587</span> <span style="color: #ae81ff;">12881840</span> <span style="color: #ae81ff;">13108523</span> <span style="color: #ae81ff;">13670482</span> <span style="color: #ae81ff;">13827619</span> <span style="color: #ae81ff;">14674318</span> <span style="color: #ae81ff;">13788430</span> <span style="color: #ae81ff;">13703194</span> <span style="color: #ae81ff;">12239423</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11729024</span> <span style="color: #ae81ff;">12826360</span> <span style="color: #ae81ff;">12979437</span> <span style="color: #ae81ff;">13858259</span> <span style="color: #ae81ff;">14453412</span> <span style="color: #ae81ff;">14816258</span> <span style="color: #ae81ff;">14602367</span> <span style="color: #ae81ff;">14455122</span> <span style="color: #ae81ff;">12849594</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11947459</span> <span style="color: #ae81ff;">13277137</span> <span style="color: #ae81ff;">13213552</span> <span style="color: #ae81ff;">14061617</span> <span style="color: #ae81ff;">14661640</span> <span style="color: #ae81ff;">14689160</span> <span style="color: #ae81ff;">14806661</span> <span style="color: #ae81ff;">14690436</span> <span style="color: #ae81ff;">12896679</span> + +<span style="color: #e6db74;">"Record rewrite report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">417</span> <span style="color: #ae81ff;">473</span> <span style="color: #ae81ff;">557</span> <span style="color: #ae81ff;">563</span> <span style="color: #ae81ff;">565</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">1236</span> <span style="color: #ae81ff;">1123</span> <span style="color: #ae81ff;">1123</span> <span style="color: #ae81ff;">1348</span> <span style="color: #ae81ff;">1112</span> <span style="color: #ae81ff;">1127</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">2449</span> <span style="color: #ae81ff;">2472</span> <span style="color: #ae81ff;">2225</span> <span style="color: #ae81ff;">2413</span> <span style="color: #ae81ff;">2429</span> <span style="color: #ae81ff;">2247</span> <span style="color: #ae81ff;">2051</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">2508</span> <span style="color: #ae81ff;">3805</span> <span style="color: #ae81ff;">4449</span> <span style="color: #ae81ff;">5389</span> <span style="color: #ae81ff;">4868</span> <span style="color: #ae81ff;">4543</span> <span style="color: #ae81ff;">4149</span> <span style="color: #ae81ff;">4428</span> +<span style="color: 
#e6db74;">"1024"</span> <span style="color: #ae81ff;">8366</span> <span style="color: #ae81ff;">8954</span> <span style="color: #ae81ff;">7704</span> <span style="color: #ae81ff;">9533</span> <span style="color: #ae81ff;">8050</span> <span style="color: #ae81ff;">8879</span> <span style="color: #ae81ff;">8327</span> <span style="color: #ae81ff;">5856</span> <span style="color: #ae81ff;">8293</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">16843</span> <span style="color: #ae81ff;">16556</span> <span style="color: #ae81ff;">16490</span> <span style="color: #ae81ff;">18210</span> <span style="color: #ae81ff;">15463</span> <span style="color: #ae81ff;">17908</span> <span style="color: #ae81ff;">17887</span> <span style="color: #ae81ff;">16492</span> <span style="color: #ae81ff;">11737</span> <span style="color: #ae81ff;">15579</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">32323</span> <span style="color: #ae81ff;">34059</span> <span style="color: #ae81ff;">33360</span> <span style="color: #ae81ff;">32992</span> <span style="color: #ae81ff;">33444</span> <span style="color: #ae81ff;">40310</span> <span style="color: #ae81ff;">22529</span> <span style="color: #ae81ff;">27316</span> <span style="color: #ae81ff;">31407</span> <span style="color: #ae81ff;">33183</span> <span style="color: #ae81ff;">27528</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">61064</span> <span style="color: #ae81ff;">63898</span> <span style="color: #ae81ff;">63290</span> <span style="color: #ae81ff;">62863</span> <span style="color: #ae81ff;">67584</span> <span style="color: #ae81ff;">67699</span> <span style="color: #ae81ff;">68143</span> <span style="color: #ae81ff;">66238</span> <span style="color: #ae81ff;">62955</span> <span style="color: #ae81ff;">35618</span> <span style="color: #ae81ff;">47802</span> <span style="color: #ae81ff;">29293</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">103626</span> <span style="color: #ae81ff;">131949</span> <span style="color: #ae81ff;">139152</span> <span style="color: #ae81ff;">125794</span> <span style="color: #ae81ff;">147326</span> <span style="color: #ae81ff;">126376</span> <span style="color: #ae81ff;">151085</span> <span style="color: #ae81ff;">49413</span> <span style="color: #ae81ff;">85962</span> <span style="color: #ae81ff;">91274</span> <span style="color: #ae81ff;">81668</span> <span style="color: #ae81ff;">63197</span> <span style="color: #ae81ff;">47149</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">196836</span> <span style="color: #ae81ff;">270919</span> <span style="color: #ae81ff;">164278</span> <span style="color: #ae81ff;">171307</span> <span style="color: #ae81ff;">275460</span> <span style="color: #ae81ff;">219878</span> <span style="color: #ae81ff;">132334</span> <span style="color: #ae81ff;">118346</span> <span style="color: #ae81ff;">89947</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">440101</span> <span style="color: #ae81ff;">535691</span> <span style="color: #ae81ff;">290491</span> <span style="color: #ae81ff;">460655</span> <span 
style="color: #ae81ff;">247265</span> <span style="color: #ae81ff;">247548</span> <span style="color: #ae81ff;">258841</span> <span style="color: #ae81ff;">222259</span> <span style="color: #ae81ff;">176494</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">419979</span> <span style="color: #ae81ff;">467063</span> <span style="color: #ae81ff;">414247</span> <span style="color: #ae81ff;">416191</span> <span style="color: #ae81ff;">442800</span> <span style="color: #ae81ff;">457513</span> <span style="color: #ae81ff;">497857</span> <span style="color: #ae81ff;">415595</span> <span style="color: #ae81ff;">273752</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">910557</span> <span style="color: #ae81ff;">900170</span> <span style="color: #ae81ff;">924856</span> <span style="color: #ae81ff;">863287</span> <span style="color: #ae81ff;">884871</span> <span style="color: #ae81ff;">837400</span> <span style="color: #ae81ff;">831004</span> <span style="color: #ae81ff;">681831</span> <span style="color: #ae81ff;">568459</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1255265</span> <span style="color: #ae81ff;">1300817</span> <span style="color: #ae81ff;">1065322</span> <span style="color: #ae81ff;">1341596</span> <span style="color: #ae81ff;">1315825</span> <span style="color: #ae81ff;">1008900</span> <span style="color: #ae81ff;">1239205</span> <span style="color: #ae81ff;">888697</span> <span style="color: #ae81ff;">798495</span> + +<span style="color: #e6db74;">"Stride read report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">59867</span> <span style="color: #ae81ff;">112892</span> <span style="color: #ae81ff;">102229</span> <span style="color: #ae81ff;">111300</span> <span style="color: #ae81ff;">105941</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">186555</span> <span style="color: #ae81ff;">198708</span> <span style="color: #ae81ff;">211966</span> <span style="color: #ae81ff;">202913</span> <span style="color: #ae81ff;">226164</span> <span style="color: #ae81ff;">224932</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">323630</span> <span style="color: #ae81ff;">641497</span> <span style="color: #ae81ff;">624339</span> <span style="color: #ae81ff;">595594</span> <span style="color: #ae81ff;">609455</span> <span style="color: #ae81ff;">668246</span> <span style="color: 
#ae81ff;">810490</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">936209</span> <span style="color: #ae81ff;">1053821</span> <span style="color: #ae81ff;">1227269</span> <span style="color: #ae81ff;">1199171</span> <span style="color: #ae81ff;">1216149</span> <span style="color: #ae81ff;">1115688</span> <span style="color: #ae81ff;">1458090</span> <span style="color: #ae81ff;">1273861</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">1220575</span> <span style="color: #ae81ff;">1695065</span> <span style="color: #ae81ff;">1614767</span> <span style="color: #ae81ff;">2169388</span> <span style="color: #ae81ff;">2391665</span> <span style="color: #ae81ff;">2321847</span> <span style="color: #ae81ff;">2316837</span> <span style="color: #ae81ff;">2003431</span> <span style="color: #ae81ff;">2503178</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">1762469</span> <span style="color: #ae81ff;">2434922</span> <span style="color: #ae81ff;">3074635</span> <span style="color: #ae81ff;">3334853</span> <span style="color: #ae81ff;">3799501</span> <span style="color: #ae81ff;">4162181</span> <span style="color: #ae81ff;">4054139</span> <span style="color: #ae81ff;">4444282</span> <span style="color: #ae81ff;">3650932</span> <span style="color: #ae81ff;">2176427</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">1618939</span> <span style="color: #ae81ff;">2233238</span> <span style="color: #ae81ff;">3817065</span> <span style="color: #ae81ff;">5818150</span> <span style="color: #ae81ff;">3706707</span> <span style="color: #ae81ff;">4735608</span> <span style="color: #ae81ff;">4906002</span> <span style="color: #ae81ff;">5332372</span> <span style="color: #ae81ff;">3792628</span> <span style="color: #ae81ff;">5375755</span> <span style="color: #ae81ff;">4965558</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">1945315</span> <span style="color: #ae81ff;">3840876</span> <span style="color: #ae81ff;">4067778</span> <span style="color: #ae81ff;">5000831</span> <span style="color: #ae81ff;">5723946</span> <span style="color: #ae81ff;">6140266</span> <span style="color: #ae81ff;">5988306</span> <span style="color: #ae81ff;">6947247</span> <span style="color: #ae81ff;">7937463</span> <span style="color: #ae81ff;">8743368</span> <span style="color: #ae81ff;">8086916</span> <span style="color: #ae81ff;">5688881</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">2184496</span> <span style="color: #ae81ff;">3654745</span> <span style="color: #ae81ff;">5012066</span> <span style="color: #ae81ff;">6537361</span> <span style="color: #ae81ff;">8484089</span> <span style="color: #ae81ff;">9277883</span> <span style="color: #ae81ff;">8459025</span> <span style="color: #ae81ff;">9267872</span> <span style="color: #ae81ff;">19068585</span> <span style="color: #ae81ff;">12859651</span> <span style="color: #ae81ff;">10659461</span> <span style="color: #ae81ff;">8440323</span> <span style="color: #ae81ff;">7010184</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">10262914</span> <span style="color: #ae81ff;">12794771</span> <span style="color: #ae81ff;">12277028</span> <span style="color: #ae81ff;">12724879</span> <span style="color: 
#ae81ff;">12961285</span> <span style="color: #ae81ff;">20532259</span> <span style="color: #ae81ff;">18702216</span> <span style="color: #ae81ff;">10694135</span> <span style="color: #ae81ff;">8839196</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">10654675</span> <span style="color: #ae81ff;">12327576</span> <span style="color: #ae81ff;">11908856</span> <span style="color: #ae81ff;">12509344</span> <span style="color: #ae81ff;">12900943</span> <span style="color: #ae81ff;">13306893</span> <span style="color: #ae81ff;">22230669</span> <span style="color: #ae81ff;">14807195</span> <span style="color: #ae81ff;">12284603</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11686040</span> <span style="color: #ae81ff;">13230293</span> <span style="color: #ae81ff;">13319081</span> <span style="color: #ae81ff;">14118093</span> <span style="color: #ae81ff;">13946536</span> <span style="color: #ae81ff;">14565072</span> <span style="color: #ae81ff;">14016943</span> <span style="color: #ae81ff;">1591452</span> <span style="color: #ae81ff;">12745062</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11846678</span> <span style="color: #ae81ff;">13110368</span> <span style="color: #ae81ff;">13410345</span> <span style="color: #ae81ff;">13773196</span> <span style="color: #ae81ff;">14068741</span> <span style="color: #ae81ff;">14496670</span> <span style="color: #ae81ff;">14610711</span> <span style="color: #ae81ff;">14176855</span> <span style="color: #ae81ff;">13704015</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11876481</span> <span style="color: #ae81ff;">13449123</span> <span style="color: #ae81ff;">13137375</span> <span style="color: #ae81ff;">14121485</span> <span style="color: #ae81ff;">14669954</span> <span style="color: #ae81ff;">14199907</span> <span style="color: #ae81ff;">14519144</span> <span style="color: #ae81ff;">14513490</span> <span style="color: #ae81ff;">12860025</span> + +<span style="color: #e6db74;">"Fwrite report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">1201</span> <span style="color: #ae81ff;">1028</span> <span style="color: #ae81ff;">1556</span> <span style="color: #ae81ff;">1224</span> <span style="color: #ae81ff;">1542</span> +<span 
style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">2446</span> <span style="color: #ae81ff;">3049</span> <span style="color: #ae81ff;">3126</span> <span style="color: #ae81ff;">2506</span> <span style="color: #ae81ff;">3069</span> <span style="color: #ae81ff;">3033</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">3618</span> <span style="color: #ae81ff;">6170</span> <span style="color: #ae81ff;">6049</span> <span style="color: #ae81ff;">4987</span> <span style="color: #ae81ff;">4842</span> <span style="color: #ae81ff;">5947</span> <span style="color: #ae81ff;">7763</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">5425</span> <span style="color: #ae81ff;">9847</span> <span style="color: #ae81ff;">7313</span> <span style="color: #ae81ff;">7036</span> <span style="color: #ae81ff;">7310</span> <span style="color: #ae81ff;">7989</span> <span style="color: #ae81ff;">7946</span> <span style="color: #ae81ff;">9715</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">13853</span> <span style="color: #ae81ff;">16636</span> <span style="color: #ae81ff;">19160</span> <span style="color: #ae81ff;">19593</span> <span style="color: #ae81ff;">19566</span> <span style="color: #ae81ff;">15910</span> <span style="color: #ae81ff;">15960</span> <span style="color: #ae81ff;">12456</span> <span style="color: #ae81ff;">13786</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">27893</span> <span style="color: #ae81ff;">20044</span> <span style="color: #ae81ff;">28567</span> <span style="color: #ae81ff;">27811</span> <span style="color: #ae81ff;">32083</span> <span style="color: #ae81ff;">22159</span> <span style="color: #ae81ff;">28122</span> <span style="color: #ae81ff;">28216</span> <span style="color: #ae81ff;">24566</span> <span style="color: #ae81ff;">25411</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">40241</span> <span style="color: #ae81ff;">46115</span> <span style="color: #ae81ff;">45585</span> <span style="color: #ae81ff;">44147</span> <span style="color: #ae81ff;">45574</span> <span style="color: #ae81ff;">37164</span> <span style="color: #ae81ff;">40634</span> <span style="color: #ae81ff;">36376</span> <span style="color: #ae81ff;">36666</span> <span style="color: #ae81ff;">36398</span> <span style="color: #ae81ff;">39529</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">49482</span> <span style="color: #ae81ff;">42399</span> <span style="color: #ae81ff;">26672</span> <span style="color: #ae81ff;">58402</span> <span style="color: #ae81ff;">40963</span> <span style="color: #ae81ff;">42889</span> <span style="color: #ae81ff;">43016</span> <span style="color: #ae81ff;">38716</span> <span style="color: #ae81ff;">40642</span> <span style="color: #ae81ff;">40711</span> <span style="color: #ae81ff;">39041</span> <span style="color: #ae81ff;">51283</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">57936</span> <span style="color: #ae81ff;">51302</span> <span style="color: #ae81ff;">58686</span> <span style="color: #ae81ff;">60210</span> <span style="color: #ae81ff;">60358</span> <span style="color: #ae81ff;">58286</span> <span style="color: #ae81ff;">54447</span> <span style="color: #ae81ff;">60466</span> <span style="color: #ae81ff;">46184</span> <span style="color: #ae81ff;">45328</span> <span style="color: #ae81ff;">60520</span> <span style="color: #ae81ff;">56615</span> 
<span style="color: #ae81ff;">66211</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">70797</span> <span style="color: #ae81ff;">69441</span> <span style="color: #ae81ff;">68121</span> <span style="color: #ae81ff;">69853</span> <span style="color: #ae81ff;">69705</span> <span style="color: #ae81ff;">68973</span> <span style="color: #ae81ff;">69636</span> <span style="color: #ae81ff;">64358</span> <span style="color: #ae81ff;">60092</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">77948</span> <span style="color: #ae81ff;">79992</span> <span style="color: #ae81ff;">81132</span> <span style="color: #ae81ff;">78097</span> <span style="color: #ae81ff;">81124</span> <span style="color: #ae81ff;">82129</span> <span style="color: #ae81ff;">79791</span> <span style="color: #ae81ff;">78973</span> <span style="color: #ae81ff;">81136</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">88820</span> <span style="color: #ae81ff;">88445</span> <span style="color: #ae81ff;">89824</span> <span style="color: #ae81ff;">83436</span> <span style="color: #ae81ff;">82538</span> <span style="color: #ae81ff;">86827</span> <span style="color: #ae81ff;">83531</span> <span style="color: #ae81ff;">90140</span> <span style="color: #ae81ff;">87518</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">92929</span> <span style="color: #ae81ff;">90907</span> <span style="color: #ae81ff;">91811</span> <span style="color: #ae81ff;">91523</span> <span style="color: #ae81ff;">87858</span> <span style="color: #ae81ff;">91384</span> <span style="color: #ae81ff;">92440</span> <span style="color: #ae81ff;">89511</span> <span style="color: #ae81ff;">90625</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">89799</span> <span style="color: #ae81ff;">92321</span> <span style="color: #ae81ff;">93384</span> <span style="color: #ae81ff;">91142</span> <span style="color: #ae81ff;">93129</span> <span style="color: #ae81ff;">93524</span> <span style="color: #ae81ff;">92925</span> <span style="color: #ae81ff;">88415</span> <span style="color: #ae81ff;">92265</span> + +<span style="color: #e6db74;">"Re-Fwrite report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: 
#e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">512</span> <span style="color: #ae81ff;">551</span> <span style="color: #ae81ff;">550</span> <span style="color: #ae81ff;">604</span> <span style="color: #ae81ff;">548</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">1337</span> <span style="color: #ae81ff;">1101</span> <span style="color: #ae81ff;">1201</span> <span style="color: #ae81ff;">1324</span> <span style="color: #ae81ff;">1198</span> <span style="color: #ae81ff;">1198</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">2407</span> <span style="color: #ae81ff;">1371</span> <span style="color: #ae81ff;">2382</span> <span style="color: #ae81ff;">2400</span> <span style="color: #ae81ff;">995</span> <span style="color: #ae81ff;">2047</span> <span style="color: #ae81ff;">2052</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">3468</span> <span style="color: #ae81ff;">4367</span> <span style="color: #ae81ff;">4324</span> <span style="color: #ae81ff;">4065</span> <span style="color: #ae81ff;">3990</span> <span style="color: #ae81ff;">3484</span> <span style="color: #ae81ff;">4821</span> <span style="color: #ae81ff;">3742</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">7632</span> <span style="color: #ae81ff;">7555</span> <span style="color: #ae81ff;">6546</span> <span style="color: #ae81ff;">8130</span> <span style="color: #ae81ff;">9624</span> <span style="color: #ae81ff;">7608</span> <span style="color: #ae81ff;">8814</span> <span style="color: #ae81ff;">8124</span> <span style="color: #ae81ff;">8807</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">13966</span> <span style="color: #ae81ff;">14155</span> <span style="color: #ae81ff;">12449</span> <span style="color: #ae81ff;">16144</span> <span style="color: #ae81ff;">14991</span> <span style="color: #ae81ff;">15187</span> <span style="color: #ae81ff;">13249</span> <span style="color: #ae81ff;">15017</span> <span style="color: #ae81ff;">15136</span> <span style="color: #ae81ff;">16236</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">24720</span> <span style="color: #ae81ff;">19740</span> <span style="color: #ae81ff;">28058</span> <span style="color: #ae81ff;">22494</span> <span style="color: #ae81ff;">26252</span> <span style="color: #ae81ff;">18835</span> <span style="color: #ae81ff;">20792</span> <span style="color: #ae81ff;">15273</span> <span style="color: #ae81ff;">27891</span> <span style="color: #ae81ff;">23427</span> <span style="color: #ae81ff;">26352</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">29458</span> <span style="color: #ae81ff;">31969</span> <span style="color: #ae81ff;">44661</span> <span style="color: #ae81ff;">30695</span> <span style="color: #ae81ff;">38040</span> <span style="color: #ae81ff;">37885</span> <span style="color: #ae81ff;">29500</span> <span style="color: #ae81ff;">31906</span> <span style="color: #ae81ff;">28386</span> <span style="color: #ae81ff;">31933</span> <span style="color: #ae81ff;">37522</span> <span style="color: #ae81ff;">33280</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">42028</span> <span style="color: #ae81ff;">43187</span> <span style="color: #ae81ff;">47250</span> <span style="color: #ae81ff;">39198</span> <span style="color: 
#ae81ff;">47194</span> <span style="color: #ae81ff;">50090</span> <span style="color: #ae81ff;">36649</span> <span style="color: #ae81ff;">51694</span> <span style="color: #ae81ff;">53250</span> <span style="color: #ae81ff;">45915</span> <span style="color: #ae81ff;">47228</span> <span style="color: #ae81ff;">46231</span> <span style="color: #ae81ff;">50302</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">63662</span> <span style="color: #ae81ff;">63307</span> <span style="color: #ae81ff;">58698</span> <span style="color: #ae81ff;">61717</span> <span style="color: #ae81ff;">61881</span> <span style="color: #ae81ff;">62478</span> <span style="color: #ae81ff;">63082</span> <span style="color: #ae81ff;">64404</span> <span style="color: #ae81ff;">58554</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">77407</span> <span style="color: #ae81ff;">77452</span> <span style="color: #ae81ff;">76306</span> <span style="color: #ae81ff;">76545</span> <span style="color: #ae81ff;">76239</span> <span style="color: #ae81ff;">78141</span> <span style="color: #ae81ff;">78366</span> <span style="color: #ae81ff;">77165</span> <span style="color: #ae81ff;">76313</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">82872</span> <span style="color: #ae81ff;">79831</span> <span style="color: #ae81ff;">83386</span> <span style="color: #ae81ff;">84523</span> <span style="color: #ae81ff;">78850</span> <span style="color: #ae81ff;">83928</span> <span style="color: #ae81ff;">83394</span> <span style="color: #ae81ff;">82009</span> <span style="color: #ae81ff;">81834</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">85955</span> <span style="color: #ae81ff;">83813</span> <span style="color: #ae81ff;">88369</span> <span style="color: #ae81ff;">87486</span> <span style="color: #ae81ff;">87861</span> <span style="color: #ae81ff;">87290</span> <span style="color: #ae81ff;">87452</span> <span style="color: #ae81ff;">87001</span> <span style="color: #ae81ff;">80203</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">87753</span> <span style="color: #ae81ff;">86529</span> <span style="color: #ae81ff;">92184</span> <span style="color: #ae81ff;">90098</span> <span style="color: #ae81ff;">88311</span> <span style="color: #ae81ff;">90571</span> <span style="color: #ae81ff;">90417</span> <span style="color: #ae81ff;">88908</span> <span style="color: #ae81ff;">89190</span> + +<span style="color: #e6db74;">"Fread report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span 
style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">163699</span> <span style="color: #ae81ff;">102074</span> <span style="color: #ae81ff;">177324</span> <span style="color: #ae81ff;">91684</span> <span style="color: #ae81ff;">103053</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">347758</span> <span style="color: #ae81ff;">199372</span> <span style="color: #ae81ff;">326403</span> <span style="color: #ae81ff;">228961</span> <span style="color: #ae81ff;">242839</span> <span style="color: #ae81ff;">343091</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">367241</span> <span style="color: #ae81ff;">603630</span> <span style="color: #ae81ff;">632429</span> <span style="color: #ae81ff;">773140</span> <span style="color: #ae81ff;">654798</span> <span style="color: #ae81ff;">635047</span> <span style="color: #ae81ff;">686182</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">982467</span> <span style="color: #ae81ff;">1140576</span> <span style="color: #ae81ff;">1383861</span> <span style="color: #ae81ff;">1422357</span> <span style="color: #ae81ff;">1537424</span> <span style="color: #ae81ff;">1127995</span> <span style="color: #ae81ff;">1245780</span> <span style="color: #ae81ff;">1355045</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">1434138</span> <span style="color: #ae81ff;">1790460</span> <span style="color: #ae81ff;">2052253</span> <span style="color: #ae81ff;">2198257</span> <span style="color: #ae81ff;">2120127</span> <span style="color: #ae81ff;">2165014</span> <span style="color: #ae81ff;">2514903</span> <span style="color: #ae81ff;">2573677</span> <span style="color: #ae81ff;">2516377</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">2124220</span> <span style="color: #ae81ff;">2760434</span> <span style="color: #ae81ff;">3374151</span> <span style="color: #ae81ff;">3901313</span> <span style="color: #ae81ff;">4038890</span> <span style="color: #ae81ff;">4248644</span> <span style="color: #ae81ff;">4154130</span> <span style="color: #ae81ff;">4188565</span> <span style="color: #ae81ff;">2482779</span> <span style="color: #ae81ff;">2133188</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">2269226</span> <span style="color: #ae81ff;">2363506</span> <span style="color: #ae81ff;">4149921</span> <span style="color: #ae81ff;">4934183</span> <span style="color: #ae81ff;">3558525</span> <span style="color: #ae81ff;">3717133</span> <span style="color: #ae81ff;">3343814</span> <span style="color: #ae81ff;">5217395</span> <span style="color: #ae81ff;">4157956</span> <span style="color: #ae81ff;">4904602</span> <span style="color: #ae81ff;">14427096</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">3030545</span> <span style="color: #ae81ff;">3944043</span> <span style="color: #ae81ff;">111474</span> <span style="color: #ae81ff;">6480761</span> <span style="color: #ae81ff;">5712526</span> <span style="color: #ae81ff;">5785631</span> <span style="color: 
#ae81ff;">5814021</span> <span style="color: #ae81ff;">6145757</span> <span style="color: #ae81ff;">6924845</span> <span style="color: #ae81ff;">5336352</span> <span style="color: #ae81ff;">5010310</span> <span style="color: #ae81ff;">4929091</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">2900803</span> <span style="color: #ae81ff;">4729877</span> <span style="color: #ae81ff;">5811959</span> <span style="color: #ae81ff;">8233027</span> <span style="color: #ae81ff;">7765083</span> <span style="color: #ae81ff;">7926299</span> <span style="color: #ae81ff;">9729770</span> <span style="color: #ae81ff;">8623560</span> <span style="color: #ae81ff;">8351087</span> <span style="color: #ae81ff;">8286637</span> <span style="color: #ae81ff;">8359214</span> <span style="color: #ae81ff;">7460745</span> <span style="color: #ae81ff;">7135381</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">10445431</span> <span style="color: #ae81ff;">10560200</span> <span style="color: #ae81ff;">10958466</span> <span style="color: #ae81ff;">9849567</span> <span style="color: #ae81ff;">10151482</span> <span style="color: #ae81ff;">10807663</span> <span style="color: #ae81ff;">9683705</span> <span style="color: #ae81ff;">9902793</span> <span style="color: #ae81ff;">112629</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">10149535</span> <span style="color: #ae81ff;">11395884</span> <span style="color: #ae81ff;">10447339</span> <span style="color: #ae81ff;">11636124</span> <span style="color: #ae81ff;">11403448</span> <span style="color: #ae81ff;">12022929</span> <span style="color: #ae81ff;">12113531</span> <span style="color: #ae81ff;">11548130</span> <span style="color: #ae81ff;">11644504</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11811578</span> <span style="color: #ae81ff;">12765186</span> <span style="color: #ae81ff;">12600753</span> <span style="color: #ae81ff;">13137656</span> <span style="color: #ae81ff;">13371887</span> <span style="color: #ae81ff;">12639283</span> <span style="color: #ae81ff;">13105710</span> <span style="color: #ae81ff;">13259651</span> <span style="color: #ae81ff;">11830642</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">12535646</span> <span style="color: #ae81ff;">106392</span> <span style="color: #ae81ff;">13264418</span> <span style="color: #ae81ff;">13691728</span> <span style="color: #ae81ff;">14205245</span> <span style="color: #ae81ff;">14246105</span> <span style="color: #ae81ff;">14302812</span> <span style="color: #ae81ff;">13901010</span> <span style="color: #ae81ff;">12796951</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span 
style="color: #ae81ff;">12691250</span> <span style="color: #ae81ff;">13869692</span> <span style="color: #ae81ff;">13450851</span> <span style="color: #ae81ff;">14090720</span> <span style="color: #ae81ff;">14578534</span> <span style="color: #ae81ff;">14814641</span> <span style="color: #ae81ff;">14841838</span> <span style="color: #ae81ff;">14614285</span> <span style="color: #ae81ff;">12914478</span> + +<span style="color: #e6db74;">"Re-Fread report"</span> + <span style="color: #e6db74;">"4"</span> <span style="color: #e6db74;">"8"</span> <span style="color: #e6db74;">"16"</span> <span style="color: #e6db74;">"32"</span> <span style="color: #e6db74;">"64"</span> <span style="color: #e6db74;">"128"</span> <span style="color: #e6db74;">"256"</span> <span style="color: #e6db74;">"512"</span> <span style="color: #e6db74;">"1024"</span> <span style="color: #e6db74;">"2048"</span> <span style="color: #e6db74;">"4096"</span> <span style="color: #e6db74;">"8192"</span> <span style="color: #e6db74;">"16384"</span> +<span style="color: #e6db74;">"64"</span> <span style="color: #ae81ff;">170672</span> <span style="color: #ae81ff;">103891</span> <span style="color: #ae81ff;">168002</span> <span style="color: #ae81ff;">94960</span> <span style="color: #ae81ff;">181276</span> +<span style="color: #e6db74;">"128"</span> <span style="color: #ae81ff;">199668</span> <span style="color: #ae81ff;">288205</span> <span style="color: #ae81ff;">350711</span> <span style="color: #ae81ff;">349797</span> <span style="color: #ae81ff;">320748</span> <span style="color: #ae81ff;">259633</span> +<span style="color: #e6db74;">"256"</span> <span style="color: #ae81ff;">587127</span> <span style="color: #ae81ff;">609455</span> <span style="color: #ae81ff;">688381</span> <span style="color: #ae81ff;">807442</span> <span style="color: #ae81ff;">738065</span> <span style="color: #ae81ff;">678807</span> <span style="color: #ae81ff;">697776</span> +<span style="color: #e6db74;">"512"</span> <span style="color: #ae81ff;">994294</span> <span style="color: #ae81ff;">1152822</span> <span style="color: #ae81ff;">1518938</span> <span style="color: #ae81ff;">1471074</span> <span style="color: #ae81ff;">1528669</span> <span style="color: #ae81ff;">1289930</span> <span style="color: #ae81ff;">1368865</span> <span style="color: #ae81ff;">1413929</span> +<span style="color: #e6db74;">"1024"</span> <span style="color: #ae81ff;">1594978</span> <span style="color: #ae81ff;">2004366</span> <span style="color: #ae81ff;">2206161</span> <span style="color: #ae81ff;">2338280</span> <span style="color: #ae81ff;">2516377</span> <span style="color: #ae81ff;">2478620</span> <span style="color: #ae81ff;">2546213</span> <span style="color: #ae81ff;">2632033</span> <span style="color: #ae81ff;">2111788</span> +<span style="color: #e6db74;">"2048"</span> <span style="color: #ae81ff;">2194777</span> <span style="color: #ae81ff;">2809182</span> <span style="color: #ae81ff;">3229534</span> <span style="color: #ae81ff;">3929871</span> <span style="color: #ae81ff;">4240255</span> <span style="color: #ae81ff;">4453499</span> <span style="color: #ae81ff;">4441984</span> <span style="color: #ae81ff;">4405533</span> <span style="color: #ae81ff;">2659582</span> <span style="color: #ae81ff;">1906837</span> +<span style="color: #e6db74;">"4096"</span> <span style="color: #ae81ff;">2391139</span> <span style="color: #ae81ff;">2585847</span> <span style="color: #ae81ff;">4280194</span> <span style="color: #ae81ff;">6847827</span> <span style="color: 
#ae81ff;">3621537</span> <span style="color: #ae81ff;">3680502</span> <span style="color: #ae81ff;">3433360</span> <span style="color: #ae81ff;">5558378</span> <span style="color: #ae81ff;">3706707</span> <span style="color: #ae81ff;">5020702</span> <span style="color: #ae81ff;">14271297</span> +<span style="color: #e6db74;">"8192"</span> <span style="color: #ae81ff;">3136499</span> <span style="color: #ae81ff;">4164914</span> <span style="color: #ae81ff;">5271672</span> <span style="color: #ae81ff;">6809564</span> <span style="color: #ae81ff;">5871647</span> <span style="color: #ae81ff;">5975808</span> <span style="color: #ae81ff;">6196743</span> <span style="color: #ae81ff;">5997714</span> <span style="color: #ae81ff;">7735533</span> <span style="color: #ae81ff;">5142283</span> <span style="color: #ae81ff;">4876623</span> <span style="color: #ae81ff;">4710810</span> +<span style="color: #e6db74;">"16384"</span> <span style="color: #ae81ff;">3032391</span> <span style="color: #ae81ff;">4761338</span> <span style="color: #ae81ff;">5964297</span> <span style="color: #ae81ff;">7481050</span> <span style="color: #ae81ff;">7827884</span> <span style="color: #ae81ff;">8270680</span> <span style="color: #ae81ff;">9786581</span> <span style="color: #ae81ff;">8351087</span> <span style="color: #ae81ff;">8659420</span> <span style="color: #ae81ff;">8467363</span> <span style="color: #ae81ff;">8346016</span> <span style="color: #ae81ff;">7904418</span> <span style="color: #ae81ff;">7331011</span> +<span style="color: #e6db74;">"32768"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">12477655</span> <span style="color: #ae81ff;">11665355</span> <span style="color: #ae81ff;">12068958</span> <span style="color: #ae81ff;">11800570</span> <span style="color: #ae81ff;">10709134</span> <span style="color: #ae81ff;">12337643</span> <span style="color: #ae81ff;">12305608</span> <span style="color: #ae81ff;">12516290</span> <span style="color: #ae81ff;">9533907</span> +<span style="color: #e6db74;">"65536"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">11630216</span> <span style="color: #ae81ff;">13320434</span> <span style="color: #ae81ff;">12629467</span> <span style="color: #ae81ff;">14444598</span> <span style="color: #ae81ff;">13896193</span> <span style="color: #ae81ff;">13579712</span> <span style="color: #ae81ff;">14039564</span> <span style="color: #ae81ff;">12850283</span> <span style="color: #ae81ff;">11700514</span> +<span style="color: #e6db74;">"131072"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">12481740</span> <span style="color: #ae81ff;">13511230</span> <span style="color: #ae81ff;">13263809</span> <span style="color: #ae81ff;">13840848</span> <span style="color: #ae81ff;">14115193</span> <span style="color: #ae81ff;">13414303</span> <span style="color: #ae81ff;">13740526</span> <span style="color: #ae81ff;">13324246</span> <span style="color: #ae81ff;">11918875</span> +<span style="color: #e6db74;">"262144"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: 
#ae81ff;">0</span> <span style="color: #ae81ff;">12852148</span> <span style="color: #ae81ff;">11100705</span> <span style="color: #ae81ff;">13639078</span> <span style="color: #ae81ff;">14047711</span> <span style="color: #ae81ff;">14639112</span> <span style="color: #ae81ff;">14644962</span> <span style="color: #ae81ff;">14770479</span> <span style="color: #ae81ff;">14244628</span> <span style="color: #ae81ff;">12740160</span> +<span style="color: #e6db74;">"524288"</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">12857393</span> <span style="color: #ae81ff;">14155027</span> <span style="color: #ae81ff;">13637369</span> <span style="color: #ae81ff;">14200640</span> <span style="color: #ae81ff;">14875572</span> <span style="color: #ae81ff;">15050689</span> <span style="color: #ae81ff;">15099157</span> <span style="color: #ae81ff;">14856578</span> <span style="color: #ae81ff;">13190646</span> + +real 10m25.537s +user 0m1.316s +sys 0m14.994s</code></pre></div> + + + + + Late to the party and a few bits short + + 2021-01-16T21:16:11-07:00 + https://hpc.social/2021/late-to-the-party-and-a-few-bits-short + <p>I recently had the opportunity to purchase a pristine Novena desktop system. +For those who aren&rsquo;t aware, <a href="https://en.wikipedia.org/wiki/Novena_(computing_platform)">Novena</a> is a Freescale i.mx6 based open-hardware computing platform +which began shipping in 2015. It was available as a desktop, laptop, standalone +board and a really neat heirloom version with a wooden case. The Novena was +always a curiosity for me since it was announced. But back in 2015, I missed my +opportunity to purchase one – probably due to a bit of procrastination and the +fact that I already had a Udoo Quad board, which is powered by the same +processor. Because it’s based on the 32-bit processor, I purchased it with <em>open +eyes</em>, knowing that it would not deliver M1 performance. Remarkably, although +the creators of the Novena have declared it EOL status, there are still some +components available to purchase on <a href="https://www.crowdsupply.com/sutajio-kosagi/novena">Crowd Supply</a>, including mainboards.</p> + +<p><strong>Hackable? Yes, please</strong></p> + +<p>I’m a bit of a boomer when it comes to technology. I cut my teeth back in the +day on highly expandable systems such as the IBM PC XT, Commodore Amiga 2000 +and still to this day do my fair share of tinkering - for example that +super cool <a href="https://www.gaborsamu.com/blog/flora_watch/">Flora watch project</a> which I did back in 2020. That being said, I’ve +also been one to appreciate leading edge design from Apple and the super cool +NeXT and SPARCstation systems designed by the renown team at <a href="https://en.wikipedia.org/wiki/Frog_Design_Inc.">Frog Design</a>. But cases are designed to house and protect what’s inside of a computer when it&rsquo;s operating.</p> + +<p>The Novena desktop and laptop versions eschew this for a design which features +a pop out screen, supported by a gas strut similar to what you&rsquo;d see on a +hatchback liftgate, exposing the mainboard in all it&rsquo;s glory - when the system +is operating - caution is always a good idea.</p> + +<p>Of course I could tell you about the time many moons ago that I fried a system +by carelessly dropping a metalic object on the mainboard while the system was +running. 
+running. With that hard lesson learned, I&rsquo;m being super cautious with Novena.</p>
+
+<p><strong>Better late than never</strong></p>
+
+<p>My Novena desktop arrived from a far-off land and survived a transatlantic
+voyage unscathed, thanks to impeccable packaging. So although I&rsquo;m very late to the Novena party, I managed to make it, circa 2021.</p>
+
+<p>Before deciding to purchase this previously loved Novena, one important
+factor I researched was OS support, which is often spotty for such
+non-standard systems. Luckily, an industrious person has kicked off the <a href="https://github.com/novena-next"><em>novena-next
+project</em></a>, which aims to deliver OS support for
+the Novena for the foreseeable future. As always, your mileage may vary.</p>
+
+<p><strong>Seeing is believing</strong></p>
+
+<p>Opening the package, I was like a kid at Christmas. The previous owner shipped
+me <em>the whole shebang</em> - the Novena-RF SDR, extra green bezel, speakers, screws,
+clips, power adapter, etc. I connected the system to power and it immediately
+sprang to life and booted an older Debian version.</p>
+
+<p>I’ve done a lot of tinkering since that first day. My Novena now has Debian 10
+<em>Buster</em> installed (relying on support from <em>novena-next</em>) and boots from a
+SATA SSD. The speakers have been installed along with the Novena-RF SDR (which
+replaces the breakout board). In fact, I’m writing this blog on the Novena
+running LibreOffice, while listening to music from YouTube through Chromium,
+along with a bunch of terminals opened to some much more powerful systems
+humming along in my basement.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/novena_desktop.png" />
+</figure>
+
+<p>Novena definitely won’t win any speed records and is a few bits short of 64.
+But it makes up for all of that in character. As I experiment with Novena, I
+plan a few more blog posts along the way. Stay tuned for more. A computer with nine
+lives? It just may be the case with Novena.</p>
+
+
+
+
+ SRE to Solutions Architect
+
+ 2021-01-02T19:06:36-07:00
+ https://hpc.social/2021/sre-to-solutions-architect
+ <p>It’s been about two years since I joined NVIDIA as a Solutions Architect, which was a pretty big job change for me! Most of my previous work was in jobs that could fall under the heading of “site reliability engineering”, where I was actively responsible for the operations of computing systems, but my new job mostly has me helping customers design and build their own systems.</p>
+
+<p>I’m finally starting to feel like I know what I’m doing at least 25% of the time, so I thought this would be a good time to reflect on the differences between these roles and what my past experience brings to the table for my (sort of) new job.</p>
+
+<p><span id="more-147"></span></p>
+
+<p><em>(Just a note: I feel like job titles for ops folks are a fraught topic. My job titles have included things like “Production Engineer”, “HPC Cluster Administrator”, and “HPC/Cloud Systems Engineer”. I tend to self-identify more with the term “sysadmin”, but I’m using “SRE” as the most current term that captures the work I’ve spent a lot of my career doing, where I generally approached ops from a software engineering perspective. Feel free to substitute your job title of choice!)</em></p>
+
+<p>I spent most of the past 10 years building and running large computing systems.
With the exception of ~18 months working on backend storage for a <a href="https://www.facebook.com">fairly large website</a>, I’ve mostly worked on large high-performance-computing (HPC) clusters. These systems are generally used by researchers and engineers to run simulations and data analysis. The teams I joined were generally responsible for building these clusters, keeping them running, helping the researchers who used them, and making sure they performed well.</p> + +<p>In my day-to-day work in SRE (or whatever you call it), I mostly thought about problems like:</p> + +<ul><li>Are my team’s services operating reliably and predictably, according to our defined metrics?<ul><li>Translated: What’s broken today?!</li></ul></li><li>Are our (internal) customers having a good <em>qualitative</em> experience?</li><li>For any current or recently-past incidents, how can we understand what went wrong and incorporate that into our future development?</li><li>What major features or other changes are we hoping to release soon? How can we be confident they’ll work correctly and reliably?</li><li>Are we expecting to have to turn up more capacity or new systems soon? Are we ready to do so?</li><li>What projects can I pursue to automate anything boring that I have to work on?</li></ul> + +<hr class="wp-block-separator" /> + +<p>My role as a solutions architect is rather different, as I don’t actually have any services I’m responsible for keeping online. Instead, I’m generally working with external customers who are working with our products and using them in their own production environments. Because I’m focused on HPC and supercomputing, my customers have generally purchased NVIDIA’s hardware products, and are operating them in their own datacenters. I’m frequently talking to the SRE teams, but I’m not part of them myself.</p> + +<p>In my daily work as a solutions architect, I’m thinking more about questions like:</p> + +<ul><li>Do my (external) customers have a good understanding of what our products are and how to use them?<ul><li>This may include products they already use, or new products that they may be planning to deploy</li></ul></li><li>What are their pain points, and how can I feed that back to the product teams?<ul><li>And also: What new product developments can I provide pro-active advice on before it makes it to the customer?</li></ul></li><li>What new customer deployments are coming up, and how can I help them go smoothly?</li><li>How are our customers doing running their current clusters, and are they feeling a lot of pain?</li><li>What tools can I develop, or what content can I write, to help all of the above go well?</li></ul> + +<hr class="wp-block-separator" /> + +<p>On the one hand, I work on a lot of the same <em>problems</em> as a solutions architect as I did in SRE. I still spend a lot of time thinking about the scalability, performance, and reliability of HPC systems. I still care a lot about making sure the systems I help build are useful and usable for researchers.</p> + +<p>On the other hand, I’m not so much on the pointy end of these problems anymore. My work is mostly focused on enabling others to run reliable systems, rather than being directly on the hook for them. And while I do help directly manage some internal lab clusters, those systems have <em>very</em> loose SLOs. So in practice I haven’t been on call in about two years.</p> + +<p>I do think my experience in SRE has been really important in doing a good job in solutions architecture.
I like to think I have a pretty good instinct for systems design at this point, and I can often help identify problems and bottlenecks in early stages. My troubleshooting skills from SRE work are incredibly helpful, as a lot of my work is helping customers understand what the heck is broken on their clusters. And I also find that it really helps to have someone who “speaks the same language” as the SRE teams for our customers, especially because I feel like so many vendor relationships neglect reliability concerns in favor of features.</p> + +<p>The transition has been really interesting, and I’m still conflicted about which kind of job I prefer. I don’t exactly miss being on call&#8230; but I do somewhat miss the more visceral feeling of understanding a running system really well through sheer continuous contact with it. However, I really love helping my customers build cool systems, and I like the satisfaction of helping many different teams do well, versus focusing tightly on a single service.</p> + +<p>I’m really enjoying the solutions architect gig right now, but I also wouldn’t be surprised if I ended up doing SRE work directly again at some point.</p> + + + + + Sketching out HPC clusters at different scales + + 2020-12-14T01:03:23-07:00 + https://hpc.social/2020/sketching-out-hpc-clusters-at-different-scales + <p>High-performance computing (HPC) clusters come in a variety of shapes and sizes, depending on the scale of the problems you’re working on, the number of different people using the cluster, and what kinds of resources they need to use. </p> + +<p>However, it’s often not clear what kinds of differences separate the kind of cluster you might build for your small research team:</p> + +<figure class="wp-block-image size-large"><img alt="" class="wp-image-139" height="1339" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image-9.jpg" width="1601" /><figcaption>Note: do not use in production</figcaption></figure> + +<p>From the kind of cluster that might serve a large laboratory with many different researchers:</p> + +<figure class="wp-block-image size-large"><img alt="" class="wp-image-140" height="470" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/img_0143.jpg" width="892" /><figcaption>The Trinity supercomputer at Los Alamos National Lab, also known as “that goddamn machine” when I used to get paged at 3am</figcaption></figure> + +<p>There are lots of differences between a supercomputer and my toy Raspberry Pi cluster, but also a lot in common. From a management perspective, a big part of the difference is how many different specialized node types you might find in the larger system.</p> + +<p><span id="more-113"></span></p> + +<p><em>Just a note: in this post I’m assuming we’re talking about compute clusters of the type that might be used to run simulations or data analysis jobs.
This probably won’t help if you’re designing a database cluster, a Kubernetes cluster to serve a web infrastructure, etc.</em></p> + +<p>Let’s start with one of the simplest ways you can build a cluster: a collection of<strong> compute nodes</strong>, all connected to a network, with a single <strong>“head node</strong>” that coordinates work between them:</p> + +<figure class="wp-block-image size-large"><img alt="Diagram showing a single head node connected to five compute nodes with a single network" class="wp-image-118" height="524" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image.jpg" width="910" /></figure> + +<p>With this design, the head node performs most of the functions that coordinate work or provide shared services on the cluster. The compute nodes are then free for the actual compute jobs on the cluster, like simulating the weather or analyzing telescope data!</p> + +<p>Some of the shared services that most clusters provide from the head node include:</p> + +<ul><li>Running a <strong>job scheduler</strong> that accepts requests from the users and queues them up to run on the compute nodes</li><li>Exporting a <strong>shared filesystem </strong>to the other machines, so they can all access the same storage space</li><li>Accepting <strong>user logins</strong> so that the people who want to run on the cluster have an access point to the cluster</li><li>Acting as a <strong>management node</strong> that the cluster sysadmins can use to help maintain the rest of the cluster</li></ul> + +<p>This kind of design can scale remarkably well, and it’s probably the most common kind of cluster out there. But at some point, you might find that the head node is doing too much, and you need to split its functions across multiple machines.</p> + +<p>The first thing you’ll often see is moving user logins onto their own dedicated <strong>login node</strong>:</p> + +<figure class="wp-block-image size-large"><img alt="Diagram showing a login node, a management node, and five compute nodes connected on the same network" class="wp-image-120" height="643" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image-1.jpg" width="1317" /></figure> + +<p>All the other functions are still on the head node (which is often explicitly called a <strong>management node</strong> at this point). But by moving user logins to their own node, it becomes easier to do maintenance or make changes to the larger system without disturbing your users. </p> + +<p>(It also means that if your users accidentally crash the login node, they’re less likely to take down all those shared services on the management node&#8230;)</p> + +<p>If you have lots of users, you can also easily add more login nodes! 
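+</p>
+
+<p><em>To make this concrete, here’s a minimal sketch of how the node roles described so far might be wired together. It assumes Slurm as the job scheduler and NFS for the shared filesystem, and the hostnames (mgmt01 and node01 through node05) are hypothetical:</em></p>
+
+<pre><code># /etc/slurm/slurm.conf (fragment, shared by every node) -- a sketch, not a complete config
+SlurmctldHost=mgmt01                        # the scheduler daemon runs on the management node
+NodeName=node[01-05] CPUs=16 State=UNKNOWN  # the compute nodes that actually run jobs
+PartitionName=batch Nodes=node[01-05] Default=YES State=UP
+
+# /etc/exports on the management node -- the shared filesystem, exported over NFS
+/home    node*(rw,sync,no_root_squash) login*(rw,sync,no_root_squash)
+</code></pre>
+
+<p><em>Notice that the login nodes don’t have to be registered as compute resources at all; they just need the scheduler’s client tools and the NFS mount, which is a big part of why they’re so cheap to add.</em></p>
+
+<p>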
These scale pretty well because the shared services are all still on the management node, but your users get more interactive nodes for their development work.</p> + +<figure class="wp-block-image size-large"><img alt="Diagram showing three login nodes, a management node, and five compute nodes on the same network" class="wp-image-122" height="631" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image-2.jpg" width="1269" /></figure> + +<p>At this point, you might also set up a second management node in order to provide redundancy or failover in case your primary management node fails:</p> + +<figure class="wp-block-image size-large"><img alt="Diagram showing three login nodes, two management nodes, and five compute nodes" class="wp-image-124" height="703" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image-3.jpg" width="1291" /></figure> + +<p>At this point we have a lot of compute nodes, redundant management nodes, and a nice collection of login nodes for the users to use for their work. What else might we need as we scale up?</p> + +<p>Well, for one thing, the shared filesystem is still on the management node. We might want to split it off onto its own machine to provide better performance:</p> + +<figure class="wp-block-image size-large"><img alt="Diagram showing three login nodes, two management nodes, a storage node, and five compute nodes on the same network" class="wp-image-126" height="777" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image-4.jpg" width="1300" /><figcaption>Following tradition, storage is represented as a poorly-drawn cylinder to match the shape of a hard drive platter</figcaption></figure> + +<p>Or if we want to scale our performance higher than a single storage server can provide, we might want to use a <strong>distributed filesystem</strong> like Lustre, BeeGFS, or GPFS and provide a whole tier of dedicated storage machines:</p> + +<figure class="wp-block-image size-large"><img alt="Replace single storage node with three storage nodes in a cluster" class="wp-image-128" height="741" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image-5.jpg" width="1341" /></figure> + +<p>You might also notice that we’re using the same network for everything! Communication between compute nodes, access to storage, and management services are all competing to send messages over the same network. This could be a problem if, for example, the application wants to simultaneously read lots of data from storage and exchange messages with neighboring compute nodes. </p> + +<p>At this point we may want to split these different types of traffic onto their own networks:</p> + +<figure class="wp-block-image size-large"><img alt="Same diagram, but add a separate application network connecting only the compute nodes, and a separate storage network connecting storage and compute only" class="wp-image-130" height="1044" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image-6.jpg" width="1590" /></figure> + +<p>Depending on how much you need to optimize (or how much you want to spend!), you may have several different networks connecting all the machines in the cluster, separated by function. You may have dedicated networks for functions like:</p> + +<ul><li><strong>High-speed network</strong> (or <strong>application network</strong>): This is a dedicated network for user applications to communicate between compute nodes, and is often built using specialized hardware like Infiniband or a vendor-proprietary technology.
This is especially important if you use technologies like MPI in your applications, which rely heavily on inter-node communication.</li><li><strong>Storage network</strong>: This is a dedicated network for access to storage. If you rely on especially fast network storage, you might use Infiniband or another very fast network here too.</li><li><strong>Management network</strong>: This is often the “everything else” network, used for job scheduling, SSH, and other miscellaneous traffic. This is often a less-performant network, using 1Gb or 10Gb Ethernet, because we expect the heavier usage to be on the application or storage networks.</li><li><strong>Out-of-band management network</strong>: Many datacenter environments have methods for managing individual servers outside their operating systems, such as accessing the baseboard management controllers. However, this kind of access can be a security risk, and it’s often put on its own network to restrict access.</li></ul> + +<p>All these different networks may be on their own hardware, for the best performance; or they may be virtual networks (VLANs) sharing the same physical connections. </p> + +<p>Once you get past this point, there are many different ways to continue splitting off or adding special-purpose functions, but these are less common outside of very large sites.</p> + +<p>For example, you may have multiple independent storage systems you want to access:</p> + +<figure class="wp-block-image size-large"><img alt="Add a second storage cluster, separate from the first, on the storage network" class="wp-image-132" height="1293" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image-7.jpg" width="1600" /></figure> + +<p>Or your cluster may depend on fast access to an external resource, and you want to attach a dedicated tier of network routers:</p> + +<figure class="wp-block-image size-large"><img alt="Add a pair of router nodes on the management node. The router nodes also have connections to the internet " class="wp-image-134" height="1398" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/image-8.jpg" width="1492" /></figure> + +<p>Or you may even have some slower tier of storage that you need to move data in and out of, such as S3 or a tape system, and build a set of dedicated machines for data movement:</p> + +<figure class="wp-block-image"><img alt="Add a pair of data movement nodes connected to the management nodes. The data movement nodes also have a connection to an external storage system" class="wp-image-135" height="1600" src="https://thinking.ajdecon.org/wp-content/uploads/2020/12/53d60f83-a8c7-447a-b528-43e61ff5e300-3966-00000359711b33bb_file.jpg" width="1600" /></figure> + +<p>In other words, you can add as much complexity as you like! Or, as much as your users and workloads require. Very complex environments serving many researchers may have many different tiers of dedicated machines, for data movement, network routing, managing software licenses, and more. But not every environment will need this type of complexity.</p> + +<p>In all cases, the general strategy is the same: if your work is being bottlenecked by some special-purpose function, you may consider moving that work to dedicated machines to get better performance. </p> + +<p>This needs to be balanced, though, against the costs of doing so, in money, power, rack space, or other constraints.
Frequently, there’s a trade-off between adding special-purpose machines and adding more compute machines, and your users might prefer to just have more compute!</p> + + + + + When Research Infrastructure Is and Isn't Maintained + + 2020-12-04T00:00:00-07:00 + https://hpc.social/2020/when-research-infrastructure-is-and-isn-t-maintained + <p>(Note: This post is adapted from <a href="https://www.researchcomputingteams.org/newsletter_issues/0053">#53</a> of the <a href="https://www.researchcomputingteams.org">Research Computing Teams Newsletter</a>)</p> + +<p>There were two big stories in the news this week (as I write this, at the end of 2020) about what’s possible with sustained research infrastructure funding and what happens when research infrastructure isn’t sustained.</p> + +<p>In the first, you’ve probably read about AlphaFold, DeepMind’s effort to bring deep learning to protein folding. <a href="https://www.the-scientist.com/news-opinion/deepmind-ai-speeds-up-the-time-to-determine-proteins-structures-68221">It did very well</a> in the 14th Critical Assessment of (protein) Structure Prediction (CASP) contest. Predictably but unfortunately, Google’s press releases wildly overhyped the results - “Protein Folding Solved”.</p> + +<p>Most proteins fold very robustly in the chaotic environment of the cell, and so it’s expected that there should be complex features that predict how the proteins’ folded configurations look. We still don’t know anything about the model AlphaFold used - other than it did very well on these 100 proteins - or how it was trained. There are a lot of questions of how it will work with more poorly behaved proteins - a wrong confident prediction could be much worse than no prediction. But it did get very good results, and with a very small amount of computational time to actually make the predictions. That raises a lot of hope for the scope of near-term future advances.</p> + +<p>But as <a href="https://twitter.com/aledmedwards/status/1333754396530847745">Aled Edwards points out on twitter</a>, the real story here is one of long-term, multi-decadal investment in research infrastructure - including research data infrastructure - by the structural biology community. The <a href="https://www.wwpdb.org">protein data bank</a> was set up 50 years ago (!!), and a culture of data sharing of these laboriously solved protein structures grew up around it, with a norm of contributing to (and helping curate) the data bank. That databank has been continuously curated and maintained, new techniques developed, eventually leading to the massive database on which methods can now be trained and results compared.</p> + +<p>It’s the sustained funding and support - monetarily but also in terms of aligning research incentives like credit - which built the PDB. The other big story we heard this week tells us that you can’t just fund a piece of infrastructure, walk away, and expect the result to be self-sustaining. On December 1st, the iconic <a href="https://www.the-scientist.com/news-opinion/famous-arecibo-radio-telescope-in-puerto-rico-collapses-68219">Arecibo Radio Telescope in Puerto Rico collapsed</a>.
The telescope was considered important enough to keep running - there was no move to decommission it until late November - but not important enough to keep funding the maintenance to keep it functioning.</p> + +<p><img alt="Overhead image of a broken Arecibo Telescope" src="https://www.dursi.ca/assets/imgs/arecibo-collapsed.jpg" /></p> + +<p>Digital research infrastructure - software, data resources, computing systems - falls apart at least as quickly without ongoing funded effort to maintain it. It’s not about whether these digital pieces of infrastructure are “sustainable”; it’s whether or not they are <em>sustained</em>. Too many critical pieces of our digital research infrastructure are not being sustained.</p> + + + + + SC'20 Recap + + 2020-11-23T13:00:00-07:00 + https://hpc.social/2020/sc-20-recap + <p>The <a href="https://sc20.supercomputing.org/">HPC industry's biggest conference, SC</a>, was held virtually over the last two weeks. Although the original plan to hold it in Atlanta was supplanted by an all-virtual format, it still managed to be a whirlwind show full of product showcases, research presentations, and interesting talks, panels, and workshops. The virtual format certainly wasn't the same as attending in person, but some of the conference buzz and tone could still be sensed by following the <a href="https://twitter.com/search?q=%23SC20">#SC20 tag on Twitter</a>.</p> +<p>As <a href="https://glennklockwood.blogspot.com/2019/11/sc19-recap.html">with SC'19</a>, the conference seemed subdued in part due to the fact that many attendees were still being pulled away by their daily lives while attending and in part because the HPC community is still waiting for exascale to finally get here. The community's conversion to remote work has also smeared a lot of the usual vendor briefings and big announcements out over the entire five-month period since ISC'20, causing most of the hot news at SC this year to seem incremental over years past.</p> +<p>Still, I picked up on a few themes that I thought were noteworthy, and what follows is a recap of some of the highlights from the conference as I saw them.</p> +<p>All the standard disclaimers apply to the remainder of this post: these are just my personal opinions and do not represent the viewpoint of anyone other than me. I'm not an expert on many (most?) of these topics, so my observations may be misinformed or downright wrong--feel free to get in touch if I stand to be corrected. Also bear in mind that what I find interesting is colored by my day job as a storage architect; I don't pay close attention to the scientific or application spaces in HPC and instead focus on hardware, architecture, systems design, integration, and I/O.
As such, I'm sure I missed all sorts of topics that others find exciting.</p> +<h2 style="text-align: left;">Table of Contents</h2> +<ol><li><a href="#bigsplash">Big Splashes</a><ol> <li><a href="#bigsplash-whatsnew">What's new</a></li> <li><a href="#bigsplash-whatsmissing">What's missing</a></li></ol></li><li><a href="#themes">High-level Themes</a><ol> <li><a href="#comptechfutures">Computing Technologies Futures</a></li> <li><a href="#storagetechfutures">Storage Technologies Futures</a></li></ol></li><li><a href="#actualfutures">Actual Future Directions</a><ol> <li><a href="#actualfutures-hpcai">The Relationship of HPC and AI</a></li> <li><a href="#actualfutures-disagg">Disaggregation in Practice</a></li></ol></li><li><a href="#ssug">Spectrum Scale User Group vs. Lustre BOF</a><ol> <li><a href="#ssug-1">Enterprisey features that organizations may care about</a></li> <li><a href="#ssug-2">Manageability features that administrators may care about</a></li> <li><a href="#ssug-3">Performance, scalability, and reliability features that end users may care about</a></li> <li><a href="#ssug-4">Interface features that platform developers may care about</a></li> <li><a href="#ssug-overall">Overall Impressions</a></li></ol></li><li><a href="#io500">IO-500 BOF</a></li><li><a href="#conclusion">Concluding Thoughts</a></li></ol> +<h2 id="bigsplash" style="text-align: left;">Big Splashes</h2> +<p>Although there weren't any earth-shattering announcements this year, there were a few newsworthy developments that received a healthy amount of press attention.</p> +<h3 id="bigsplash-whatsnew" style="text-align: left;">What's new</h3> +<p><b>RIKEN's Fugaku machine</b> made its debut at ISC'20 in June this year, but I felt a lot of its deserved fanfare was muted by the newness of the pandemic and the late-binding decision to convert ISC'20 to being all remote.
SC'20 was when Fugaku got to really shine; it improved benchmark results for HPL, HPCG, and Graph500 relative to its ISC'20 numbers:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-h7Z74v-IiMQ/X7s3qep3YbI/AAAAAAABPnk/fJLP0QrjIFAIL_IQ0Vj3_9pfII91KVdGQCLcBGAsYHQ/Em-YT_1VcAA82Fj.jpeg" style="margin-left: 1em; margin-right: 1em;"><img alt="Fugaku performance improvements since July 2020" height="226" src="https://lh3.googleusercontent.com/-h7Z74v-IiMQ/X7s3qep3YbI/AAAAAAABPnk/fJLP0QrjIFAIL_IQ0Vj3_9pfII91KVdGQCLcBGAsYHQ/w400-h226/Em-YT_1VcAA82Fj.jpeg" title="Fugaku performance improvements since July 2020" width="400" /></a></div> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Fugaku performance improvements since July 2020 from <a href="https://event.on24.com/wcc/r/2825195/3357A8DF10E6050DE025D69073348677">Prof. Matsuoka's FLATS keynote</a></span></b></div> +<p>But RIKEN and Fujitsu had a number of early science success stories to showcase, highlighting how the machine was already being cited in scientific studies towards better understanding COVID-19.</p> +<p></p> +<p><b>Intel announced the Ice Lake Xeon architecture</b> as well and put a lot of marketing behind it. And by itself, Ice Lake is a <a href="https://www.hpcwire.com/solution_content/intel/the-ice-lake-top-10/">major advancement</a> since it's Intel's first server part that uses their 10 nm process and provides a PCIe Gen4 host interface, and it includes support for 2nd generation 3D XPoint DIMMs (Barlow Pass) and 8 DDR4 memory channels.</p> +<p>Unfortunately, Ice Lake is late to the party in the context of its competition; Intel's benchmark results <a href="https://newsroom.intel.com/news-releases/intel-building-future-high-performance-computing/">position Ice Lake as a competitor to AMD Rome</a>, which matches Ice Lake's 8-channel/PCIe Gen4-based platform despite being over a year old at this point. For reference:</p> +<div> <table class="tg" style="display: block; margin-left: auto; margin-right: auto;"> <thead> <tr> <th class="tg-6ic8"></th> <th class="tg-7btt">Intel Ice Lake<sup><a href="https://newsroom.intel.com/news-releases/intel-building-future-high-performance-computing">[1]</a></sup></th> <th class="tg-7btt">AMD Rome<sup><a href="https://www.amd.com/en/products/cpu/amd-epyc-7h12">[2]</a></sup></th> </tr> </thead> <tbody> <tr> <td class="tg-l2oz"><b>Shipping</b></td> <td class="tg-baqh">4Q2020</td> <td class="tg-baqh">3Q2019</td> </tr> <tr> <td class="tg-9j3s"><b>Cores</b></td> <td class="tg-c3ow">up to 32</td> <td class="tg-9wq8">up to 64</td> </tr> <tr> <td class="tg-9j3s"><b>Memory</b></td> <td class="tg-c3ow">8x DDR4-3200</td> <td class="tg-9wq8">8x DDR4-3200</td> </tr> <tr> <td class="tg-9j3s"><b>Host Interface</b></td> <td class="tg-c3ow">?x PCIe Gen4</td> <td class="tg-9wq8">128x PCIe Gen4</td> </tr> </tbody> </table></div> +<p>By the time Ice Lake starts shipping, AMD will be launching its next-generation Milan server processors, so it's difficult to get excited about Ice Lake if one isn't married to the Intel software ecosystem or doesn't have specific use for the new AVX512 instructions being introduced.</p> +<p>The Intel software ecosystem is not nothing, though, and Intel does seem to remain ahead on that front.
Intel had its <a href="https://www.oneapi.com/events/devcon2020/">inaugural oneAPI Dev Summit during SC'20</a>, and although I don't follow the application developer space very closely, my perception of the event is that it focused on showcasing the community momentum building around oneAPI rather than delivering splashy announcements. That said, this oneAPI Dev Summit seems to have sucked the air out of the room for other Intel software-centric events; <a href="https://www.ixpug.org/events">IXPUG had no discernible presence at SC'20</a> despite IXPUG changing its name from "Intel Xeon Phi User Group" to "Intel eXtreme Performance User Group" when Xeon Phi was sunset. However, one dev event is better than none; I did not hear of any equivalent events hosted by AMD at SC'20.</p> +<p><b>NVIDIA also announced a new SKU of its Ampere A100 data center GPU</b> with a whopping 80 GB of HBM2. This was surprising to me since the A100 with 40 GB of HBM2 was only first unveiled two quarters ago. The A100 chip itself is the same, so there's no uptick in flops; they just moved to HBM2e stacks, which allowed them to double the capacity and get an incremental increase in memory bandwidth.</p> +<p>So, who's this part for? Doubling the HBM capacity won't double the price of the GPU, but the A100-80G part will undoubtedly be more expensive despite there being no additional FLOPS. My guess is that this part was released for</p> +<p></p> +<ol style="text-align: left;"> <li>People who just want to fit bigger working sets entirely in GPU memory. Larger deep learning models are the first thing that comes to mind.</li> <li>People whose applications can't fully utilize A100's flops due to suboptimal memory access patterns; higher HBM2e bandwidth may allow such apps to move a little higher along the roofline.</li> <li>People who may otherwise want to purchase AMD's next-generation data center GPU, which will undoubtedly also use HBM2e and will probably be released before the follow-on to Ampere is ready.</li></ol> +<p></p> +<p>NVIDIA also upgraded its Selene supercomputer to include these A100-80G parts, moving its Top500 position to #5 and demonstrating that these parts exist and deliver as advertised.</p> +<h3 id="bigsplash-whatsmissing" style="text-align: left;">What's missing</h3> +<p><b>HPE/Cray was pretty quiet</b> on announcements, especially after two SCs in a row with Shasta (now "Cray EX") news. HPE undoubtedly has its head down readying its first large Shasta installations, and given the fact that the primary manufacturing facilities for Cray Shasta are located in a <a href="https://www.wctrib.com/news/education/6749479-Chippewa-County-has-states-highest-14-day-COVID-19-case-rate-several-area-counties-also-see-increases">COVID hotspot in the US</a>, maybe this was to be expected--this autumn has not been the time to rush anything.</p> +<p>That said, we know that Cray EX systems have been shipping since July 2020:</p> +<blockquote class="twitter-tweet"> <p dir="ltr" lang="en">A wee video for Friday afternoon.
Watch the installation of the four-cabinet Shasta Mountain system, the first phase of the <a href="https://twitter.com/ARCHER2_HPC?ref_src=twsrc%5Etfw">@ARCHER2_HPC</a> 23-cabinet system.<a href="https://t.co/DqYRDJi39B">https://t.co/DqYRDJi39B</a><a href="https://twitter.com/Cray_Inc?ref_src=twsrc%5Etfw">@Cray_Inc</a> <a href="https://t.co/8D4Hv5Msmt">pic.twitter.com/8D4Hv5Msmt</a></p> + — EPCCed (@EPCCed) <a href="https://twitter.com/EPCCed/status/1289177495304990721?ref_src=twsrc%5Etfw">July 31, 2020</a></blockquote> +<p>So it is a little surprising that HPE was not promoting any early customer or science success stories yet, and the only Cray EX/Shasta system to appear on Top500 was <a href="https://top500.org/system/179900/">Alps, a modest 4.6 PF Rome-based system at CSCS</a>. Next year--either at the <a href="https://www.isc-hpc.com/press-releases/ISC-2021-will-take-place-virtually-in-the-summer-of-2021.html">all-virtual ISC'21</a> or SC'21--will likely be the year of Cray EX.</p> +<p><b>Intel was also pretty quiet about Aurora</b>, perhaps for the same reason as HPE/Cray. The fact that Intel's biggest hardware news was around Ice Lake suggests that Intel's focus is on fulfilling the promises of disclosures they made at SC'19 rather than paving new roads ahead. There was a healthy amount of broad-stroke painting about exascale, but aside from the oneAPI buzz I mentioned above, I didn't see anything technically substantive.</p> +<p>Sadly, <b>IBM was the most quiet</b>, and perhaps the most prominent appearance of IBM in this year's official program was in <a href="https://www.llnl.gov/news/llnl-ibm-win-sc20-test-time-blue-genel">winning the Test of Time Award for the Blue Gene/L architecture</a>. It was almost a eulogy of IBM's once-dominant position at the forefront of cutting-edge HPC research and development, and this feeling was underscored by the <a href="https://twitter.com/science_dot/status/1329544810915479553?s=21">absence of perhaps the most noteworthy IBMer</a> involved in the creation of Blue Gene. This isn't to say IBM had no presence at SC'20 this year; it's just clear that their focus is on being at the forefront of hybrid cloud and cognitive computing rather than supercomputing for supercomputing's sake.</p> +<h2 id="themes" style="text-align: left;">High-level Themes</h2> +<p>The most prevalent theme that I kept running into was not the technology on the horizon, but rather the technology further off. There were a few sessions devoted to things like "Post Moore's Law Devices" and "Exotic Technology" in 2035, and rather than being steeped in deep technical insight, they leaned more towards either <a href="https://insidehpc.com/2019/02/john-shalf-and-thomas-sterling-to-keynote-isc-2019-in-frankfurt/">recitations of similar talks</a> given in years past (<a href="https://twitter.com/HPC_Guru/status/1329132708593766402?s=20">one speaker presented slides</a> that were <a href="https://www.nextplatform.com/2016/06/24/alchemy-cant-save-moores-law/">literally five years old</a>) or outlandish claims that hinged on, in my opinion, incomplete views of how technology evolves.</p> +<p>I found the latter talks a bit disturbing to see in the SC program since they contained very little technical insight and seemed more focused on entertainment value--the sort of thing usually relegated to post-conference hotel bar conversation. So rather than repeat their predictions as gospel, I'll present my critical take on them.
I realize that it's far easier for me to throw stones at people at the top of the hill than to climb there myself, and I'm perfectly willing to accept that my opinions below are completely wrong. And, if you'd like to throw stones at me yourself, I contributed my position to <a href="https://sc20.supercomputing.org/?post_type=page&amp;p=3479&amp;id=pan116&amp;sess=sess187">a panel on tiered storage</a> this year against which all are welcome to argue.</p> +<h3 id="comptechfutures" style="text-align: left;">Computing Technologies Futures</h3> +<p>This year's focus on far-flung technologies at SC made me wonder--are these sorts of talks filling out the program because there's no clear path beyond exascale? Is it possible that the HPC community's current focus on climbing the exascale mountain is taking our minds off of the possibility that there's nothing past that mountain except desert?</p> +<p>For example, Shekhar Borkar gave his five-year outlook on memory technologies:</p> +<div> <blockquote class="twitter-tweet"> <p dir="ltr" lang="en">Memory Technology Score Card <br /><br />According to Shekhar it is SRAM, DRAM, Flash &amp; PCM for the next 5 years.<br /><br />Other technologies are OK for research but not ready for prime time yet.<a href="https://twitter.com/hashtag/SC20?src=hash&amp;ref_src=twsrc%5Etfw">#SC20</a> <a href="https://twitter.com/hashtag/HPC?src=hash&amp;ref_src=twsrc%5Etfw">#HPC</a> <a href="https://twitter.com/hashtag/AI?src=hash&amp;ref_src=twsrc%5Etfw">#AI</a> <a href="https://t.co/q7sjCp2DFH">pic.twitter.com/q7sjCp2DFH</a></p> + — HPC Guru (@HPC_Guru) <a href="https://twitter.com/HPC_Guru/status/1329129023314616320?ref_src=twsrc%5Etfw">November 18, 2020</a> </blockquote> </div> +<p>SRAM and DRAM are decades-old staples in the HPC industry, and even NAND has been used in production HPC for a decade now. The statement that PCM <i>may</i> be useful in the next five years is quite striking since <a href="https://arstechnica.com/information-technology/2017/03/intels-first-optane-ssd-375gb-that-you-can-also-use-as-ram/">PCM products have been shipping in volume since 2017</a>--from this, I take it that the future is going to look an awful lot like the present on the memory and storage front. The biggest change, if any, will likely be the economics of NAND and 3D integration evolving to a point where we can afford more all-flash and all-HBM systems in the coming years.</p> +<p>On the computational front, many of the soothsayers leaned heavily on using cryogenics for post-Moore's Law chip designs.
Ultra-low-temperature CMOS and superconductors for supercomputers are low-hanging fruit to pick when conjecturing about the future since (1) their physics are well understood, and (2) they have clear and nonlinear benefits over the CMOS technologies baked into chips today, as shown by Borkar:</p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-zuV0y49SHK8/X7nNeszLoAI/AAAAAAABPhc/_iiH0vrRZ6ke0KMwhX6oygdmT38Ubu-hACLcBGAsYHQ/s1884/Screen%2BShot%2B2020-11-21%2Bat%2B18.30.43.png" style="margin-left: 1em; margin-right: 1em;"><img alt="The benefits of low-temperature computing according to Shekhar Borkar" border="0" height="216" src="https://1.bp.blogspot.com/-zuV0y49SHK8/X7nNeszLoAI/AAAAAAABPhc/_iiH0vrRZ6ke0KMwhX6oygdmT38Ubu-hACLcBGAsYHQ/w400-h216/Screen%2BShot%2B2020-11-21%2Bat%2B18.30.43.png" title="The benefits of low-temperature computing according to Shekhar Borkar" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">The benefits of low-temperature computing according to <a href="https://sc20.supercomputing.org/?post_type=page&amp;p=3479&amp;id=inv105&amp;sess=sess298">Shekhar Borkar</a></span></b></div> +<p>The problem, of course, is that you won't ever be able to buy a cryogenic supercomputer unless a company can make enough money selling a cryogenic supercomputer to (1) pay down the non-recurring engineering costs, (2) recoup the costs of productizing the design, and (3) make enough profit to make the shareholders or venture capitalists underwriting (1) and (2) happy.</p> +<p>Realize that cryogenics at scale are dangerous and messy--compared to water cooling, there is no municipal supply of liquid helium, and the market for building pumps and piping for cryogenic plumbing is virtually zero compared to water-based plumbing. When you add the fact that the vast majority of data centers--including the hyperscalers who drive much of the data center market--don't want to touch water-cooled infrastructure, it's clear the HPC market would have to bear the cost of cryogenic computing at scale entirely on its own for the foreseeable future.</p> +<p>That all said, remember that this is just my own personal opinion.
For a helpful and mostly objective perspective, <a href="https://twitter.com/hpc_guru/status/1329129023314616320?s=21">@HPC_Guru posted a thread that captures the general sentiment of these sessions</a>.</p> +<p>For the sake of entertainment, I'll include some of the more outlandish slides that I saw on this topic.</p> +<p>Erik DeBenedictis had the following predictions, made in 2006, for 2020:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-3nEnwAHYAuM/X7nRmA9sYPI/AAAAAAABPho/eBlqvifpZhIKBYcZFfhM43Plj7XySdijQCLcBGAsYHQ/Screen%2BShot%2B2020-11-21%2Bat%2B18.48.05.png" style="margin-left: 1em; margin-right: 1em;"><img alt="The future of yesterday - a 2006 prediction of what HPC will look like in 2020 by Erik DeBenedictis" height="226" src="https://lh3.googleusercontent.com/-3nEnwAHYAuM/X7nRmA9sYPI/AAAAAAABPho/eBlqvifpZhIKBYcZFfhM43Plj7XySdijQCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-21%2Bat%2B18.48.05.png" title="The future of yesterday - a 2006 prediction of what HPC will look like in 2020 by Erik DeBenedictis" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">The future of yesterday - a 2006 prediction of what HPC will look like in 2020 by <a href="https://sc20.supercomputing.org/presentation/?sess=sess188&amp;id=pan106">Erik DeBenedictis</a></span></b></div> +<p>DeBenedictis' primary oversight in this prediction was failing to see the end of Dennard scaling due to physics. Had power consumption continued to drop with node size, we could very well be at 20 GHz today, and the fact that his core counts, flops/socket, and system peak were reasonable is a testament to good forecasting. However, the end of Dennard scaling is what forced CPUs towards longer vectors (which is how a 40-core socket can still get 1.6 TF without running at 20 GHz) and what motivated the development of the more power-efficient architecture of GPGPUs. DeBenedictis' predictions for the future, though, don't look as reasonable to me:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-4723-WwU21I/X7nUFGo0uaI/AAAAAAABPh0/llCQVvmd_4sLtS5ORsUI0UanOxS9mAb_wCLcBGAsYHQ/Screen%2BShot%2B2020-11-21%2Bat%2B18.59.07.png" style="margin-left: 1em; margin-right: 1em;"><img alt="The future of HPC is hybrid quantum/classical systems according to DeBenedictis" height="226" src="https://lh3.googleusercontent.com/-4723-WwU21I/X7nUFGo0uaI/AAAAAAABPh0/llCQVvmd_4sLtS5ORsUI0UanOxS9mAb_wCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-21%2Bat%2B18.59.07.png" title="The future of HPC is hybrid quantum/classical systems according to DeBenedictis" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">The future of HPC is hybrid quantum/classical systems according to <a href="https://sc20.supercomputing.org/presentation/?sess=sess188&amp;id=pan106">DeBenedictis</a></span></b></div> +<p>While quantum/classical hybrid machines may very well exist in 2035, they won't exactly be solving the same problems that today's supercomputers do.
In a sense, he chose to make a meta-prediction that science will change to fit the technology available--or perhaps he chose to redefine supercomputing to mean something even more niche than it does today.</p> +<p>Thomas Sterling also gave his 200 GHz yottaflop prediction:</p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-yBR4kFo5hKI/X7nWYm3GamI/AAAAAAABPiI/lAVBcUoqlpUqHlCLCbvMFZ9w1RJNsllxgCLcBGAsYHQ/s2048/Screen%2BShot%2B2020-11-21%2Bat%2B19.08.41.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Thomas Sterling's gonzo predictions of HPC in 2035" border="0" height="226" src="https://1.bp.blogspot.com/-yBR4kFo5hKI/X7nWYm3GamI/AAAAAAABPiI/lAVBcUoqlpUqHlCLCbvMFZ9w1RJNsllxgCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-21%2Bat%2B19.08.41.png" title="Thomas Sterling's gonzo predictions of HPC in 2035" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;"><a href="https://sc20.supercomputing.org/presentation/?sess=sess188&amp;id=pan106">Thomas Sterling</a>'s gonzo predictions of HPC in 2035</span></b></div> +<p>which hasn't changed since <a href="https://twitter.com/glennklockwood/status/1012014953765752832?s=20">he predicted a superconducting yottaflop at ISC'18</a>. Unlike DeBenedictis, Sterling chose not to redefine HPC to fit the available technology but instead predicted a physical, economical, and practical fantasy about the future. Not that there's anything wrong with that. Everyone's got to have a goal.</p> +<p>Kathy Yelick offered the most pragmatic 15-year prediction:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-LRIzyo-1ku4/X7nX66YUeAI/AAAAAAABPiU/a57rttDmfFQObPLtfPWJHvPIWaRY8LlFACLcBGAsYHQ/Screen%2BShot%2B2020-11-21%2Bat%2B19.15.25.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Kathy Yelick's predictions of HPC in 2035" height="226" src="https://lh3.googleusercontent.com/-LRIzyo-1ku4/X7nX66YUeAI/AAAAAAABPiU/a57rttDmfFQObPLtfPWJHvPIWaRY8LlFACLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-21%2Bat%2B19.15.25.png" title="Kathy Yelick's predictions of HPC in 2035" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;"><a href="https://sc20.supercomputing.org/presentation/?sess=sess188&amp;id=pan106">Kathy Yelick</a>'s predictions of HPC in 2035</span></b></div>
That said, if you actually attach flops and hertz to these predictions, the future does not look nearly as exciting as superconducting yottaflops do.</p> +<p>As dissatisfying as it may be, Shekhar Borkar had a slide that is probably the pathway into the future of HPC:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-qX6kB89e_sQ/X7nt9dKqLjI/AAAAAAABPjU/yV52iV_aqvwiHkXXkWDk3DlAdhSn-KcSwCLcBGAsYHQ/Screen%2BShot%2B2020-11-21%2Bat%2B18.14.34.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Moore's Law will survive as long as we change what it means" height="216" src="https://lh3.googleusercontent.com/-qX6kB89e_sQ/X7nt9dKqLjI/AAAAAAABPjU/yV52iV_aqvwiHkXXkWDk3DlAdhSn-KcSwCLcBGAsYHQ/w400-h216/Screen%2BShot%2B2020-11-21%2Bat%2B18.14.34.png" title="Moore's Law will survive as long as we change what it means" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Moore's Law will survive as long as we change what it means according to <a href="https://sc20.supercomputing.org/?post_type=page&amp;p=3479&amp;id=inv105&amp;sess=sess298">Borkar</a></span></b></div> +<p>The only way the future of HPC will be predictable is if you're willing to define what HPC is to fit whatever the available technologies are. Yelick expressed the same sentiment with her "Not sure, but it will be called OpenMP" bullet, and to his credit, Sterling himself did this with his Beowulf cluster. If the market just gives you a pile of parts, strap it together and call it HPC. And if transistor scaling has no more steam, find something that still has legs and call it Moore's Law.</p> +<h3 id="storagetechfutures" style="text-align: left;">Storage Technologies Futures</h3> +<p>On the storage front, the predictions from 2006 for 2020 storage technology were pretty reasonable as well. Dr. Mark Kryder (of Kryder's Law fame) predict that Kryder's Law would hold:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-BXV0smrShPA/X7ne46LTdfI/AAAAAAABPi8/PHj37Wz1UP47WZOFJWu5Zu_sZ9BjAvYWQCLcBGAsYHQ/Screen%2BShot%2B2020-11-21%2Bat%2B19.44.28.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Mark Kryder's vision for HDDs in 2020 as told in 2006" height="226" src="https://lh3.googleusercontent.com/-BXV0smrShPA/X7ne46LTdfI/AAAAAAABPi8/PHj37Wz1UP47WZOFJWu5Zu_sZ9BjAvYWQCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-21%2Bat%2B19.44.28.png" title="Mark Kryder's vision for HDDs in 2020 as told in 2006" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;"><a href="https://sc20.supercomputing.org/presentation/?sess=sess186&amp;id=pan105">Mark Kryder</a>'s vision for HDDs in 2020 as told in 2006</span></b></div> +<p>However he mispredicted how it would hold--his assumption was that surface bit density would keep skyrocketing, and this is why his bandwidth number was so far off. 
Packing magnetic bits ever more closely together turns out to be a very difficult problem, so the hard disk drive industry chose to increase capacities by solving the easier problem of packing more platters into a single 3.5" half-height form factor.</p> +<p>The flash predictions of Richard Freitas (who passed away in 2016) were also very reasonable:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-8HqWsOCKPdo/X7neIKciEKI/AAAAAAABPi0/jInrObVCr_UxJxgQuCFnfboCjnfnNH5MACLcBGAsYHQ/Screen%2BShot%2B2020-11-21%2Bat%2B19.42.20.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Predictions for solid-state storage in 2020 from Rich Freitas in 2006" height="233" src="https://lh3.googleusercontent.com/-8HqWsOCKPdo/X7neIKciEKI/AAAAAAABPi0/jInrObVCr_UxJxgQuCFnfboCjnfnNH5MACLcBGAsYHQ/w400-h233/Screen%2BShot%2B2020-11-21%2Bat%2B19.42.20.png" title="Predictions for solid-state storage in 2020 from Rich Freitas in 2006" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Predictions for solid-state storage in 2020 from <a href="https://sc20.supercomputing.org/presentation/?sess=sess186&amp;id=pan105">Rich Freitas</a> in 2006</span></b></div> +<p>His biggest miscalculation was not realizing that solid-state storage would bifurcate into the two tiers we now call RAM and flash. He predicted "storage class memory" based on the assumption that it would be block-based (like flash) but use a simple and low-latency bus (like RAM). We enjoy higher bandwidth and capacity than his prediction due to the increased parallelism and lower cost of NAND SSDs, but relying on PCIe instead of a memory bus and the low endurance of NAND (and therefore significant back-end data management and garbage collection) drove up the latency.</p> +<p>Predictions for the future were more outlandish. Kryder's prediction for 2035 was a bit too much for me:</p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-Y7vKyB56q6s/X7nk1xlRpiI/AAAAAAABPjI/PK71S5rkidw8Bljc6kUnNJEQF_h2U7DnwCLcBGAsYHQ/Screen%2BShot%2B2020-11-21%2Bat%2B20.10.59.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Kryder's 15-year outlook for HDDs" height="226" src="https://lh3.googleusercontent.com/-Y7vKyB56q6s/X7nk1xlRpiI/AAAAAAABPjI/PK71S5rkidw8Bljc6kUnNJEQF_h2U7DnwCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-21%2Bat%2B20.10.59.png" title="Kryder's 15-year outlook for HDDs" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;"><a href="https://sc20.supercomputing.org/presentation/?sess=sess186&amp;id=pan105">Kryder's 15-year outlook for HDDs</a> with a heaping serving of "oof"</span></b></div> +<p>Extrapolating Kryder's Law another 15 years puts us at 1.8 petabytes per hard drive, but this rests on the pretty shaky foundation that there's something holy about hard disk drive technology that will prevent people from pursuing different media. Realizing this requires two things to be true:</p> +<div> <ol style="text-align: left;"> <li>The HDD industry remains as profitable in the next fifteen years as it is today. Seeing as how parts of the HDD industry are already going extinct due to flash (remember when personal computers had hard drives?) 
and <a href="https://blog.westerndigital.com/host-managed-smr-dropbox/">hyperscale is taking more ownership of drive controller functionality</a> and eating into manufacturers' margins, I just don't see this as being likely.</li> <li>The required recording techniques (two-dimensional magnetic recording and bit-patterned media) can be developed as quickly and as cheaply as HAMR was. If they can't, see #1 above--there won't be the money or patience to sustain the HDD market.</li> </ol></div> +<p>This doesn't even consider the appeal of dealing with 1.8 PB drives as a system architect; at Kryder's forecasted numbers, it would take eleven days to fill, rebuild, or scrub one of these drives (moving 1.8 PB in eleven days works out to a sustained rate of only about 1.9 GB/s). As a system designer, why would I want this? Surely there are better ways to assemble spindles, motors, actuators, and sheet metal to increase my bandwidth and reduce my blast radius than cramming all these platters into a 3.5" form factor.</p> +<p>My bet (and note--I was not invited to contribute it, as I am not an expert!) is that the HDD market will continue to slow down as it falls off the Kryder Law curve due to scaling limitations. This will result in a slow but steady downward spiral where R&amp;D slows because it is starved of funding, and funding is starved because HDDs fall further and further off of the economics curve. HDDs won't be gone by 2035, but they will fit in the small gap that exists between low-cost write-once-read-many media (like ultra-dense trash flash) and low-cost write-once-read-never media (like tape).</p> +<p>Kryder essentially acknowledged that his projection relies on something intrinsically special about HDDs; he commented that the technological advancements required to reach 1.8 PB HDDs will happen because HDD engineers don't want to lose their jobs to the flash industry. Personally, I'd take a new job with an exciting future over a gold watch any day of the week. Maybe that's the millennial in me.</p> +<p>I found this general theme of wildly projecting into the future rather yucky this SC, and I won't miss it if it's gone for another fifteen years. By their very nature, these panels are <i>exclusive</i>, not inclusive--someone literally has to <i>die</i> in order for a new perspective to be brought on board. There was an element to this in the Top500 BOF as well, and <a href="https://twitter.com/glennklockwood/status/1328450538929758208?s=21">one slide in particular made me cringe</a> at how such a prominent good-ol-boys club was being held up before the entire SC community. These sorts of events are looking increasingly dated and misrepresentative of the HPC community amidst the backdrop of SC putting diversity front and center.</p> +<p></p> +<h2 id="actualfutures" style="text-align: left;">Actual Future Directions</h2> +<p>Although wild projections of the future felt like fashionable hot topics of the year, a couple of previous hot topics seemed to be cooling down and transitioning from hype to reality. Two notable trends popped out at me: the long-term relationship between HPC and AI and what disaggregation may really look like.</p> +<h3 id="actualfutures-hpcai" style="text-align: left;">The Relationship of HPC and AI</h3> +<p>As has been the norm for a few years now, deep learning (now more broadly "AI") was peppered across the SC program this year. Unlike previous years, though, the AI buzz seemed to be tempered by a little more pragmatism as if it were coming down the hype curve.
Perhaps the best talk that captured this was an invited talk by Cliff Young of Google about the possibility of a <a href="https://sc20.supercomputing.org/presentation/?id=inv111&amp;sess=sess299">Virtuous Cycle of HPC and AI</a>.</p> +<p>The "convergence of HPC and AI" has been talked about in the supercomputing community since HPC-focused GPUs were reinvented as AI accelerators. If you look at who's been selling this line, though, you may realize that the conversation is almost entirely one-way; the HPC industry pines for this convergence. The AI industry, frankly, doesn't seem to care what the HPC industry does because they're too busy monetizing AI and bankrolling the development of the N+1th generation of techniques and hardware to suit <i>their</i> needs, not those of the HPC industry.</p> +<p>Dr. Young's talk closed this loop by examining what the AI industry can learn from HPC; the so-called "Cambrian explosion" of accelerators is somewhere near its peak, which has resulted in a huge architectural design space to explore:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-f_O7zZRTwjs/X7n3n9mIWBI/AAAAAAABPjg/eRZU5bsRimQ4F08dctGqLpG0ODNE1wzrwCLcBGAsYHQ/Screen%2BShot%2B2020-11-18%2Bat%2B11.37.08.png" style="margin-left: 1em; margin-right: 1em;"><img alt="How ML can learn from HPC according to Cliff Young" height="226" src="https://lh3.googleusercontent.com/-f_O7zZRTwjs/X7n3n9mIWBI/AAAAAAABPjg/eRZU5bsRimQ4F08dctGqLpG0ODNE1wzrwCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-18%2Bat%2B11.37.08.png" title="How ML can learn from HPC according to Cliff Young" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">How ML can learn from HPC according to Cliff Young</span></b></div> +<p>When cast this way, HPC actually has a lot of experience in driving progress in these areas; the 4x4 systolic array design point has its genesis in the HPC-specific MIC architecture, and the HPC industry drove the productization of the DRAM-backed HBM memory hierarchy implemented by IBM for the Summit and Sierra systems. These HPC-led efforts presumably contributed to Google's ability to bet on much larger array sizes starting with its first-generation TPU.</p> +<p>In addition, it sounds like training has begun to reach some fundamental limits of data-parallel scalability:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-gU7jpzPEiJ0/X7n5NmusV9I/AAAAAAABPjs/oSjkuLcqrlQw4bf0HMyw8yXmglvEWRPowCLcBGAsYHQ/Screen%2BShot%2B2020-11-18%2Bat%2B11.38.54.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Limitations being faced by machine learning" height="226" src="https://lh3.googleusercontent.com/-gU7jpzPEiJ0/X7n5NmusV9I/AAAAAAABPjs/oSjkuLcqrlQw4bf0HMyw8yXmglvEWRPowCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-18%2Bat%2B11.38.54.png" title="Limitations being faced by machine learning" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Limitations being faced by machine learning</span></b></div> +<p>HPC has long dealt with the scalability limitations of allreduce by developing technologies like complex low- and high-radix fabric topologies and hardware offloading of collective operations.
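+</p>
+<p>(For anyone who doesn't live in MPI-land: the collective in question is a single call. Here's a minimal illustrative sketch in C--the payload below is one double, but gradient averaging in data-parallel training is logically this same operation over much larger buffers.)</p>
+<pre><code>/* allreduce.c -- every rank contributes a local value, and every rank
+ * receives the global sum. How this one call scales with rank count is
+ * exactly where fabric topology and collective offload matter. */
+#include &lt;mpi.h&gt;
+#include &lt;stdio.h&gt;
+
+int main(int argc, char **argv)
+{
+    MPI_Init(&amp;argc, &amp;argv);
+
+    int rank;
+    MPI_Comm_rank(MPI_COMM_WORLD, &amp;rank);
+
+    double local = (double)rank;   /* stand-in for a locally computed result */
+    double global = 0.0;
+    MPI_Allreduce(&amp;local, &amp;global, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+
+    if (rank == 0)
+        printf("sum over all ranks = %.1f\n", global);
+
+    MPI_Finalize();
+    return 0;
+}</code></pre>
+<p>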
Whether the AI industry simply borrows ideas from HPC and implements its own solutions or contributes to existing standards remains to be seen, but standards-based interfaces into custom interconnects like <a href="https://aws.amazon.com/about-aws/whats-new/2018/11/introducing-elastic-fabric-adapter/">AWS Elastic Fabric Adapter</a> are a promising sign.</p> +<p>Another "hard problem" area in which HPC is ahead is in sparse matrices:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-4dJ0BpxDwys/X7n650KJ8EI/AAAAAAABPj4/7KTjJpjXf38AB7Ts1JeVjzi66w1L6S0ugCLcBGAsYHQ/Screen%2BShot%2B2020-11-18%2Bat%2B11.45.24.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Impending challenges brought by moving to sparse methods in ML" height="208" src="https://lh3.googleusercontent.com/-4dJ0BpxDwys/X7n650KJ8EI/AAAAAAABPj4/7KTjJpjXf38AB7Ts1JeVjzi66w1L6S0ugCLcBGAsYHQ/w400-h208/Screen%2BShot%2B2020-11-18%2Bat%2B11.45.24.png" title="Impending challenges brought by moving to sparse methods in ML" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Impending challenges brought by moving to sparse methods in ML</span></b></div> +<p>Young's position is that, although "sparse" means different things to AI (50-90% sparse) than it does to HPC (&gt;95% sparse), HPC has shown that there are algorithms that can achieve very high fractions of peak performance on sparse datasets.</p> +<p>His concluding slide was uplifting in its suggestion that the HPC-AI relationship may not be strictly one-way forever:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-DwLW3Y4S89U/X7n8gutH4jI/AAAAAAABPkE/zzgpxV2v9aQ5Q_UixG-o1X-l4-MHfpxSACLcBGAsYHQ/Screen%2BShot%2B2020-11-18%2Bat%2B11.51.26.png" style="margin-left: 1em; margin-right: 1em;"><img alt="How HPC and ML can work together to advance technology" height="226" src="https://lh3.googleusercontent.com/-DwLW3Y4S89U/X7n8gutH4jI/AAAAAAABPkE/zzgpxV2v9aQ5Q_UixG-o1X-l4-MHfpxSACLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-18%2Bat%2B11.51.26.png" title="How HPC and ML can work together to advance technology" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">How HPC and ML can work together to advance technology</span></b></div> +<p>He specifically called out promise in the use of mixed precision; AI already relies on judicious use of higher-precision floating point to stabilize its heavy use of 16-bit arithmetic, and <a href="https://sos23.ornl.gov/wp-content/uploads/2019/03/II_6_Dongarra2.pdf">scientific computing is finding algorithms in which 16-bit precision can be tolerated</a>.</p> +<p>Being more hardware- and infrastructure-minded myself, I was particularly surprised to see this nod to liquid cooling early on:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-vmn8uJbdUDI/X7n_1ikQ22I/AAAAAAABPkQ/4wUYGZhPS1I_99OnOW1gup7hDpNdltwZACLcBGAsYHQ/Screen%2BShot%2B2020-11-21%2Bat%2B22.02.14.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Liquid cooling in hyperscale - one of few areas in which HPC is ahead" height="226" src="https://lh3.googleusercontent.com/-vmn8uJbdUDI/X7n_1ikQ22I/AAAAAAABPkQ/4wUYGZhPS1I_99OnOW1gup7hDpNdltwZACLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-21%2Bat%2B22.02.14.png" title="Liquid cooling in 
hyperscale - one of few areas in which HPC is ahead" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Liquid cooling in hyperscale - one of few areas in which HPC is ahead</span></b></div> +<p>Google's TPU v3 was its first foray into direct liquid cooling, a data center technology that HPC has been using for decades (think: <a href="https://www.computerhistory.org/revolution/supercomputers/10/68/275">Cray-2's waterfall</a>). While this may not seem spectacular to any PC enthusiast who's done liquid cooling, the difficulties of scaling these systems up to rack-, row-, and data center-scale are not always linear. Young explicitly acknowledged HPC's expertise in dealing with liquid-cooled infrastructure, and if hyperscale is driven in this direction further, HPC will definitely benefit from the advances that will be enabled by a new and massive market driver.</p> +<h3 id="actualfutures-disagg" style="text-align: left;">Disaggregation in Practice</h3> +<p>The promise of disaggregation--having pools of CPU, persistent memory, GPUs, and flash that you can strap together into a single node--has been around for a long time and has steadily gained attention as a potential candidate for an exascale technology. However, I don't think there was a realistic hope for this until IBM's AC922 node--the one that comprises the Summit and Sierra systems--hit the market and demonstrated a unified, hardware-enabled coherent memory space across CPUs and GPUs.</p> +<p>The actual story there wasn't great though; coherence between CPU and GPU was enabled using NVIDIA's proprietary NVLink protocol while the CPU and NIC were connected via a different coherence protocol, OpenCAPI, over the same physical interface. CCIX and GenZ also emerged as high-speed protocols for cache coherence and disaggregation, and the story only got worse when Intel put forth CXL as its standard for coherence and disaggregation.</p> +<p>Fortunately, the dust is now settling and it appears that CXL and GenZ are emerging at the front of the pack. There was an amicable panel session where members of these two consortia presented a unified vision for CXL and GenZ which <i>almost</i> appeared credible: CXL would be the preferred protocol for inside a chassis or rack, and GenZ would be the preferred protocol between chassis and racks.
Key features of the finalized CXL 2.0 standard were unveiled, which largely revolved around support for <i>CXL switches</i>:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-ERLI8khJdMM/X7oNek5C1RI/AAAAAAABPkk/o2ijd7QmsbQzvnTiCRd8gzzVXQJ9y6MuQCLcBGAsYHQ/Screen%2BShot%2B2020-11-18%2Bat%2B10.10.31.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Roles played by CXL 2.0's switch capability" height="226" src="https://lh3.googleusercontent.com/-ERLI8khJdMM/X7oNek5C1RI/AAAAAAABPkk/o2ijd7QmsbQzvnTiCRd8gzzVXQJ9y6MuQCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-18%2Bat%2B10.10.31.png" title="Roles played by CXL 2.0's switch capability" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Roles played by CXL 2.0's switch capability from <span style="text-align: left;"><a href="https://sc20.supercomputing.org/presentation/?id=pan117&amp;sess=sess185">Debendra Das Sharma</a></span></span></b></div> +<p></p> +<p>These switches function not only as port expanders to allow many devices to plug into a single host, but also as true switches that enable multi-root complexes that pool hosts and devices to dynamically map devices to hosts using CXL's managed hotplug capability. There's also support for a <i>CXL Fabric Manager</i> that moderates something that looks a lot like SR-IOV; a single physical device can be diced up and mapped to up to sixteen different hosts. On its surface, this looks like a direct, open-standard competitor to NVLink, NVSwitch, and MIG.</p> +<p>What these new CXL switches <i>do not</i> support is inter-switch linking; all CXL devices must share a single switch to maintain the low latency for which CXL was designed. This is where GenZ fits in since it is a true switched fabric, and this is why the <a href="https://www.businesswire.com/news/home/20200402005187/en/CXL-Consortium™-Gen-Z-Consortium-Announce-MOU-Agreement">CXL and GenZ consortia have signed a memorandum of understanding (MOU)</a> to design their protocols towards mutual compatibility and interoperability so that the future of disaggregated systems will be composed of pooled CXL devices bridged by a GenZ fabric. A direct parallel was drawn to PCIe and Ethernet, where a future disaggregated system may see CXL assume the role of PCIe, and GenZ may assume the role currently filled by Ethernet. </p> +<p>When it came time for Q&amp;A, the panel got more interesting.</p> +<p>A lot of the audience questions revolved around what standards CXL is planning to wipe off the face of the planet. The Intel (and CXL) panelist, Debendra Das Sharma, fielded the bulk of these questions and made it clear that</p> +<p>(1) <b>CXL will not replace DDR</b> as a local memory interface; it is a complementary technology.
This sounded a little disingenuous given that the following slide was also shown to highlight CXL 1.0's latency being on par with DRAM latency:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-6u64pO4Rr_Y/X7oZhQKD-aI/AAAAAAABPkw/AiwWLBV8uXI87soGMA2JT5PcirzQKR_sgCLcBGAsYHQ/Screen%2BShot%2B2020-11-18%2Bat%2B10.12.25.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Latency of CXL in the context of storage devices" height="226" src="https://lh3.googleusercontent.com/-6u64pO4Rr_Y/X7oZhQKD-aI/AAAAAAABPkw/AiwWLBV8uXI87soGMA2JT5PcirzQKR_sgCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-18%2Bat%2B10.12.25.png" title="Latency of CXL in the context of storage devices" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Latency of CXL in the context of storage devices</span></b></div> +<p><br />(2) <b>CXL will not replace PCIe</b> as a host I/O interface; it is a superset of PCIe, and many devices will remain happy with PCIe's load/store semantics. Of course, this is what I would say too if I had effective control over both the CXL standard and the PCIe SIG.</p> +<p>When asked directly if Intel had joined the GenZ consortium though, Sharma gave a terse "no" followed by "no comment" as to why. He then immediately followed that with a very carefully crafted statement:</p> +<blockquote style="border: none; margin: 0px 0px 0px 40px; padding: 0px;"> <p style="text-align: left;">"While we have not joined the GenZ consortium, we are fully supportive of making the CXL enhancements that will help GenZ."</p> +</blockquote> +<p>The panelists also commented that the MOU was designed to make transitioning from CXL to GenZ protocols smooth, but when asked exactly how the CXL-to-GenZ bridge would be exposed, Tim Symons (representing Microchip and GenZ) could not offer an answer since this bridging function is still being defined. These sorts of answers left me with the impression that CXL is in the driver's seat and GenZ has been allowed to come along for the ride.</p> +<p>Reading between the lines further, there was a striking absence of HPE people on the panel given the fact that <a href="https://www.nextplatform.com/2019/09/09/inside-hpes-gen-z-switch-fabric/">GenZ originated within HPE's "The Machine" project</a>. It remains unclear where GenZ fits now that HPE owns Slingshot, a different high-performance scale-out switched fabric technology. What would be the benefit of having a three-tier Slingshot-GenZ-CXL fabric? If CXL 2.0 adopted a single-hop switch and fabric manager, what's to stop CXL 3.0 from expanding its scope to a higher radix or multi-hop switch that can sensibly interface directly with Slingshot?</p> +<p>Given that CXL has already eaten a part of GenZ's lunch by obviating the need for GenZ host interfaces, I wouldn't be surprised if GenZ eventually meets the same fate as The Machine and gets cannibalized for parts that get split between future versions of Slingshot and CXL. CXL has already effectively killed CCIX, and IBM's decision to join CXL suggests that it may be positioning to <a href="https://www.nextplatform.com/2020/09/03/the-memory-area-network-at-the-heart-of-ibms-power10/">merge OpenCAPI's differentiators into CXL after Power10</a>. This is pure speculation on my part though.</p> +<h2 id="ssug" style="text-align: left;">Spectrum Scale User Group vs.
Lustre BOF</h2> +<p>Because SC'20 was smeared over two weeks instead of one, I got to attend both the Lustre BOF and one of the Spectrum Scale User Group (SSUG) sessions. I also came equipped with a much more meaningful technical understanding of Spectrum Scale this year (I've spent the last year managing a group responsible for Spectrum Scale at work), and it was quite fascinating to contrast the two events and their communities' respective priorities and interests.</p> +<p>The Spectrum Scale User Group featured a presentation on "<a href="https://www.spectrumscaleug.org/wp-content/uploads/2020/11/episode-11-what-is-new-in-5-1.pdf">What is new in Spectrum Scale 5.1.0</a>" and the <a href="https://sc20.supercomputing.org/presentation/?sess=sess316&amp;id=bof149">Lustre BOF</a> had its analogous Feature Discussion. I broadly bucketize the new features presented at both events into four categories:</p> +<h3 id="ssug-1" style="text-align: left;">1. Enterprisey features that organizations may care about</h3> +<p>For Spectrum Scale, this included support for newer releases of RHEL, SLES, Ubuntu, AIX(!), and Windows (!!). IBM also noted that Spectrum Scale now supports the <a href="https://www.ibm.com/downloads/cas/AM1PYZBB">zEDC hardware compression unit on the z15 mainframe processor</a>:</p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-g04tb8_FLa0/X7qwLXzgx6I/AAAAAAABPlM/8xK525c4h7UZ_5lX0jrIGSHuW2a8zyGQQCLcBGAsYHQ/Spectrum%2BScale%2B5.1%2BPlatform%2BUpdates.png" style="margin-left: 1em; margin-right: 1em;"><img alt="https://www.spectrumscaleug.org/wp-content/uploads/2020/11/episode-11-what-is-new-in-5-1.pdf" height="226" src="https://lh3.googleusercontent.com/-g04tb8_FLa0/X7qwLXzgx6I/AAAAAAABPlM/8xK525c4h7UZ_5lX0jrIGSHuW2a8zyGQQCLcBGAsYHQ/w400-h226/Spectrum%2BScale%2B5.1%2BPlatform%2BUpdates.png" title="https://www.spectrumscaleug.org/wp-content/uploads/2020/11/episode-11-what-is-new-in-5-1.pdf" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Spectrum Scale 5.1 platform updates</span></b></div> +<p></p> +<p>The Lustre discussion presented their equivalent OS support slide with a similar set of supported enterprise Linux distributions (RHEL, SLES, Ubuntu). No support for AIX or Z (s390x) though:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-e7LAXSxdS8Y/X7qx0Ta_1NI/AAAAAAABPlY/JA4x86Z3vbk55oIafOmeOPDSyTZV8EAbACLcBGAsYHQ/Screen%2BShot%2B2020-11-22%2Bat%2B10.45.30.png" style="margin-left: 1em; margin-right: 1em;"><img alt="" height="225" src="https://lh3.googleusercontent.com/-e7LAXSxdS8Y/X7qx0Ta_1NI/AAAAAAABPlY/JA4x86Z3vbk55oIafOmeOPDSyTZV8EAbACLcBGAsYHQ/w400-h225/Screen%2BShot%2B2020-11-22%2Bat%2B10.45.30.png" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Lustre 2.14 platform updates</span></b></div> +<p></p> +<p>If nothing else, this was a reminder to me that the market for Spectrum Scale is a bit broader than just HPC, unlike Lustre's. I have to assume they have enough AIX, Windows, and Z customers to justify their support for those platforms.
That said, wacky features like hardware-assisted compression are not unique to Spectrum Scale on Z; <a href="https://www.eofs.eu/_media/events/lad17/14_andreas_dilger_lad2017-zfs_improvements.pdf">Lustre picked up hardware-assisted compression</a> back in 2017 thanks to Intel.</p> +<p>New improvements to Spectrum Scale's security posture were also presented that were a little alarming to me. For example, one no longer has to add <span style="font-family: courier;">scp</span> and <span style="font-family: courier;">echo</span> to the sudoers file for Spectrum Scale to work (yikes!). There was also a very harsh question from the audience to the effect of "why are there suddenly so many security fixes being issued by IBM?" and the answer was similarly frightening; Spectrum Scale is now entering markets with stringent security demands, which has increased IBM's internal security audit requirements, and a lot of new vulnerabilities are being discovered because of this.</p> +<p>It's ultimately a good thing that Spectrum Scale is finding and fixing a bunch of security problems, since the prior state of the practice was just not performing stringent audits. I assume that Lustre's approach to security audits is closer to where Spectrum Scale was in years past, and should Lustre ever enter these "new markets" to compete with Spectrum Scale, I expect a similarly uncomfortable quantity of security notices would come to light. This is all speculative though; the only certainty is that IBM is moving GPFS towards role-based access control, which is a positive direction.</p> +<p>Overall, Spectrum Scale seemed considerably more focused on developing these enterprisey features than Lustre.</p> +<h3 id="ssug-2" style="text-align: left;">2. Manageability features that administrators may care about</h3> +<p>Spectrum Scale also revealed a bunch of smaller features that are nice to have for administrators, including</p> +<p></p> +<ul style="text-align: left;"> <li><b>Faster failing of hung RDMA requests</b> - you can now set a maximum time that an RDMA request can hang (e.g., if an endpoint fails) before its thread is killed by Spectrum Scale itself. This avoids having to wait for lower-level timeouts and seems like a nice-to-have knob for a file system that supports a lot of path and endpoint diversity. Lustre may be ahead on this front with its <a href="https://wiki.whamcloud.com/display/LNet/LNet+Health+User+Documentation#LNetHealthUserDocumentation-lnet_transaction_timeout">lnet_transaction_timeout parameter</a>, but it's unclear exactly how these two settings differ.</li> <li><b>Safeguards against administrator error</b> - Spectrum Scale added features that warn the administrator about doing something that may be a mistake, such as accidentally breaking quorum by downing a node or mapping incorrect drive slots to RAID groups. There's not really equivalent functionality in Lustre; these are the places where Lustre solution providers (think HPE/Cray ClusterStor) get to value-add management software on top of open-source Lustre (think <a href="https://pubs.cray.com/bundle/ClusterStor_CSCLI_Command_Reference_Guide_42_S9922/page/About_CSCLI_Command_Reference_Guide_E1000.html">cscli</a>).</li> <li><b>GUI and REST API changes</b> - you can do an increasing number of management operations using the Spectrum Scale GUI or its underlying control-plane REST API.
Lustre has the <a href="https://wiki.lustre.org/Integrated_Manager_for_Lustre">IML GUI</a>, but it isn't treated as a first-class citizen the way Spectrum Scale's GUI is, and it was not mentioned at the Lustre BOF at all. Again, this is an area where vendors usually value-add their own management on top of community Lustre.</li> <li><b>Improved monitoring, reporting, and phone-home</b> - a framework called "MAPS" was recently introduced to essentially do what Nagios does in most DIY environments--raise alarms for crashes, resource exhaustion, misconfiguration, and the like. It also does performance monitoring and historical data aggregation. As with the other manageability features mentioned, Lustre relies on third-party tools for these features.</li></ul> +<p></p> +<p>For resilience, Spectrum Scale announced new tunable parameters to improve parallel journal recovery:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-Jipe0aQYLmA/X7sgoE-yvKI/AAAAAAABPnY/JHUiSL5s0V8V4eUaMVlMJ3QedqsAdFqugCLcBGAsYHQ/Screen%2BShot%2B2020-11-22%2Bat%2B18.38.06.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Spectrum Scale's latest advancements in improving recovery performance" height="226" src="https://lh3.googleusercontent.com/-Jipe0aQYLmA/X7sgoE-yvKI/AAAAAAABPnY/JHUiSL5s0V8V4eUaMVlMJ3QedqsAdFqugCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-22%2Bat%2B18.38.06.png" title="Spectrum Scale's latest advancements in improving recovery performance" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Spectrum Scale's latest advancements in improving recovery performance</span></b></div> +<p></p> +<p></p> +<p>whereas Lustre announced parallel fsck with major performance improvements to speed up recovery:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-kYUWJhID2Fo/X7rTdizIz2I/AAAAAAABPmM/6_seKzcajo4LR7wg6peBfLgAawE8STESQCLcBGAsYHQ/Screen%2BShot%2B2020-11-22%2Bat%2B13.09.03.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Lustre's latest advancements in improving recovery performance" height="225" src="https://lh3.googleusercontent.com/-kYUWJhID2Fo/X7rTdizIz2I/AAAAAAABPmM/6_seKzcajo4LR7wg6peBfLgAawE8STESQCLcBGAsYHQ/w400-h225/Screen%2BShot%2B2020-11-22%2Bat%2B13.09.03.png" title="Lustre's latest advancements in improving recovery performance" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Lustre's latest advancements in improving recovery performance</span></b></div> +<p></p> +<p>Finally, IBM showcased its vision for allowing Spectrum Scale to be mounted inside containerized environments:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-V_E2gaE1ukA/X7q2JeDMkUI/AAAAAAABPlk/h6wwdezVob8M3-EsjB8XhGkRZQj6i-UhACLcBGAsYHQ/Container%2Bnative%2Bstorage%2Baccess%2B-%2Bcoming%2Bsoon.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Spectrum Scale vision for containerized application access" height="226" src="https://lh3.googleusercontent.com/-V_E2gaE1ukA/X7q2JeDMkUI/AAAAAAABPlk/h6wwdezVob8M3-EsjB8XhGkRZQj6i-UhACLcBGAsYHQ/w400-h226/Container%2Bnative%2Bstorage%2Baccess%2B-%2Bcoming%2Bsoon.png" title="Spectrum Scale vision for containerized application access" width="400" /></a></div> +<div class="separator"
style="clear: both; text-align: center;"><b>The Spectrum Scale vision for containerized application access</b></div> +<p></p> +<p>This is actually somewhere that Lustre is quite a bit ahead in some regards because it has long had features like <a href="https://wiki.lustre.org/UID/GID_Mapping">UID/GID mapping</a> and <a href="https://www.ddn.com/blog/technology-innovation/leveraging-isolation-lustre-file-systems/">subdirectory mounts</a> that allow for a greater degree of isolation that maps well to untrusted containers.</p> +<p>That all said, Lustre's focus is not on taking on more of these nice-to-have manageability features. When asked about adding basic manageability features like supporting easy addition/removal of Lustre OSTs and OSSes to enable evergreen Lustre systems analogous to Spectrum Scale's <span style="font-family: courier;">mmrestripefs</span> command, the answer was effectively "no." The reason given is that (1) Lustre clients are where files get stitched together, so migration will always have to involve client access, and (2) <span style="font-family: courier;">lfs find</span> and <span style="font-family: courier;">lfs migrate</span> already provide the tools necessary to move data files in theory. From this, I take away that stitching those two <span style="font-family: courier;">lfs</span> commands together into a tool that actually does what <span style="font-family: courier;">mmfsrestripe</span> does is an exercise left to the viewer--or a company who can value-add such a tool on top of their Lustre offering.</p> +<h3 id="ssug-3" style="text-align: left;">3. Performance, scalability, and reliability features that end users may care about</h3> +<p>Spectrum Scale didn't have a huge amount to offer in the user-facing performance/scalability/reliability features this year. They improved their support for QOS (which is admittedly fantastic when compared to <a href="https://doc.lustre.org/lustre_manual.xhtml#dbdoclet.tbftuning">Lustre's Token Bucket Filter QOS</a>which cannot limit IOPS like Spectrum Scale can) from an administrator standpoint, and they have begun to think about how to incorporate TRIMming into flash-based Spectrum Scale deployments to offer reliable performance.</p> +<p>By comparison, Lustre's new features really shine in this department. 
Andreas Dilger presented this slide near the beginning of his talk:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-7n7BUOKsHUY/X7rW7UtjrRI/AAAAAAABPmc/-xhGvLpzb9oZCXtRBRPEw6NfyxUZ82FWQCLcBGAsYHQ/Screen%2BShot%2B2020-11-22%2Bat%2B11.30.36.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Some of Lustre's many upcoming performance improvements" height="226" src="https://lh3.googleusercontent.com/-7n7BUOKsHUY/X7rW7UtjrRI/AAAAAAABPmc/-xhGvLpzb9oZCXtRBRPEw6NfyxUZ82FWQCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-22%2Bat%2B11.30.36.png" title="Some of Lustre's many upcoming performance improvements" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Some of Lustre's many upcoming performance improvements</span></b></div> +<p></p> +<p>which reflects significant attention being paid to improving the performance of emerging noncontiguous and otherwise adversarial I/O patterns--perhaps motivated by storage-hungry AI and genomics markets.</p> +<p>Lustre is also introducing features aimed at both scale-up and scale-out, with a 30x speedup in the time it takes to <a href="https://jira.whamcloud.com/browse/LU-12988">mount petabyte OSTs</a> (likely in preparation for the <a href="https://www.globenewswire.com/news-release/2019/06/17/1869364/0/en/Cray-to-Deliver-First-Exabyte-Storage-System-for-the-Frontier-Exascale-System-at-ORNL.html">exascale Lustre installations coming in the next year or two</a>) and automated directory metadata <a href="https://jira.whamcloud.com/browse/LU-11025">sharding</a>, <a href="https://jira.whamcloud.com/browse/LU-12051">shrinking</a>, and <a href="https://jira.whamcloud.com/browse/LU-12624">balancing</a>. From this, it's clear that the primary focus of Lustre continues to be extreme scale and performance above all else, but it's unclear how much of this effort puts Lustre ahead of Spectrum Scale versus simply catching up to all the effort that went into making Spectrum Scale scale out to 250 PB for the Summit system.</p> +<h3 id="ssug-4" style="text-align: left;">4. Interface features that platform developers may care about</h3> +<p>The newest release of Spectrum Scale introduces improvements to NFS (by adding v4.1 support), CSI (incremental improvements), SMB (incremental improvements), and most surprising to me, HDFS. By comparison, I don't think Lustre directly supports any of these interfaces--you have to use third-party software to expose these protocols--and if they are supported, they aren't under active development.</p> +<h3 id="ssug-overall" style="text-align: left;">Overall Impressions</h3> +<p>These two presentations pointed to a sharp contrast between how Spectrum Scale and Lustre position themselves as storage systems; IBM's vision for Spectrum Scale is as a high-capacity data lake tier against which a diversity of apps (HPC, containerized services, map-reduce-style analytics) can consume and produce data.
They even said as much while talking about their HDFS support:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-DUmUI2S33cE/X7q2gcuGz-I/AAAAAAABPls/rtIMHLuNukY6pusiBZj4DC65OBAi4kzHwCLcBGAsYHQ/Screen%2BShot%2B2020-11-22%2Bat%2B11.05.34.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Spectrum Scale's vision as a hub for all data in the enterprise" height="226" src="https://lh3.googleusercontent.com/-DUmUI2S33cE/X7q2gcuGz-I/AAAAAAABPls/rtIMHLuNukY6pusiBZj4DC65OBAi4kzHwCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-22%2Bat%2B11.05.34.png" title="Spectrum Scale's vision as a hub for all data in the enterprise" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Spectrum Scale's vision as a hub for all data in the enterprise</span></b></div> +<p>Spectrum Scale AFM improvements were also touted at the user group presentation as a means to enable workflows that span on-premises and public cloud for workloads involving HPC, containerized services, file, and object--no matter where you operate, Spectrum Scale will be there. They showed this logo soup diagram, which spoke to that vision:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-xwCG993aFH8/X7q37HCe9sI/AAAAAAABPl4/NNdT8W0oK_8ZKeoSp0jmkqaWSIBiXCeeQCLcBGAsYHQ/Screen%2BShot%2B2020-11-22%2Bat%2B11.11.29.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Spectrum Scale logo soup supporting complex workflows and hybrid cloud" height="226" src="https://lh3.googleusercontent.com/-xwCG993aFH8/X7q37HCe9sI/AAAAAAABPl4/NNdT8W0oK_8ZKeoSp0jmkqaWSIBiXCeeQCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-22%2Bat%2B11.11.29.png" title="Spectrum Scale logo soup supporting complex workflows and hybrid cloud" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Spectrum Scale logo soup supporting complex workflows and hybrid cloud</span></b></div> +<p></p> +<p>and it's clearly aligned with IBM's hybrid cloud corporate strategy. I can see how this vision could be useful based on my experience in industry, but at the same time, this looks like a Rube Goldberg machine with a lot of IBM-specific linchpins that concentrates risk on IBM product support (and licensing costs!) progressing predictably.</p> +<p>Lustre, by comparison, appears to be focused squarely on performance and scale. There was no logo soup or architectural vision presented at the Lustre BOF itself. This is likely a deliberate effort by the Lustre community to focus on being an open-source piece of a larger puzzle that can be packaged up by anyone with the need or business acumen to do so. Just as Linux itself is a community effort around which companies like Red Hat (IBM) or SUSE build and market a solution, Lustre should be just one part of an organization's overall data management strategy, whereas Spectrum Scale is trying to be the entire answer.</p> +<p>This isn't a value judgment for or against either; Lustre offers more architectural flexibility at the cost of having to do a lot of day-to-day lifting and large-scale architectural design oneself, while Spectrum Scale is a one-stop shop that likely requires fewer FTEs and less engineering effort to build infrastructure for complex workflows.
The tradeoff, of course, is that Spectrum Scale and its surrounding ecosystem are priced for enterprises, and absent a new pricing scheme that economically scales cost with capacity (hypothetically referred to as "data lake pricing" at the SSUG), the choice of whether to buy into Spectrum Scale or Lustre as a part of a larger data strategy may come down to how expensive your FTEs are.</p> +<p>On a non-technical note, the Lustre BOF certainly felt more community-oriented than the Spectrum Scale UG; the dialog was more collegial and there were no undertones of "customers" demanding answers from "vendors." This is not to say that the SSUG wasn't distinctly more friendly than a traditional briefing; it just felt a bit more IBM-controlled since it was on an IBM WebEx whose registration was moderated by IBM and where all the speakers and question answerers were IBM employees. Perhaps there's no other way in a proprietary product since the vendor ultimately holds the keys to the kingdom.</p> +<h2 id="io500" style="text-align: left;">IO-500 BOF</h2> +<p>The IO-500 BOF is one of my favorite events at both ISC and SC each year, but as with the rest of SC'20, this year's IO-500 BOF felt like a quiet affair. I noticed two noteworthy themes:</p> +<p></p> +<ol style="text-align: left;"> <li><b>I/O performance is being awarded in dimensions beyond just peak I/O bandwidth</b>. There are six awards now being given for first place: 10-node bandwidth, 10-node metadata, 10-node overall, total bandwidth, total metadata, and total overall. This contrasts with Top500 which treats performance in a single dimension (peak HPL) and implicitly perpetuates the position that HPL performance is the only aspect of performance that defines "#1." I quite like the IO-500 approach because it makes it easier to see a multidimensional picture of I/O performance and apply your own value system to the list to decide what combination of hardware and storage system software qualifies as #1.</li> <li><b>The importance of system configuration is rising in the IO-500 community</b>--defining a system hardware schema, presenting the data uniformly, and establishing standard tools and techniques for collecting this data from the systems running the IO-500 benchmark are all on the IO-500 roadmap. Again, this makes the list much more valuable for the purposes of <i>learning</i> something since a properly annotated set of submissions would allow you to understand the effects of, for example, choosing NVMe over SAS SSDs or declustered parity over RAID6 on nonvolatile media.</li></ol> +<p></p> +<p>The <a href="https://io500.org/site/submissions/full/sc20">final IO-500 list for SC'20</a> itself didn't change much this time; experimental and proof-of-concept file systems remain dominant in the top 10 positions, and DAOS, WekaFS, and IME carry most of the weight.
However, the #1 position <i>was</i> a surprise:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-cTwVFaPx2Ls/X7sDjKJv9AI/AAAAAAABPnA/NKiMql4zqAQ46XnyF8BLn85YZCEsSSqHQCLcBGAsYHQ/Screen%2BShot%2B2020-11-22%2Bat%2B15.23.25.png" style="margin-left: 1em; margin-right: 1em;"><img alt="Overall winner for the IO-500 full list was Pengcheng Laboratory's MadFS" height="226" src="https://lh3.googleusercontent.com/-cTwVFaPx2Ls/X7sDjKJv9AI/AAAAAAABPnA/NKiMql4zqAQ46XnyF8BLn85YZCEsSSqHQCLcBGAsYHQ/w400-h226/Screen%2BShot%2B2020-11-22%2Bat%2B15.23.25.png" title="Overall winner for the IO-500 full list was Pengcheng Laboratory's MadFS" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">The overall winner for the IO-500 full list was Pengcheng Laboratory's MadFS</span></b></div> +<p></p> +<p>A new file system called "MadFS" took the top spot with some ridiculous performance numbers, and frustratingly, there have been no public disclosures about what this file system is or how it works. The IO-500 committee said that they spoke privately with the submitters and felt comfortable that the entry was legitimate, but they were not at liberty to disclose many details since Pengcheng Laboratory is preparing to present MadFS at another venue. They did hint that MadFS drew inspiration from DAOS, but they didn't offer much more.</p> +<p>Peeling the MadFS submission apart does reveal a few things:</p> +<p></p> +<ul style="text-align: left;"> <li>It is a file system attached to <a href="https://www.globaltimes.cn/content/1171676.shtml">Pengcheng Laboratory's Cloudbrain-II system</a>, which is a <a href="https://e.huawei.com/us/products/cloud-computing-dc/atlas/atlas-900-ai">Huawei Atlas 900</a> supercomputer packed with <a href="https://en.wikichip.org/wiki/hisilicon/kunpeng/920-6426">Huawei Kunpeng 920 ARM CPUs</a> and&nbsp;<a href="https://www.hotchips.org/hc31/HC31_1.11_Huawei.Davinci.HengLiao_v4.0.pdf">Huawei Ascend 910 coprocessors</a>. Cloudbrain-II is a huge system with a huge budget, so it should have a very capable storage subsystem.</li> <li>72 processes were run on each of the 255 client nodes, reaching a peak of 2,209,496 MiB/second. This translates to 73 Gbit/sec out of each 100 Gb/s node--pretty darned efficient.</li> <li>The MadFS file system used is 9.6 PB in size, and the fastest-running tests (ior-easy-*) ran for a little over six minutes. This corresponds to 863 TB read and written in the best case, which is reasonable.</li> <li>The ior-easy tests were run using a transfer size of 2,350,400 bytes, which is a <i>really</i> weird optimization point. Thus, it's unlikely that MadFS is block-based; it probably runs entirely in DRAM or HBM, is log-structured, and/or relies on persistent memory to buffer byte-granular I/O from any underlying block devices.</li> <li>The submission indicates that 254 metadata nodes were used, and each node had six storage devices. The submission also says that data servers (of an undefined quantity) have 2 TB NVMe drives.</li> <ul> <li>Since 255 clients and 254 metadata servers were used, this may suggest that metadata is federated out to the client nodes. This would explain why the metadata rates are so astonishing.</li> <li>If the 9.6 PB of NVMe for data was located entirely on the 255 clients, this means each compute node would've needed over 37 TB of NVMe after parity.
This seems unlikely.</li> <li>From this, we might guess that MadFS stores metadata locally but data remotely. This would be a very fragile architecture for important data, but a reasonable one for ephemeral storage akin to <a href="https://unifyfs.readthedocs.io/en/latest/">UnifyFS</a>.</li> </ul> <li>MadFS is not ready for prime time, as its <span style="font-family: courier;">statfs(2)</span> returns nonsense data. For example, the MadFS ior-easy-* runs reported that the file system has zero inodes, while the ior-hard-* runs reported 268 trillion inodes, all of which are used.</li></ul> +<p></p> +<p>Until more disclosures are made about MadFS and the Cloudbrain-II system though, there's little intellectual value in this IO-500 submission. However, the waters are definitely chummed, and I for one will be keeping an eye out for news about this Chinese system.</p> +<p>Finally, although not part of the IO-500 BOF, Microsoft Azure released some benchmark results shortly afterwards about their successful demonstration of <a href="https://www.hpcwire.com/off-the-wire/azure-hpc-reports-1-tb-s-cloud-parallel-filesystem/">over 1 TB/sec using BeeGFS in Azure</a>. This wasn't run to the IO-500 spec so it wouldn't have been a valid submission, but it is the single fastest IOR run in the cloud of which I am aware. This bodes well for the future of parallel file systems in the cloud as a blessed BeeGFS/Azure configuration would compete directly with <a href="https://aws.amazon.com/fsx/lustre/">Amazon FSx for Lustre</a>.</p> +<h2 id="conclusion" style="text-align: left;">Concluding Thoughts</h2> +<p>Virtual SC this year turned out to be far more exhausting than I had anticipated despite the fact that I never had to leave my chair. On the upside, I got to attend SC with my cat for the first time:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-i1kSsZQhShg/X7sSEEW7DMI/AAAAAAABPnM/7p5U0Fkkb6sOI3fwjF4VCbzg__06L2hNACLcBGAsYHQ/IMG_0446.JPG" style="margin-left: 1em; margin-right: 1em;"><img alt="Harriet dialing into the Women in HPC Workshop" height="300" src="https://lh3.googleusercontent.com/-i1kSsZQhShg/X7sSEEW7DMI/AAAAAAABPnM/7p5U0Fkkb6sOI3fwjF4VCbzg__06L2hNACLcBGAsYHQ/w400-h300/IMG_0446.JPG" title="Harriet dialing into the Women in HPC Workshop" width="400" /></a></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Harriet dialing into the Women in HPC Workshop with me</span></b></div> +<p></p> +<p>and I didn't find myself getting as sweaty running between sessions. On the downside, the whole conference was just <i>weird</i>. The only conference buzz I felt was through the Twitter community due to the total lack of chance encounters, late nights out, early morning briefings, and copious free coffee. The content felt solid though, and I admit that I made heavy use of pause, rewind, and 2x replay to watch things that I would have otherwise missed in-person.</p> +<p>In my past SC recaps I remarked that I get the most out of attending the expo and accosting engineers on the floor, and the complete absence of that made SC feel a lot less whole. As a speaker, I found the lack of engagement with the audience very challenging too. The 45-second delay between live video and Q&amp;A made dialog challenging, and there was no way to follow up on questions or comments using the virtual platform.
I suppose that is the price to be paid for having an otherwise robust virtual event platform.</p> +<p>Although COVID forced us all into a sub-optimal SC venue this year, I think it also took away a lot of advancements, discussions, and dialog that would've fed a richer SC experience. With any luck SC can be in-person again next year and the community will have bounced back and made up for the time lost this year. When SC'21 rolls around, we should have at least one exascale system hitting the floor in the US (and perhaps another in China) to talk about, and the Aurora system should be very well defined. We'll have a few monster all-flash file systems on the I/O front to boot (including one in which I had a hand!), and the world will be opening up again--both in the technological sense and the literal sense. The future looks bright.</p> +<p>As always, I owe my sincerest thanks to the organizers of SC this year for putting together the programs that spurred this internal monologue and the dialogues in which I engaged online these past two weeks. I didn't name every person from whom I drew insight, but if you recognize a comment that you made and would like attribution, please do let me know.</p> +<p>Finally, if you'd like to read more, see my recaps of the <a href="https://glennklockwood.blogspot.com/2020/11/pdsw20-recap.html">PDSW'20 workshop</a>, <a href="https://www.nersc.gov/news-publications/staff-blogs/sc20-tiered-storage-panel-recap/">my tiered storage panel</a>, and the forthcoming DAOS User Group.</p> + + + + + Buckle up, CPUs are going to get weirder + + 2020-11-22T00:00:00-07:00 + https://hpc.social/2020/buckle-up-cpus-are-going-to-get-weirder + <h2 id="the-m1-is-a-good-test-run-lets-get-ready">The M1 is a good test run, let’s get ready</h2> + +<p>(Note: This post is adapted from last week’s <a href="https://newsletter.researchcomputingteams.org/archive/5246c80f-2211-470c-94cb-d25496e8d5e8">issue 51</a> +of the <a href="https://www.researchcomputingteams.org">research computing teams newsletter</a>)</p> + +<p>The big news of the past month has been Apple’s new <a href="https://www.anandtech.com/show/16252/mac-mini-apple-m1-tested/7">M1 +CPU</a>. +The M1’s specs in and of themselves are kind of interesting, but more +important to us in research computing is that the M1 is an example +of how CPUs are going to get more different as time goes on, and +that will have impacts on our teams. The M1 is going to be a trial run for +a future of more diverse computing architectures that we’d do well +to get ready for.</p> + +<p>Large-scale research computing systems have all been about “co-design” +for ages, but the truth is that in the mainstream, big-picture CPU +design choices have been pretty fixed, with most of co-design +being about choice of accelerators or mix and match between CPU, +memory, and acceleration. Now that the market has accepted ARM as +a platform — and with <a href="https://riscv.org">RISC-V</a> on its way — we +can expect to start seeing bolder choices for CPU design being +shipped, with vendors making very different tradeoffs than have +been made in the past. So whether or not you see yourself using +Apple hardware in the future, M1’s choices and their consequences +are interesting.</p> + +<p>M1 makes two substantially different trade-offs. The first is +having DRAM on socket. This sacrifices extensibility — you can’t +just add memory — for significantly better memory performance and +lower power consumption.
Accurately moving bits back and forth +between chips takes a surprising amount of energy, and doing it +fast takes a lot of power! The results are striking:</p> + +<blockquote class="twitter-tweet"><p dir="ltr" lang="en">I ran LINPACK on an M1 MacBook Air and measured the power draw. With a USB PD power meter and a mystery iOS Linpack app these are only rough reference numbers, but: 34.01 GFlops/W. It ought to be measured properly, and this approach doesn't scale, so the figure isn't really comparable; still, even including the lit LCD it comes out above #1 on the Green500... hmm, can that be right? <a href="https://t.co/ldEroByfxt">pic.twitter.com/ldEroByfxt</a></p> +&mdash; Ohtsuji (@ohtsuji) <a href="https://twitter.com/ohtsuji/status/1328768907461623808?ref_src=twsrc%5Etfw">November 17, 2020</a></blockquote> + +<p>LINPACK - solving a set of linear equations - is a pretty flawed +benchmark, but it’s widely understood. The performance numbers +here are pretty healthy for a chip with four big cores, but the +<em>efficiency</em> numbers are startling. They’re not unprecedented +except for the context; these wouldn’t be surprising numbers for a +GPU, which also has DRAM-on-socket and is similarly non-extensible. +But they are absurdly high for something more general-purpose like +a CPU.</p> + +<p>Having unified on-socket memory between CPU and integrated GPU also +makes possible some <a href="https://blog.tensorflow.org/2020/11/accelerating-tensorflow-performance-on-mac.html">great Tensorflow +performance</a>, +simultaneously speeds up and lowers power consumption for <a href="https://www.macrumors.com/2020/11/17/apple-silicon-m1-compiles-code-as-fast-as-mac-pro/">compiling +code</a>, +and does weirdly well at running +<a href="https://info.crunchydata.com/blog/postgresql-benchmarks-apple-arm-m1-macbook-pro-2020">PostgreSQL</a>.</p> + +<p>The second tradeoff has some more immediate effects for research +computing teams. Apple, as is its wont, didn’t worry too much about +backwards-looking compatibility, happily sacrificing that for +future-looking capabilities. The new Rosetta (x86 emulation) seems +to work seamlessly and is <a href="https://twitter.com/pmelsted/status/1329934691944816640">surprisingly +performant</a>. But +if you want to take full advantage of the architecture of course +you have to compile natively. And on the day of release, a lot of +key tools and libraries didn’t just “automatically” work the way +they seemed to when most people first started using other ARM chips. +(Though that wasn’t magic either; the ecosystem had spent years +slowly getting ready for adoption by the mainstream.)</p> + +<p>“Freaking out” wouldn’t be too strong a way to describe +the reaction in some corners; one user claimed that GATK would +<a href="https://twitter.com/biocrusoe/status/1328704001039339521">“never +work”</a> on +Apple silicon (because a build script mistakenly assumed that an +optional library that had Intel-specific optimizations would be +present - they’re on it), and the absence of a free Fortran compiler +on the day of hardware release worried other people (there are already +<a href="https://github.com/fxcoudert/gfortran-for-macOS/releases/tag/11-arm-alpha1">experimental gfortran +builds</a>). +Having come of computational science age in the 90s when new chips +took months to get good tooling for, the depth of concern seemed a +bit overwrought.</p> + +<p>This isn’t to dismiss the amount of work that’s needed to get +software stacks working on new systems. Between other ARM systems +and M1, a lot of research software teams are going to have to put +in a lot of time porting new low-level libraries and tools to the +new architectures.
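</p>
+<p>As a toy illustration (mine, not from any particular package) of the kind of assumption that gets baked in, consider a little build helper that picks SIMD compiler flags and only knows about two worlds:</p>
+<pre>
import platform

def simd_flags():
    """Pick compiler flags for the 'fast' build of a C extension."""
    machine = platform.machine().lower()
    if machine in ("x86_64", "amd64"):
        return ["-mavx2"]         # the implicit "all the world's an x86" path
    if machine in ("arm64", "aarch64"):
        return ["-mcpu=native"]   # let the compiler pick NEON/SVE features
    return []                     # a new architecture silently loses all SIMD

print(simd_flags())
</pre>
+<p>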
Many teams that haven’t had to worry about this +sort of thing before are going to have to refactor architecture-specific +optimizations out and into libraries. Some code will simply have +to be rewritten - some R code has depended on <a href="https://developer.r-project.org/Blog/public/2020/11/02/will-r-work-on-apple-silicon/">Intel-specific NaN +handling</a> +to implement NA semantics (which are <a href="https://blog.revolutionanalytics.com/2016/07/understanding-na-in-r.html">similar to but different +from</a> +NaN) that M1 does not honour, so natively compiled R needs extra +checks on M1.</p> + +<p>It’s also not to dismiss the complexity that people designing and +running computing systems will have to face. Fifteen years ago, +the constraints on a big computing system made things pretty clear — +you’d choose a whackload of x86 with some suitably fast (for your application) +network. The main question were how fat are the nodes, what’s +the mix of low, medium, and high-memory nodes, and what your storage +system is like. It’s been more +complex for a while with accelerators, and now with entirely different +processor architectures in the mix, it will get harder. Increasingly, +there is no “best” system; a system has to be tuned to favour some +specific workloads. And that necessarily means disfavouring others, +which centres have been loathe to do.</p> + +<p>So the point here isn’t M1. Is M1 a good choice for your research +computing support needs? Almost certainly not if you run on clusters. +And if you’re good with your laptop or desktop, well, then lots of +processors will work well enough.</p> + +<p>But even so, a lot of software is going to now have to support these +new chips. And this is just the start of “weird” CPUs +coming for research computing.</p> + +<p>CPUs will keep coming that will make radically different tradeoffs +than choices than seemed obvious before. That’s going to make +things harder for research software and research computing systems +teams for a while. A lot of “<a href="https://encyclopedia2.thefreedictionary.com/vaxocentrism">all the world’s an +x86</a>” +assumptions - some that are so ingrained they are currently hard +to see - are going to get upended, and setting things back right +is going to take work. The end result will be more flexible and +capable code, build systems, and better-targeted systems, but it’ll +take a lot of work to get there. If you haven’t already started +using build and deployment workflows and processes that can handle +supporting multiple architectures, now is a good time to start.</p> + +<p>But the new architectures, wider range of capabilities, and different +tradeoff frontiers are also going to expand the realm of what’s +possible for research computing. And isn’t that why we got into +this field?</p> + + + + + PDSW'20 Recap + + 2020-11-20T06:00:00-07:00 + https://hpc.social/2020/pdsw-20-recap + <p>This year was the first all-virtual <a href="http://www.pdsw.org/index.shtml">Parallel Data Systems Workshop</a>, and despite the challenging constraints imposed by the pandemic, it was remarkably engaging. &nbsp;The program itself was contracted relative to past years and only had time for three Work-In-Progress (WIP) presentations, so it was a little difficult to pluck out high-level research trends and themes. &nbsp;However, this year's program did seem more pragmatic, with talks covering very practical topics that had clear connection to production storage and I/O. 
The program also focused heavily on the HPC side of the community, and the keynote address was perhaps the only talk that focused squarely on the data-intensive data analysis side of what used to be PDSW-DISCS. &nbsp;Whether this is the result of PDSW's return to the short paper format this year, shifting priorities from funding agencies, or some knock-on effect of the pandemic is impossible to say.</p> +<p>Although there weren't any strong themes that jumped out at me, <a href="https://glennklockwood.blogspot.com/2019/11/sc19-recap.html#pdsw">last year's theme of using AI to optimize I/O performance</a> was much more muted this year. &nbsp;<a href="http://www.pdsw.org/pdsw20/papers/ws_pdsw_S2_P1_Rosario.pdf">Eliakin del Rosario presented a paper</a> describing a clustering and visual analysis tool he developed that underpins <a href="https://sc20.supercomputing.org/?post_type=page&amp;p=3479&amp;id=ws_ross106&amp;sess=sess226">a study applying machine learning to develop an I/O performance model</a> presented in the main SC technical program, but there was no work in the direction of applying AI to directly optimize I/O. &nbsp;Does this mean that these ideas have climbed over the hype curve and are now being distilled down into useful techniques that may appear in production technologies in the coming years? &nbsp;Or was the promise of AI to accelerate I/O just a flash in the pan?</p> +<p>In the absence of common themes to frame my recap, what follows are just my notes and thoughts about some of the talks and presentations that left an impression. &nbsp;I wasn't able to attend the WIP session or cocktail hour due to non-SC work obligations (it's harder to signal to coworkers that you're "on travel to a conference" when you're stuck at home just like any other workday) so I undoubtedly missed things, but all slides and papers are available on <a href="http://www.pdsw.org/index.shtml">the PDSW website</a>, and anyone with an SC workshop pass can re-<a href="https://cdmcd.co/P4WY7Y">watch the recorded proceedings on the SC20 digital platform</a>.</p> +<p></p> +<p></p> +<h2 style="text-align: left;">Keynote - Nitin Agrawal</h2> +<p>This year’s keynote by <a href="http://pages.cs.wisc.edu/~nitina/">Nitin Agrawal</a> was a long-form research presentation on SummaryStore, an “approximate storage system” that doesn't store the data you put in it so much as it stores the data you will probably want to get back out of it at a later date. &nbsp;This notion of a storage system that doesn't actually store things sounds like an affront at a glance, but when contextualized properly, it makes quite a lot of sense:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-A-w5SKA256s/X7X5EMOW3xI/AAAAAAABPeI/zM_zV6TtemYefCZIKTbu7Gr_GlI8AW37QCLcBGAsYHQ/How%2Bnot%2Bto%2Bdrown%253A%2Bdemocratizing%2Bstorage.png" style="margin-left: 1em; margin-right: 1em;"><img height="225" src="https://lh3.googleusercontent.com/-A-w5SKA256s/X7X5EMOW3xI/AAAAAAABPeI/zM_zV6TtemYefCZIKTbu7Gr_GlI8AW37QCLcBGAsYHQ/w400-h225/How%2Bnot%2Bto%2Bdrown%253A%2Bdemocratizing%2Bstorage.png" width="400" /></a></div> +<p>There are cases where the data being stored doesn't have high value. &nbsp;For example, data may become less valuable as it ages, or data may only be used to produce very rough guesses (e.g., garbage out) so inputting rough data (garbage in) is acceptable.
&nbsp;In these cases, the data may not be worth the cost of the media on which it is being stored, or its access latency may be more important than its precision; these are the cases where an approximate storage system may make sense.</p> +<p></p> +<p>The specific case presented by Dr. Agrawal, SummaryStore, strongly resembled a time series database to feed a recommendation engine that naturally weighs recent data more heavily than older data. &nbsp;The high-level concept seemed a lot like existing time series telemetry storage systems where high-frequency time series data are successively aggregated as they age so that new data may be sampled every few seconds while old data may be sampled once an hour.</p> +<p>For example, LMT and mmperfmon are time series data collection tools for measuring the load on Lustre and Spectrum Scale file systems, respectively. &nbsp;The most common questions I ask of these tools are things like</p> +<p></p> +<ul style="text-align: left;"><li>What was the sum of all write bytes between January 2018 and January 2019?</li><li>How many IOPS was the file system serving between 5:05 and 5:10 this morning?</li></ul> +<p>By comparison, it’s very rare to ask “How many IOPS was the file system serving between 5:05 and 5:10 two years ago?”  It follows that the storage system underneath LMT and mmperfmon can be “approximate” to save space and/or improve query performance.  Dr. Agrawal’s presentation included this pictorial representation:</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-P1Sm1HEPVpI/X7X75BfDWaI/AAAAAAABPeU/myD6BnYdV_IP7vxthcSWRwD12W4kjQhhwCLcBGAsYHQ/Time-decayed%2Bstream%2Bapproximation.png" style="margin-left: 1em; margin-right: 1em;"><img height="225" src="https://lh3.googleusercontent.com/-P1Sm1HEPVpI/X7X75BfDWaI/AAAAAAABPeU/myD6BnYdV_IP7vxthcSWRwD12W4kjQhhwCLcBGAsYHQ/w400-h225/Time-decayed%2Bstream%2Bapproximation.png" width="400" /></a></div> +<p></p> +<p>Because these approximate storage systems are specifically designed with an anticipated set of queries in mind, much of Agrawal's presentation really spoke to implementation-specific challenges he faced while implementing SummaryStore--things like how SummaryStore augmented bloom filter buckets with additional metadata to allow approximations of sub-bucket ranges to be calculated. &nbsp;More of the specifics can be found in the <a href="http://www.pdsw.org/pdsw20/slides/pdsw_keynote_2020_Nitin.pdf">presentation slides</a> and references therein.</p> +<p>This notion of approximate storage is not new; it is preceded by years of research into <i>semantic file systems</i>, where the way you store data is driven by the way in which you intend to access the data. &nbsp;By definition, these are data management systems that are tailor-made for specific, high-duty cycle I/O workloads such as web service backends.</p> +<p>What I took away from this presentation is that semantic file systems (and approximate storage systems by extension) aren't intrinsically difficult to build for these specific workloads. &nbsp;Rather, making such a system sufficiently generic <i>in practice</i> to be useful beyond the scope of such a narrow workload is where the real challenge lies. &nbsp;Tying this back to the world of HPC, it's hard to see where an approximate storage system could be useful in most HPC facilities since their typical workloads are so diverse.
<p>However, two thoughts did occur to me:</p>
+<ol style="text-align: left;"><li>If the latency and capacity characteristics of an approximate storage system are so much better than those of generic file-based I/O on the same storage hardware (DRAM and flash drives), an approximate storage system could help solve problems that have traditionally been limited by memory capacity. &nbsp;DNA sequence pattern matching (think <a href="https://blast.ncbi.nlm.nih.gov">BLAST</a>) or de novo assembly could feasibly be boosted by an approximate index.</li><li>Since approximate storage systems are purpose-built for specific workloads, the only way they fit into a general-purpose HPC environment is through composable data services. &nbsp;Projects like <a href="https://press3.mcs.anl.gov/mochi/">Mochi</a> or <a href="https://github.com/excelab/bespokv">BespoKV</a> provide the building blocks to craft and instantiate such purpose-built storage systems, and software-defined storage orchestration in the spirit of <a href="https://cug.org/proceedings/cug2016_proceedings/includes/files/pap105s2-file1.pdf">DataWarp</a> or the <a href="https://www.hpc.cam.ac.uk/research/data-acc">Cambridge Data Accelerator</a> would be needed to spin up an approximate storage service in conjunction with an application that would use it.</li></ol>
+<p>I'm a big believer in #2, but #1 would require a forcing function coming from the science community to justify the effort of adapting an application to use approximate storage.</p>
+<h2 style="text-align: left;">Keeping It Real: Why HPC Data Services Don't Achieve I/O Microbenchmark Performance</h2>
+<p><a href="http://www.pdsw.org/pdsw20/papers/ws_pdsw_S1_P1_Carns.pdf">Phil Carns (Argonne) presented a lovely paper</a> full of practical gotchas and realities surrounding the idea of establishing a roofline performance model for I/O. &nbsp;The goal is simple: measure the performance of each component in an I/O subsystem's data path (application, file system client, network, file system server, storage media), identify the bottleneck, and see how close you can get to hitting the theoretical maximum of that bottleneck:</p>
+<div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-couyuEn261I/X7YGwPqk4uI/AAAAAAABPeg/M_HHYQMrdnM5q1qrdKX9KPBjTPw6J9FjgCLcBGAsYHQ/Screen%2BShot%2B2020-11-12%2Bat%2B08.11.48.png" style="margin-left: 1em; margin-right: 1em;"><img height="225" src="https://lh3.googleusercontent.com/-couyuEn261I/X7YGwPqk4uI/AAAAAAABPeg/M_HHYQMrdnM5q1qrdKX9KPBjTPw6J9FjgCLcBGAsYHQ/w400-h225/Screen%2BShot%2B2020-11-12%2Bat%2B08.11.48.png" width="400" /></a></div>
+<div class="separator" style="clear: both; text-align: center;"><br /></div>
+<p>The thesis of the paper was that even though this sounds simple, there’s a lot more to it than meets the eye.  I won’t recite the presentation (see the <a href="http://www.pdsw.org/pdsw20/papers/ws_pdsw_S1_P1_Carns.pdf">paper</a> and <a href="http://www.pdsw.org/pdsw20/slides/pdsw_S1_P1_%20Carns.pdf">slides</a>–they’re great), but some of the more interesting findings included:</p>
+<div><ol style="text-align: left;"><li>There's a 40% performance difference between the standard OSU MPI bandwidth benchmark and what happens when you make the send buffer too large to fit into cache.
&nbsp;It turns out that actually writing data over the network from DRAM (as a real application would) is demonstrably slower than writing data from a tiny, cacheable memory buffer.</li><li>Binding MPI processes to cores is good for MPI latency but can be bad for I/O bandwidth. &nbsp;Highly localized process placement is great if those processes talk to each other, but if they have to talk to something off-chip (like network adapters), the more spread out they are, the greater the path diversity and aggregate bandwidth they may have getting out of the chip.</li><li>O_DIRECT bypasses the page cache but not the device cache, while O_SYNC does not bypass the page cache but flushes both the page and device caches. &nbsp;This causes O_DIRECT on its own to reduce performance for smaller I/Os, which would otherwise benefit from write-back caching, but to increase performance when combined with O_SYNC, since one less cache (the page cache) has to be synchronized on each write. &nbsp;Confusing <i>and</i> wild--and also completely nonstandard, since these are Linux-specific flags.</li></ol></div>
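+<p>That last point is easy to poke at from the shell. &nbsp;Below is a minimal sketch (mine, not the paper's methodology) using dd's Linux-specific flags, where oflag=direct maps to O_DIRECT and oflag=sync to O_SYNC. &nbsp;Write the test file to a disk-backed file system (O_DIRECT fails with EINVAL on tmpfs), and expect the absolute numbers to vary wildly from system to system:</p>
+<div class="highlight"><pre><code class="language-bash">#!/bin/bash
+# Compare buffered, O_DIRECT, O_SYNC, and O_DIRECT+O_SYNC writes of 1 MiB blocks.
+for flags in "" "oflag=direct" "oflag=sync" "oflag=direct,sync"; do
+    echo "=== ${flags:-buffered (no flags)} ==="
+    dd if=/dev/zero of=./ddtest bs=1M count=1024 $flags 2&gt;&amp;1 | tail -n 1
+    rm -f ./ddtest
+done</code></pre></div>
+<p>On a typical node you will often see buffered writes beat O_DIRECT at these sizes, while adding O_SYNC hurts the buffered path far more than it hurts the O_DIRECT path--exactly the counterintuitive interplay described above.</p>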
<h2 style="text-align: left;">Towards On-Demand I/O Forwarding in HPC Platforms</h2>
+<div><a href="http://www.pdsw.org/pdsw20/papers/ws_pdsw_S1_P2_Bez.pdf">Jean Luca Bez (UFRGS) presented a neat userspace I/O forwarding service</a>, FORGE, that got me pretty excited since the field of <a href="https://www.glennklockwood.com/data-intensive/storage/io-forwarding.html">I/O forwarding has been pretty stagnant</a> since IOFSL came out ten years ago.</div>
+<div><br /></div>
+<div>The high-level concept is simple: take the intelligence of collective I/O operations implemented in ROMIO and, instead of running them inside the same MPI application performing I/O, offload that functionality to discrete nodes:</div>
+<div><br /></div>
+<div><div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-vQtcMpV9y_w/X7YLc2Ti4HI/AAAAAAABPes/Z3WbhdeFEVYaHRETWwmwAEwWf2VjEGkrwCLcBGAsYHQ/Screen%2BShot%2B2020-11-12%2Bat%2B08.40.44.png" style="margin-left: 1em; margin-right: 1em;"><img height="225" src="https://lh3.googleusercontent.com/-vQtcMpV9y_w/X7YLc2Ti4HI/AAAAAAABPes/Z3WbhdeFEVYaHRETWwmwAEwWf2VjEGkrwCLcBGAsYHQ/w400-h225/Screen%2BShot%2B2020-11-12%2Bat%2B08.40.44.png" width="400" /></a></div>
+<br />This FORGE service is ephemeral in that it is spun up at the same time your MPI application is spun up and persists for the duration of the job. &nbsp;However, unlike traditional MPI-IO-based collectives, it runs on dedicated nodes, and it relies on <i>a priori</i> knowledge of the application's I/O pattern to decide what sorts of I/O reordering would benefit the application.</div>
+<div><br /></div>
+<div>This is perhaps a bit wasteful since nodes are being held idle until I/O happens, but the promise of this idea is much larger. &nbsp;Many large HPC systems have dedicated I/O forwarding nodes because they have to--for example, LNet routers or DVS servers exist in Cray-based HPC systems to do the network protocol conversion that allows InfiniBand-based Lustre and Spectrum Scale file systems to be mounted on Aries-based compute nodes. &nbsp;There's no reason these same nodes couldn't also be used to run FORGE-like services on demand to buffer and reorder I/Os in transit. &nbsp;And if you stick some NVMe into these protocol conversion nodes, you suddenly have something that looks an awful lot like a transparent burst buffer akin to DDN Infinite Memory Engine.</div>
+<div><br /></div>
+<div>Taking this a step further, this idea also motivates having reconfigurable storage infrastructure within an HPC system; with a little bit of knowledge about your I/O workload, one could reconfigure the parallelism and compute power available along the I/O data path itself to optimally balance the limited resources of nodes against the performance benefit. &nbsp;A couple of examples:</div>
+<div><ul style="text-align: left;"><li>Have a very IOPS-heavy, many-file workload? &nbsp;Since these tend to be CPU-limited, it would make sense to allocate a lot of FORGE nodes to the job so that you have plenty of extra CPU capacity to receive these small transactions, aggregate them, and drive them out to the file system.</li><li>Have a bandwidth-heavy shared-file workload? &nbsp;Driving bandwidth doesn't require a lot of FORGE nodes, and fewer nodes means fewer potential lock conflicts when accessing the shared file.</li></ul><div>This intelligent I/O forwarding naturally maps to file system architectures that incorporate I/O forwarding and stateless components--like <a href="https://glennklockwood.blogspot.com/2019/02/vast-datas-storage-system-architecture.html">VAST</a>--where more network and computational parallelism can be sloshed into a compute node's data path to deal with more complex or adversarial I/O patterns.</div>
+</div>
+<div><br /></div>
+<h2 style="text-align: left;">Fractional-Overlap Declustered Parity</h2>
+<div><a href="http://www.pdsw.org/pdsw20/papers/ws_pdsw_S2_P2_%20Ke.pdf">Huan Ke (U Chicago) presented a paper</a> that tried to bridge the gap between RAID implementations that use declustered parity, which has really fast rebuild but a huge failure domain, and traditional (clustered) parity, which has very slow rebuilds but a very small failure domain.</div>
+<div><br /></div>
+<div>The special sauce proposed by Ke is being judicious about how stripes are laid out across a declustered group. &nbsp;Using Latin squares to map RAID blocks to physical drives, one can control how many unique stripes would be affected by a failure (termed the <i>overlap fraction</i>):</div>
+<div><br /></div>
+<div><div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-q4m6ljXvG7I/X7YTBI1-j_I/AAAAAAABPe4/acpRWMeZsx4agTJ96k_6OSTp8CcWAcN1gCLcBGAsYHQ/Screen%2BShot%2B2020-11-12%2Bat%2B09.46.02.png" style="margin-left: 1em; margin-right: 1em;"><img height="225" src="https://lh3.googleusercontent.com/-q4m6ljXvG7I/X7YTBI1-j_I/AAAAAAABPe4/acpRWMeZsx4agTJ96k_6OSTp8CcWAcN1gCLcBGAsYHQ/w400-h225/Screen%2BShot%2B2020-11-12%2Bat%2B09.46.02.png" width="400" /></a></div>
+<div><br /></div>
+This is usually where I stop being able to keep up in these sorts of parity scheme talks; however, I quickly realized that this parity scheme relies on the same principle that engineers use to design cost-efficient parameter sweep experiments. &nbsp;In fact, I made a <a href="https://www.glennklockwood.com/materials-science/statistical-design.html">webpage about this exact topic in the context of optimizing a hypothetical chemical vapor deposition experiment</a> when I was an undergraduate in materials science, and it's really not as complicated as I thought.</div>
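+<div><br /></div>
+<div>As a toy illustration of the underlying trick (my own sketch, not Ke's construction): a rotated Latin square places each stripe's blocks so that every block position visits every drive exactly once, and the choice of square is what controls the overlap fraction:</div>
+<div class="highlight"><pre><code class="language-bash">#!/bin/bash
+# Toy 5-drive layout: stripe s puts its b-th block on drive (s + b) mod 5.
+# Each column (block position) hits every drive once--the Latin-square property.
+ndrives=5
+for s in $(seq 0 $((ndrives - 1))); do
+    row=""
+    for b in $(seq 0 $((ndrives - 1))); do
+        row="$row $(( (s + b) % ndrives ))"
+    done
+    echo "stripe $s -&gt; drives:$row"
+done</code></pre></div>
+<div>In this fully declustered extreme, losing any one drive touches one block of every stripe; choosing among different squares is what lets the layout dial that overlap up or down.</div>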
<div><br /></div>
+<div>What it boils down to is defining a set of experiments (or mappings between RAID blocks and drives) where you vary all the parameters (temperature, pressure, etc.--or which RAID block maps to which drive) but ensure that no parameter value is ever repeated (e.g., don't run two experiments with temperature held at 30C, and don't use two layouts that both place block #2 on drive #3). &nbsp;Orthogonal arrays (which are composed of Latin squares) provide an analytical method for coming up with these unique combinations.</div>
+<div><br /></div>
+<div>In the engineering context, you essentially never repeat an experiment if you can infer the result of varying one parameter using a combination of other experiments. &nbsp;In the parity placement scheme, you never use a block mapping if a combination of drive failures will break all your RAID stripes. &nbsp;The neat idea behind what Ke presented is a method to vary this constraint so that you can find layout schemes with any mix of blast radius (how many stripes are lost on an unrecoverable failure) and rebuild time.</div>
+<div><br /></div>
+<h2 style="text-align: left;">NVIDIA GPUDirect Storage Support in HDF5</h2>
+<div><a href="http://www.pdsw.org/pdsw20/papers/ws_pdsw_S2_P3_Ravi.pdf">John Ravi presented his work</a> implementing support for NVIDIA's brand-new <a href="https://developer.nvidia.com/blog/gpudirect-storage/">GPUDirect Storage</a> (which allows data transfer between GPU memory and an NVMe device without ever touching host memory, using <a href="https://www.kernel.org/doc/html/latest/driver-api/pci/p2pdma.html">peer-to-peer PCIe</a>) in HDF5. &nbsp;Much of the talk focused on the implementation details specific to HDF5, but he did present some performance results which I found quite interesting:</div>
+<div><br /></div>
+<div><div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-60ItLuyV3bQ/X7c29whuOgI/AAAAAAABPfM/texW-SpqU5UgY2OQKkKlk1fgF_pZhBzXQCLcBGAsYHQ/Screen%2BShot%2B2020-11-12%2Bat%2B10.15.45.png" style="margin-left: 1em; margin-right: 1em;"><img height="225" src="https://lh3.googleusercontent.com/-60ItLuyV3bQ/X7c29whuOgI/AAAAAAABPfM/texW-SpqU5UgY2OQKkKlk1fgF_pZhBzXQCLcBGAsYHQ/w400-h225/Screen%2BShot%2B2020-11-12%2Bat%2B10.15.45.png" width="400" /></a></div>
+<br />In the above diagram, "SEC2" refers to the default POSIX interface, "DIRECT" is POSIX using O_DIRECT, and "GDS" is GPUDirect Storage. &nbsp;What surprised me here is that all of the performance benefits were expressed in terms of bandwidth, not latency--I naively would have guessed that not having to bounce through host DRAM would enable much higher IOPS. &nbsp;These results made me internalize that the performance benefits of GDS lie in not having to gum up the limited bandwidth between the host CPU and host DRAM. &nbsp;Instead, I/O can enjoy the bandwidth of HBM or GDDR to the extent that the NVMe buffers can serve and absorb data. &nbsp;I would hazard that in the case of IOPS, the amount of control-plane traffic that has to be moderated by the host CPU undercuts the fast data-plane path enabled by GDS.
&nbsp;This is consistent with literature from <a href="https://www.ddn.com/blog/ddn-ai-storage-gets-faster-simpler-gpudirect-storage/">DDN</a>&nbsp;and <a href="https://vastdata.com/resources/lightspeed-e-book/">VAST</a> about their performance boosts from GDS.</div>
+<div><br /></div>
+<h2 style="text-align: left;">Fingerprinting the Checker Policies of Parallel File Systems</h2>
+<div>The final PDSW talk that struck a chord was by <a href="http://www.pdsw.org/pdsw20/papers/ws_pdsw_S3_P3_Han.pdf">Runzhou Han, who presented a methodology for exercising parallel file systems' fsck tools</a> using targeted fault injection. &nbsp;He intentionally corrupted different parts of the data structures used by BeeGFS and Lustre to store metadata, then ran fsck to see how well those mistakes were caught. &nbsp;I think the biggest intellectual contribution of the work was formalizing a taxonomy of different types of corruption events (junk data, zeros written, duplicate data, and out-of-sync data) and the ways in which fsck does or does not cope with them:</div>
+<div><br /></div>
+<div><div class="separator" style="clear: both; text-align: center;"><a href="https://lh3.googleusercontent.com/-eQLpeKUzuwM/X7dKJ7pWKmI/AAAAAAABPfY/qyIoWhHODycUmF1ubU74p6cz34h5u-6QACLcBGAsYHQ/Screen%2BShot%2B2020-11-12%2Bat%2B12.34.07.png" style="margin-left: 1em; margin-right: 1em;"><img height="225" src="https://lh3.googleusercontent.com/-eQLpeKUzuwM/X7dKJ7pWKmI/AAAAAAABPfY/qyIoWhHODycUmF1ubU74p6cz34h5u-6QACLcBGAsYHQ/w400-h225/Screen%2BShot%2B2020-11-12%2Bat%2B12.34.07.png" width="400" /></a></div>
+<br />The practical outcome of this work is that it identified a couple of data structures and corruption patterns that are particularly fragile on Lustre and BeeGFS. &nbsp;Alarmingly, two cases triggered kernel panics in lfsck, which raises the question: why isn't simple fault injection like this part of the regular regression testing performed on Lustre? &nbsp;As someone who's been adjacent to several major parallel file system outages that resulted from fsck not doing a good job, I'd say hardening the recovery process is a worthwhile investment, since anyone who's having to fsck in the first place is already having a bad day.</div>
+<div><br /></div>
+<div>That said, this paper seemed much more practical than foundational, and it was unclear where the work goes once the immediate issues it discovered are addressed. &nbsp;To that end, I could see why hardening fsck isn't getting a lot of research attention.</div>
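+<div><br /></div>
+<div>The general recipe is easy to replicate at small scale even without a parallel file system handy. &nbsp;The sketch below is my own stand-in, not the paper's harness: it injects "junk"-style corruption into an ext4 loopback image (swap /dev/urandom for /dev/zero to get the "zeros" case) and then fingerprints what the checker reports:</div>
+<div class="highlight"><pre><code class="language-bash">#!/bin/bash
+# Build a small throwaway ext4 image (no root needed for any of this).
+truncate -s 64M fs.img
+mkfs.ext4 -q -F fs.img
+# Overwrite 4 KiB of early metadata with junk; the offset chosen determines
+# which data structure (bitmaps, inode table, ...) takes the hit.
+dd if=/dev/urandom of=fs.img bs=4096 seek=8 count=1 conv=notrunc status=none
+# -f forces a full check, -n keeps it read-only; the messages and exit code
+# are the "fingerprint" of how the checker copes with this corruption class.
+e2fsck -fn fs.img
+echo "e2fsck exit code: $?"</code></pre></div>
+<div>Pointing lfsck or beegfs-fsck at deliberately corrupted metadata targets is the same experiment at production scale, which is exactly why it's surprising this isn't already part of those file systems' regression suites.</div>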
+
+
+
+
+ What will Post-Pandemic Academic Research Computing Look Like?
+
+ 2020-10-24T01:00:00-06:00
+ https://hpc.social/2020/what-will-post-pandemic-academic-research-computing-look-like-
+ <p>We’re nowhere near the endgame yet. But even now, in the middle of the COVID-19 times, it is not too soon to think about what research computing will look like when the threat of infection by SARS-CoV-2 no longer shapes our work lives. While the future looks good for research computing team individual contributors who are willing to learn on the fly, the coming years will be treacherous for teams as organizations, and for their managers.</p>
+
+<h2 id="what-hath-2020-wrought">What hath 2020 wrought</h2>
+
+<p>There are a few pretty unambiguous “inputs” from 2020 that will have consequences for years to come:</p>
+
+<h3 id="institutional-and-governmental-coffers-are-depleted">Institutional and governmental coffers are depleted</h3>
+
+<p>Entire sectors of the economy are in bad shape. Institutional budgets have suffered across the board. There have been large unforeseen costs for dealing with the pandemic, while normal operating costs haven’t gone down much except in tiny budget lines like travel.</p>
+
+<p>At Universities, international student tuitions have dropped less than expected, but there are well-founded worries that they will continue dropping and not bounce back. In a lot of jurisdictions, dollops of one-off support for educational institutions came from governments. Those governments will be tightening their budgets as soon as they can, reducing rather than increasing payouts over the course of many years as they claw their way back to budget balance.</p>
+
+<h3 id="clients-are-now-used-to-research-computing-teams-being-distant">Clients are now used to research computing teams being distant</h3>
+
+<p>We’ve all been working from home over the course of months. A lot of previously unquestioned assumptions about how important it is to have certain groups or equipment “here” with the research groups so that they could be accessible are now known to be mistaken. Researchers, VPRs, and funders are seeing that virtual teams for research computing can support research perfectly well with some controls in place. Yes, it’s handy to sit down beside someone to get things sorted sometimes, but we’ve learned we can do pretty well without that.</p>
+
+<h3 id="primacy-of-health-research">Primacy of health research</h3>
+
+<p>Life sciences has been an increasingly important part of research computing since quantitative molecular methods took off, and even more so since the human genome project’s completion. During the pandemic, centres have dramatically shifted towards prioritizing various kinds of health research workloads, which in turn has boosted the capacity (and expectations) of lots of health-related research groups and their funders.</p>
+
+<h3 id="importance-of-data-and-data-sharing-better-understood">Importance of data and data sharing better understood</h3>
+
+<p>With most of the world daily monitoring case counts, “excess deaths”, case fatality rate vs infection fatality rate, and the like, the importance of clean, high-quality data has never been more widely understood. And the limits of what “AI” or advanced analysis techniques can do with poor-quality data are very clear.</p>
+
+<p>And as data’s importance becomes clearer, the importance of pooling data has never been more obvious, even in disciplines typically very reluctant to do so (sometimes for good reasons, sometimes not). That’s very unlikely to rapidly change back.</p>
+
+<h3 id="the-best-research-computing-teams-have-learned-to-communicate-a-lot-better">The best research computing teams have learned to communicate a lot better</h3>
+
+<p>The research computing and data teams that have come through this pretty well and with satisfied clients have really had to up their games in communications - internally and externally, synchronous and asynchronous. Many of these teams already had experience successfully working with distributed collaborators and partners, and built on those strengths.</p>
+
+<p>But not all research computing and data teams have come through this experience with satisfied client researchers.</p>
+
+<h2 id="consequences-2021-and-beyond">Consequences: 2021 and beyond</h2>
+
+<p>None of the changes I’ve described above are particularly subtle or ambiguous, and I think the short-term consequences are almost as clear.
Some short and mid-term consequences will be, roughly in order of certainty:</p>
+
+<h3 id="research-computing-teams-are-never-going-back-to-100-work-from-office">Research computing teams are never going back to 100% work-from-office</h3>
+
+<p>This one is so obvious it hardly needs to be said, but let’s say it. Space on University campuses has always been tight, and 2020 has shown us that research computing teams don’t need to be on campus. While each team will have to figure out its own approach - fully distributed, rotating with hot-desking, hybrid - we’re never going back to routinely being all together on campus.</p>
+
+<h3 id="research-budgets-are-mostly-going-to-shrink-except-in-health">Research budgets are mostly going to shrink, except in health</h3>
+
+<p>Governments worldwide will start trying to get their finances back into balance after the huge COVID-19 expenditures and shrunken tax revenues of 2020 and early(?) 2021. While research budgets probably won’t be drastically cut, they certainly won’t grow.</p>
+
+<p>On the other hand, even once the pandemic is well and truly over, funding for health and health research will be extremely popular: voters will be wary of another pandemic, and COVID-19’s long-term effects will still need to be studied and monitored. Health and health research will have an even larger claim to priority over stagnant research funding than before, and institutions will be eager to support such efforts.</p>
+
+<h3 id="research-support-budgets-are-going-to-shrink">Research support budgets are going to shrink</h3>
+
+<p>With research budgets flat and institutions facing declining government funding and possibly declining international enrolments, there is going to be pressure to make cuts wherever possible. “Overheads” for the basic missions of teaching and research are going to be under increasing scrutiny.</p>
+
+<p>Any research computing team that can’t communicate its value very clearly to VPRs and university administration, in terms of research dollars and other outcomes the administration cares about, is going to be facing a lot of very uncomfortable questions. Any cuts to research support services that won’t result in months and months’ worth of angry phone calls are going to look pretty attractive to administrations trying to figure out what to cut without firing faculty or teaching staff.</p>
+
+<h3 id="research-computing-teams-will-consolidate">Research computing teams will consolidate</h3>
+
+<p>VPRs have long eyed various kinds of core facilities and wondered if they could be contracted out<sup id="fnref:1"><a class="footnote" href="https://www.dursi.ca/feed.xml#fn:1" rel="footnote">1</a></sup>. A year from now, with VPRs earnestly looking for budget cuts, researchers increasingly comfortable with getting research computing support over Zoom and Miro, an increased emphasis on data-sharing and thus remote data infrastructures, and some research computing teams better able to communicate their value than others, there will be consolidation and sorting of research computing and data teams.</p>
+
+<p>Very small groups - a couple of specialists embedded in a large (especially health-related) research group, or a handful of research computing experts in a large corporate IT shop - are likely safe as long as they support research that continues to be funded, as they’re too small a target to be worth cutting.
But medium-sized centres with vague goals and priorities, who can’t communicate the value they bring, are going to be called upon to justify their existence.</p>
+
+<p>As this shakes out, funding will favour small, hyper-specialized teams who deeply understand some segment of local needs, and large regional centres with diversified funding sources, excellent communications, and clear goals and priorities that enter contracts with other institutions and groups.</p>
+
+<p>There isn’t going to be a dramatic “big bang” of closures, dissolutions, or mergers. Instead, straitened circumstances and very broad acceptance of virtual research support and data infrastructure will accelerate trends that have already been visible. And it’s going to be led by individual contributors who are about to realize their employment options have significantly increased.</p>
+
+<h3 id="more-adoption-of-industry-best-practices-for-running-computer-systems">More adoption of industry best practices for running computer systems</h3>
+
+<p>Research software quality takes a lot of (unjustified) guff, but the truth is that with version control, unit tests, CI/CD, and packaging, research software development is <em>much</em> closer to industry best practices than research computing systems operations is.</p>
+
+<p>With health data applications becoming increasingly important, that will have to change. Privacy restrictions around PHI will require better controls, documentation, and processes, including security incident reporting. Emphasis on data sharing and availability will push teams towards higher availability SLAs, which will push towards on-calls and practices like, if not chaos-engineering, at least routine testing of failures as with <a href="https://slack.engineering/disasterpiece-theater-slacks-process-for-approachable-chaos-engineering/">“disasterpiece theatre”</a>.</p>
+
+<h3 id="portfolios-of-research-computing-systems-are-going-to-be-rebalanced-away-from-big-metal">Portfolios of research computing systems are going to be rebalanced away from “big metal”</h3>
+
+<p>As with research computing teams, this isn’t going to be a big bang or a sudden pivot, but an acceleration of trends already in place.</p>
+
+<p>With greater emphasis on data and health applications, very large-scale physical science simulations (my own background) will be an even smaller, though still important, use case for research computing. With greater emphasis on remote data infrastructures, remote teams, and data sharing, commercial cloud adoption in research computing will continue to grow. On-premises infrastructure is going to continue to tilt away from being able to support small numbers of large simulations towards architectures which can provide more flexibility for a wider range of computing and data applications.</p>
+
+<h2 id="what-does-it-mean-for-us">What does it mean for us?</h2>
+
+<p>Like the mainstreaming of telemedicine, many of the consequences of the pandemic will just be pushing forward something that was always going to happen eventually but had lacked an impetus until now. And for many (most?) research computing team individual contributors, things will look pretty good - work-from-home will open up more job opportunities, even if the portfolio of projects they support starts looking different.</p>
+
+<p>But for research computing teams as organizations, and for their managers, the coming years will be treacherous.
If the research computing team supporting University research groups doesn’t have to be on campus any more, why do they have to be University employees at all? If a neighbouring centre has better-run systems with better availability and already handles PHI, why not just use them for research software development support too?</p>
+
+<p>It is not too early to start upping your game when it comes to the administration, your researchers, and your team members. For the administration, you’re going to have to ensure that you can justify every budget item in terms the administration recognizes and values, and that you have clear and focussed goals and priorities. For researchers, you can start making sure that your systems, processes, and practices are as high-quality and researcher-focussed and -friendly as possible. For your team members, if you’re not regularly communicating with them to make sure they’re happy in their current roles and with their career development, this is the time to start.</p>
+
+<hr />
+
+<div class="footnotes">
+ <ol>
+ <li id="fn:1">
+ <p>See for instance table 2 of <a href="https://www.srainternational.org/blogs/srai-jra1/2019/12/09/operational-fiscal-management-of-core-facilities">Carter <em>et al</em>.</a>, where VPRs 2:1 would prefer service contracts for HPC centres over in-house options (of an admittedly small sample). <a class="reversefootnote" href="https://www.dursi.ca/feed.xml#fnref:1">&#8617;</a></p>
+ </li>
+ </ol>
+</div>
+
+
+
+
+ Things I Learned from Looking at 500 Research Computing Manager Jobs over 10 Months
+
+ 2020-10-14T01:00:00-06:00
+ https://hpc.social/2020/things-i-learned-from-looking-at-500-research-computing-manager-jobs-over-10-months
+ <p>I write a weekly <a href="https://newsletter.researchcomputingteams.org">newsletter</a> for research computing managers, team leads, or those aspiring to those roles. One of the things I’ve wanted to emphasize in the newsletter is that managing research computing teams is a profession in and of itself, and one worth doing well. Part of that is emphasizing the existence of career opportunities.</p>
+
+<p>So since the beginning I’ve included job listings and maintained a <a href="https://www.researchcomputingteams.org/jobs">job board</a>, posting about 500 such jobs over the past 10 months and removing them as they become filled or otherwise unavailable. My main criterion for such jobs is whether or not I would describe the work as principally about managing or leading a research computing team - admittedly a fuzzy definition.</p>
+
+<p>Over the course of examining those 500 jobs - and looking through many, many more that never made it to the board - I’ve learned some things:</p>
+
+<p><strong>There are a lot of jobs out there for people managing research computing teams</strong>. I’ve <em>never</em> had any trouble finding some each week to put on the job board, or ones with highlights interesting enough to list at the end of the newsletter.</p>
+
+<p><strong>There are certainly many more I’m missing</strong>. As the field matures there are starting to be <a href="https://us-rse.org/jobs/">job</a> <a href="https://society-rse.org/careers/vacancies/">boards</a> for research software development or for particular sub-fields of research computing like <a href="https://bioinformatics.ca/job-postings/#/?&amp;order=desc">bioinformatics</a>.
+But, consistent with research’s neglect of management as something that needs to be done and done well, no such resources exist for the managers of those important roles. So I have a go-to list of Google and other searches for jobs, which I go through a couple of times a week.</p>
+
+<p>In research, when you’re doing a literature search and you start hitting the same papers again and again, you’re pretty sure you’ve got a mostly complete list of references as a starting point. I’m nowhere near that with my managing-research-computing-teams job list, largely because the names we use for these roles vary so widely. So I’m confident that I only see a fraction of these jobs. (You can help out by <a href="https://airtable.com/shrL6QGic3Mv9JFrs">submitting any jobs</a> you know about.)</p>
+
+<p><strong>Research computing teams are broadening, and so is the need for managers</strong>. Where this is most obvious is in data science or data engineering teams, which have spread to every sector and every industry. Generic “Manager, Data Science” jobs are so plentiful that I don’t list most of them - many of them are more operational rather than “jobs leading research computing teams” - but even the ones that make the cut are in sectors from health to transportation to retail to engineering. There are increasingly data engineering, cloud architecture, and similar roles for supporting research computing efforts, to say nothing of ML/AI jobs. And there are countless management/team lead jobs for specialist research computing in health, biology, and biomedicine.</p>
+
+<p><strong>Research data management is increasingly employable</strong>. As the initial data science and data engineering work in organizations matures, many institutions are realizing that they now need principled approaches to data governance, stewardship, and modelling. This is happening most rapidly in heavily regulated industries — health, finance — but is starting to percolate outwards. Those who have maintained and curated data resources for research, or who have supported those that do, will be surprised at the number of jobs in the private sector for doing similar work.</p>
+
+<p><strong>“Traditional” research computing team management jobs remain, and they take forever to fill</strong>: There are definitely still routinely “Director of Research Computing, University of Somethingorother” jobs out there. And I don’t know whether it’s because of the pandemic, or because of competition from other sectors, but such jobs are taking forever to fill this year. I routinely see them open for months, and then reposted one or more times. I see this both for managers of teams running on-premises hardware and for teams mainly doing software development.</p>
+
+<p><strong>Despite the talk of RSE units, most research computing jobs within academic institutions are lone outposts</strong>: While in companies research computing - data science, computing resource management, software development - tends to be centralized (even if it is matrixed out or embedded into other teams), in academia we’re definitely not there - most of the team lead/manager jobs I see in Universities are for small teams embedded in a single institute or project.
I think that’s a shame; it greatly reduces the opportunity for cross-pollination, learning, and developing best practices, makes work less efficient and less satisfying, and makes teams more management-heavy than they need to be.</p>
+
+
+
+
+ XRootD Client Manager
+
+ 2020-10-11T06:00:00-06:00
+ https://hpc.social/2020/xrootd-client-manager
+ <p>The validation project for XRootD Monitoring is moving to phase 2, scale testing. Phase 1 focused on the correctness of single-server monitoring. <a href="https://doi.org/10.5281/zenodo.3981359">The report</a> is available.</p>
+
+<p>We are still forming the testing plan for the scale test of XRootD, but a component of the testing will be multiple clients downloading from multiple servers. In addition, we must record exactly how much data each client reads from each server in order to validate the monitoring against the clients’ real behavior.</p>
+
+<p>This level of testing will require detailed coordination and recording of client actions. I am not aware of a testing framework that can coordinate and record accesses of multiple clients and servers, so I spent the weekend developing a simple framework for coordinating these tests.</p>
+
+<p>Some requirements for the application are:</p>
+
+<ul>
+ <li>Easy-to-use interface</li>
+ <li>Easy to add clients and servers</li>
+ <li>Authenticated access for clients, servers, and interface</li>
+ <li>Storage of tests and results</li>
+</ul>
+
+<p>I chose <a href="https://heroku.com">Heroku</a> for prototyping this application.</p>
+
+<h2 id="interface">Interface</h2>
+
+<p>The web interface is available at https://xrootd-client-manager.herokuapp.com/. I chose to host it on Heroku as it is my go-to for pet projects. I will likely move this over to OSG’s production Kubernetes installation soon. The entire application is only the web interface and a back-end <a href="https://redis.io/">Redis</a> data store.</p>
+
+<figure class=""> <img alt="Screenshot of web interface" src="https://derekweitzel.com/images/posts/XRootDClientManager/Interface.png" /><figcaption> Screenshot of simple web interface </figcaption></figure>
+
+<p>The web interface shows the connected clients and servers. It also holds a persistent connection to the web server to keep the list of connected clients up to date.</p>
+
+<h2 id="client-communication">Client Communication</h2>
+
+<p>Client communication is handled through a Socket.IO connection. Socket.IO is a library that creates a bi-directional, event-based communication channel between the client and the server. The communication is over WebSockets if possible, but will fall back to HTTP long polling. A good discussion of long polling vs. WebSockets is available from <a href="https://www.ably.io/blog/websockets-vs-long-polling/">Ably</a>. A Socket.IO connection is established between the web server and each worker, server, and web client.</p>
+
+<p>The difficult part is authenticating the Socket.IO connections. We discuss this in the security section.</p>
+
+<h2 id="security">Security</h2>
+<p>Securing the commands and web interface is required since the web interface sends commands to the connected worker nodes and servers.</p>
+
+<h3 id="socketio-connections">Socket.IO Connections</h3>
+
+<p>The Socket.IO connection is secured with a shared key. The communication flow for a non-web client (worker/server) is:</p>
+
+<ol>
+ <li>A JWT is created from the secret key. The secret key is communicated through a separate secure channel.
In most cases, it will be through the command-line arguments of the client. The JWT has a limited lifetime and a scope.</li>
+ <li>The client registers with the web server with an Authorization bearer token in the headers. The registration includes details about the client and returns a special (secret) <code class="language-plaintext highlighter-rouge">client_id</code> that will be used to authenticate the Socket.IO connection. The registration is valid for 30 seconds, after which the <code class="language-plaintext highlighter-rouge">client_id</code> can no longer be used.</li>
+ <li>The client creates a Socket.IO connection with the <code class="language-plaintext highlighter-rouge">client_id</code> in the request arguments.</li>
+</ol>
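+<p>In curl terms, the flow looks roughly like the sketch below. The endpoint path, JSON field names, and PyJWT-style token minting are my own guesses for illustration; only the overall shape - a bearer-token registration that yields a short-lived <code class="language-plaintext highlighter-rouge">client_id</code>, which is then passed as a Socket.IO request argument - is described above.</p>
+<div class="highlight"><pre><code class="language-bash">#!/bin/bash
+# 1. Mint a short-lived JWT from the shared secret (claims are hypothetical).
+TOKEN=$(python3 -c 'import time, jwt; print(jwt.encode({"scope": "register", "exp": time.time() + 60}, "SHARED_SECRET", algorithm="HS256"))')
+
+# 2. Register; "/register" and the JSON body are guessed names.  The response
+#    carries the secret client_id, which is only honored for ~30 seconds.
+CLIENT_ID=$(curl -s -X POST https://xrootd-client-manager.herokuapp.com/register \
+    -H "Authorization: Bearer $TOKEN" -H "Content-Type: application/json" \
+    -d "{\"type\": \"worker\", \"hostname\": \"$(hostname)\"}" | jq -r .client_id)
+
+# 3. The Socket.IO client would then connect with the id as a request argument.
+echo "would connect with ?client_id=$CLIENT_ID"</code></pre></div>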
<h3 id="web-interface">Web Interface</h3>
+
+<p>The web interface is secured with an OAuth login from GitHub. There is a whitelist of allowed GitHub users that can access the interface.</p>
+
+<p>The flow for web clients connecting with Socket.IO is much easier, since they are already authenticated with OAuth from GitHub:</p>
+
+<ol>
+ <li>The user authenticates with GitHub.</li>
+ <li>The Socket.IO connection includes cookies such as the session, which is signed by a secret key on the server. The session’s GitHub key is compared to the whitelist of allowed users.</li>
+</ol>
+
+<h2 id="storage-of-tests-and-results">Storage of tests and results</h2>
+
+<p>Storage of the tests and results is still being designed. Most likely, the tests and results will be stored in a database such as Postgres.</p>
+
+<h1 id="conclusions">Conclusions</h1>
+
+<p><a href="https://heroku.com">Heroku</a> provides a great playground for prototyping these web applications. I hope that I can eventually find an alternative that will run on OSG’s production Kubernetes installation.</p>
+
+<p>The web application is still being developed, and there is much to be done before it can be fully utilized for the scale validation. But many of the difficult components are completed, including the communication and eventing, the secure web interface, and the clients.</p>
+
+<p>The GitHub repos are available at:</p>
+
+<ul>
+ <li><a href="https://github.com/djw8605/xrootd-client-manager">XRootD Client Manager</a></li>
+ <li><a href="https://github.com/djw8605/xrootd-ws-client">XRootD Client</a></li>
+</ul>
+
+
+
+
+ Extending the Spectrum LSF GUI to display job GPU metrics
+
+ 2020-09-04T17:30:44-06:00
+ https://hpc.social/2020/extending-the-spectrum-lsf-gui-to-display-job-gpu-metrics
+ <p>I’ve previously written about accounting for GPU workloads in <a href="https://www.gaborsamu.com/blog/spectrumlsf_gpu_usage/">Spectrum LSF using Nvidia DCGM</a> to collect granular metrics, including energy consumed, memory used, and overall GPU utilization. Spectrum LSF collects the information, and it is made available through the familiar bhist and bacct commands.</p>
+
+<p>How can one go about displaying this information in the web-based job management interface that is provided by Spectrum LSF Application Center or as part of the Spectrum LSF Suites? Here we will provide a simple example showing how:</p>
+
+<ul>
+<li>Administrators can customize the navigation in the Spectrum LSF web-based job management interface</li>
+<li>The same GPU accounting information can be displayed in the Spectrum LSF web-based job management interface</li>
+</ul>
+<p><strong>The following assumes that DCGM support has been enabled in Spectrum LSF and that you are running an edition of the Spectrum LSF Suite or Spectrum LSF Application Center.</strong></p>
+
+<p>The Spectrum LSF web-based job management interface enables GUI administrators to create new tabs with a user-specified URL or command. Here we will create a new tab which runs a script invoking the Spectrum LSF <em>bhist</em> command to display the GPU metrics for a given job. The script must be able to distinguish between a GPU and a non-GPU job.</p>
+
+<p>A. To begin, we&rsquo;ll require a simple script to display the detailed historical data of a given <em>jobID</em>, including GPU metrics, using the Spectrum LSF <em>bhist</em> command. A simple example script, saved with the filename <em>gpu_acct.sh</em>, is provided below.</p>
+
+<div class="highlight"><pre><code class="language-bash">#!/bin/sh
+# Display detailed job history for a given job ID, including GPU metrics.
+if [ -z "$1" ]
+then
+    echo "Usage: $0 &lt;jobID&gt;"
+else
+    # Run bhist once; only show its output if it contains GPU accounting.
+    OUTPUT=$(bhist -a -l -gpu "$1")
+    if echo "$OUTPUT" | grep -q 'GPU Energy Consumed'
+    then
+        echo "$OUTPUT"
+    else
+        echo "Not a GPU job."
+    fi
+fi</code></pre></div>
+
+<p>As the Spectrum LSF administrator, create the above script in the <em>$LSF_BINDIR</em> directory with permissions 755.</p>
+
+<p>B. Next, log in to the Spectrum LSF web-based interface as a user with administrative privileges and navigate to <strong>Workload &gt; Workload</strong>. Note that the user must have the <em>Application Center Administrator</em> privilege.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/workload_list2.png" /> </figure>
+
+<p>C. It’s now necessary to select one of the jobs in the job list in order to display the job detail view. This is the page where we will be adding the GPU accounting tab.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/workload_detail2.png" /> </figure>
+
+<p>D. Click the edit (pencil) dropdown found at the top right of the Spectrum LSF web-based interface and select <em>Edit Page</em>.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/edit_page2.png" /> </figure>
+
+<p>This will display the <em>Create New Tab</em> window, which will be filled in during the next step.</p>
+
+<p>E. 
In the Create New Tab window, specify the following:</p>
+
+<ul>
+<li>Tab Label: <em>GPU accounting</em></li>
+<li>Content From: <em>Command</em>, specifying the command <em>gpu_acct.sh %J</em></li>
+</ul>
+<p>Click the <em>Apply</em> button to complete the addition of the new tab on the job detail page.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/create_new_tab2.png" /> </figure>
+
+<p>F. Finally, click the Edit Page dropdown in the top right corner of the interface and select <em>Apply and exit Pages Editing</em> to make the changes take effect. You will now see a new GPU accounting tab in the job detail view. Here I’ve selected a GPU job that was run previously through Spectrum LSF. We see the full <em>bhist</em> output displayed, including the detailed GPU accounting.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/apply_changes2.png" /> </figure>
+
+<figure><img src="https://www.gaborsamu.com/images/gpu_acct2.png" /> </figure>
+
+<p>As a final note, for jobs that have not requested a GPU resource through Spectrum LSF, we will see the message “Not a GPU job” displayed when the GPU accounting tab is selected.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/not_gpu_job2.png" /> </figure>
+
+<p>That concludes this simple example showing how the Spectrum LSF web-based interface can be customized.</p>
+
+
+
+
+ White Managers in Research Computing, We Need to be Speaking Out About Racism, then Listening and Advocating
+
+ 2020-06-05T01:00:00-06:00
+ https://hpc.social/2020/white-managers-in-research-computing-we-need-to-be-speaking-out-about-racism-then-listening-and-advocating
+ <p>Many people in our research computing community — and in the broader research community we serve — are in pain this week. There’s another video of another Black man, George Floyd, begging for his life while being murdered by a police officer in Minneapolis. Here in Toronto a Black woman, Regis Korchinski-Paquet, died when what should have been a routine call resulted in a mystifying number of police officers showing up. With only police officers present in her apartment, she went over her high-rise balcony to her death, <a href="https://www.cbc.ca/news/canada/toronto/regis-korchinski-paquet-toronto-police-1.5590296">with her last words being, repeatedly, “Mom, help”</a>. This is all taking place during a pandemic which is disproportionately killing and incapacitating Black people, Indigenous people, and people of colour, because they have less access to jobs that can be worked from home and are more likely to be living in overcrowded multi-generational homes.</p>
+
+<p>So with news and social media being dominated by the consequences of systemic racism, anti-Black violence in particular, and <a href="https://brutality.glideapp.io">police violence</a> in reaction to anti-police-brutality protests, a lot of people are feeling despair and anguish.</p>
+
+<p>As managers, we are leaders of communities. Small communities, but communities nonetheless. We have a responsibility to members of those communities to let them know we support them and are here for them. It doesn’t take much to be a small bit of genuine help to <a href="https://twitter.com/FutureDrDukes/status/1267508084143865859">someone really struggling</a>. But we have to initiate the conversations.
Our community members won’t open up to us about these topics until we’ve demonstrated we can have some kind of adult conversation about racism.</p>
+
+<p>Doing or saying something is scary for many of us in research computing — who are overwhelmingly not Black and mostly white, which is a related conversation we need to have — because we are worried, reasonably, about getting it wrong. And it’s easy to make the excuse that because we don’t have Black team members (which… you know, same) it’s not something we need to address.</p>
+
+<p>Most of us don’t have team members who have gotten sick with COVID-19 either, but we’ve certainly been addressing that. It’s been hard and uncomfortable, and we didn’t get it all right the first time around, and we did it anyway. You don’t necessarily know who’s hurting in your team and community, or why. Not addressing a topic dominating the news and social media doesn’t project professionalism; it just suggests discomfort or indifference.</p>
+
+<p>I do not have great suggestions about what to say or do. I can offer some articles and collections of resources I’m finding useful:</p>
+
+<ul>
+ <li><a href="https://blacktechpipeline.substack.com/p/hey-employers-do-black-lives-matter">How Black employees want to be supported by their employers during times of protest for BlackLivesMatter</a> - Pariss Athena</li>
+ <li><a href="https://medium.com/the-establishment/welcome-to-the-anti-racism-movement-heres-what-you-ve-missed-711089cb7d34">Welcome To The Anti-Racism Movement — Here’s What You’ve Missed</a> - Ijeoma Oluo</li>
+ <li><a href="https://docs.google.com/document/d/1a-lzdtxOlWuzYNGqwlYwxMWADtZ6vJGCpKhtJHHrS54/preview?pru=AAABcpnrylg*2vEMb2In8-9aRyfg0OKSuA">Anti-Racist Resource Guide</a> - Victoria Alexander</li>
+ <li><a href="https://docs.google.com/document/d/1BRlF2_zhNe86SGgHa6-VlBO-QgirITwCTugSfKie5Fs/preview?pru=AAABcocJWsk*HmXb3HkF-szmJL5SmeHugg">Anti-Racism Resources</a> - Sarah Sophie Flicker, Alyssa Klein</li>
+ <li><a href="https://docs.google.com/document/d/1PrAq4iBNb4nVIcTsLcNlW8zjaQXBLkWayL8EaPlh0bc">Scaffolded Anti-Racism Resources</a> - Anna Stamborski, Nikki Zimmermann, Bailie Gregory</li>
+</ul>
+
+<p>I can also tell you what I’m doing at work. I’ve raised the issue at our all-hands meeting using words much like the above, and let people know they can talk to me about it if they need to. Unhelpfully, I sounded a bit awkward, even after practicing, but the next conversation will be easier. I’ve made a point of checking in a little deeper with people during one-on-ones and doing a lot of listening. I’m listening for feedback even when it’s uncomfortable, and I’ll keep reading those materials, and others, to see what I can do better and how I can support change.</p>
+
+<p>That’s not the best or even a particularly good way to address what’s going on now and what’s been going on for a very long time. It’s the bare minimum, and it started too late. The challenge will come when making changes, then advocating for more change to peers and upwards.
But it’s a start.</p>
+
+<p><em>From <a href="https://buttondown.email/ljdursi/archive/ed9c986b-6007-426d-a8c7-9f49b3b0d107">issue #27</a> of the <a href="https://buttondown.email/ljdursi/archive/">Research Computing Teams newsletter</a></em></p>
+
+
+
+
+ Exascale's long shadow and the HPC being left behind
+
+ 2020-05-20T18:33:00-06:00
+ https://hpc.social/2020/exascale-s-long-shadow-and-the-hpc-being-left-behind
+ <p>The delivery of Japan’s all-CPU Fugaku machine and the disclosure of the UK’s all-CPU ARCHER 2 system amidst the news, solidly “pre-Exascale” machines with pre-exascale budgets, are opening old wounds around the merits of deploying all-CPU systems in the context of leadership HPC.  Whether a supercomputer can truly be “leadership” if it is addressing the needs of today using power-inefficient, low-throughput technologies (rather than the needs of tomorrow, optimized for efficiency) is a very fair question to ask, and Filippo took this head-on:<br /><br /><blockquote class="twitter-tweet" style="display: block; margin: auto;"><div dir="ltr" lang="en">Unfortunately take codes from Tier-2 with GPU to Tier-1 without GPU is a <em>huge</em> step backward. These calls are holding back the true potential of <a href="https://twitter.com/hashtag/GPU?src=hash&amp;ref_src=twsrc%5Etfw">#GPU</a> computing in accelerating scientific discovery! <a href="https://t.co/qVVEWFDXt1">https://t.co/qVVEWFDXt1</a></div> — Filippo Spiga (@filippospiga) <a href="https://twitter.com/filippospiga/status/1263072225781047297?ref_src=twsrc%5Etfw">May 20, 2020</a></blockquote><br /><br />Of course, the real answer depends on your definition of “leadership HPC.”  Does a supercomputer qualify as “leadership” by definition if its budget is leadership-level?  Or does it need to enable science at a scale that was previously unavailable?  And does that science necessarily have to require dense floating point operations, as the Gordon Bell Prize has historically incentivized?  Does simulation size even have anything to do with the actual impact of the scientific output?<br /><br />While I do genuinely believe that the global exascale effort has brought nearly immeasurable good to the HPC industry, it’s now casting a very stark shadow that brings contrast to the growing divide between energy-efficient, accelerated computing (and the science that can make use of it) and all the applications and science domains that do not neatly map to dense linear algebra.  This growing divide causes me to lose sleep at night because it’s splitting the industry into two parts with unequal shares of capital.  The future is not bright for publicly funded, long-tail HPC infrastructure, especially since the cloud is aggressively eating up this market.<br /><br />Because this causes a lot of personal anxiety about the future of the industry in which I am employed, I submitted the following whitepaper in response to an NSCI RFI issued in 2019 titled “<a href="https://www.federalregister.gov/d/2019-12866">Request for Information on Update to Strategic Computing Objectives</a>.”  To be clear, I wrote this entirely on my personal time and without the permission or knowledge of anyone who pays me–to that extent, <a href="https://twitter.com/hpcprogrammer/status/1261480678866259974?s=21">I did not write this as a GPU- or DOE-apologist</a> company man, and I did not use this as a springboard to advance my own research agenda as often happens with these things.  
I just care about my own future and am continually trying to figure out how much runway I’ve got.<br /><br />The TL;DR is that I am very supportive of efforts such as Fugaku and Crossroads (contrary to <a href="https://twitter.com/hpcprogrammer/status/1261483277506019335?s=21">accusations otherwise</a>), which are looking to do the hard thing and advance the state of the art in HPC technology without leaving wide swaths of traditional HPC users and science domains behind. Whether or not efforts like Fugaku or Crossroads are enough to keep the non-Exascale HPC industry afloat remains unclear.  For what it’s worth, I never heard of any follow-up to my response to this RFI and expect it fell on deaf ears.<br /><br /><h2>Response to “Request for Information on Update to Strategic Computing Objectives”</h2>G. K. Lockwood<br />August 17, 2019<br /><br /><h3>Preface</h3>This document was written as a direct response to the Request for Information on Update to Strategic Computing Objectives (Document Number 2019-12866) published on June 18, 2019.  All views expressed within are the personal opinion of its author and do not represent the views or opinions of any individuals or organizations with whom the author may or may not be associated in any professional or personal capacities.  This document was authored without the support, knowledge, or input of any such individuals or organizations, and any similarity between the opinions expressed here and those of any other individuals or organizations is purely coincidental.<br /><br /><h3>Question 1. What are emerging and future scientific and technical challenges and opportunities that are central to ensuring American leadership in Strategic Computing (SC), and what are effective mechanisms for addressing these challenges?</h3><br />While the NSCI Strategic Plan identified four overarching principles which are undeniably required to maintain continued American leadership, its five strategic objectives are, in many ways, mutually incompatible with each other.<br /><br />In the three years following the initial NSCI plan towards delivering capable exascale, the outcomes of the Aurora and CORAL-2 procurements within DOE have made it undeniably clear that the definition of “capable exascale” necessarily requires the use of GPU technologies.  Because GPUs are, in many ways, accelerators specifically suited for scientific problems that can be reduced to dense linear algebra, this has effectively signaled that scientific challenges which are not reducible to dense linear algebra (and therefore incompatible with GPU technologies) are, by definition, no longer of strategic significance.<br /><br />By bifurcating science domains based on whether they are or are not compatible with GPU-based acceleration, we are now at a crossroads where entire classes of domain science research that have historically run at scale on CPU-based leadership computing systems will be left behind.  To be clear, this is not simply a matter of engineering—many important classes of scientific challenges are fundamentally incompatible with the GPU accelerator model of computation, and no amount of code modernization will change this fact.  
Yet these same science domains, which rely on complex multiphysics applications that are core to strategic areas such as stockpile stewardship and climate science, are of undeniably critical importance to both national security and society at large.<br /><br />Thus, there is now a clear and growing gap between NSCI’s ambition to deliver capable exascale and the larger mission to maintain leadership in the entirety of truly strategically important computing in the nation.  There are technical challenges intrinsic to this growing gap, which include pursuing research in hardware and software technologies that approach strategic computing more holistically rather than exclusively from a FLOPS perspective.  The community has long acknowledged that the scope of HPC has surpassed simply performing floating point operations, and the definition of capability computing now includes enabling science that, for example, may require tremendous data analysis capabilities (e.g., moving, transforming, and traversing massive data sets) but has relatively low floating point requirements.  The DOE Crossroads procurement and the Japanese leadership program and its Fugaku system embody this more balanced approach, and there is little doubt that both Crossroads and Fugaku will demonstrate a number of world’s-firsts and, by definition, demonstrate leadership in strategic computing without making all of the sacrifices required to meet today’s definition of capable exascale.<br /><br />Both Crossroads and Fugaku have required significant R&amp;D investment to enable these dimensions of capability, and the NSCI would do well to explicitly call out the need for continued investment in such directions that are orthogonal to exaflop-level capability.<br /><br /><h3>Question 2. What are appropriate models for partnerships between government, academia and industry in SC, and how can these partnerships be effectively leveraged to advance the objectives of SC?</h3><br />The most impactful models for industry-government partnership in HPC have come in the form of close collaboration between the HPC facilities that deploy extreme-scale systems and the technology providers in industry that create and support the required hardware and software solutions.  Strategy necessarily involves taking input from user requirements, workload characterization, and technology trends to inform future directions, and HPC facilities are uniquely qualified to speak to both user requirements (by virtue of the fact that they directly interact with users in support of HPC systems) and workload characterization (by virtue of the fact that they manage HPC systems).  Complementarily, industry technology providers (vendors) are uniquely qualified to speak to technology directions, marketability, and sustainability in the larger technology market.<br /><br />This effective collaboration can take the form of non-recurring engineering such as the contracts associated with large system procurements (often to address more tactical challenges towards strategic computing) or standalone programs such as DOE PathForward (which addresses longer-term technology development towards strategic computing).  In both cases though, industry (not HPC facilities or academic researchers) proposes the initial scope of work based on its own understanding of both (1) HPC-specific requirements and (2) larger market and profit prospects.  
This latter point is critical because the HPC market alone is simply not large enough to sustain purpose-built technologies, and sustaining new technologies and their peripheral enabling ecosystems requires buy-in from multiple markets.<br /><br />The role of academia in research is more complex, as academic research in HPC can be either basic or applied in nature.  Basic research (such as in applied mathematics and algorithm development) has stood on its own historically since such work results in a larger base of knowledge from which specific technology solutions (whether developed by industry or HPC facilities) can be composed both today and in the future.  The federal agencies participating in NSCI can claim credit for funding the basic research outcomes that have been incorporated into innumerable software and hardware technologies in use today. <br /><br />On the other hand, applied research (such as developing new software systems that may implement the outcomes of basic research) has had very mixed outcomes.  It is often the case that applied researchers who have no direct relationship with either HPC facilities or technology providers formulate research projects based on second-hand HPC requirements and technology trends.  It follows that their interpretation of such requirements is incomplete, and their research outcomes are misaligned with the actual needs of HPC facilities and industry.  Barring cases where academic applied research outcomes are so valuable that they stand on their own (of which there are many examples including OpenMPI and Tau), applied research in the absence of such a sustainability path results in a tremendous amount of software that has virtually no long-term (i.e., strategic) value to SC.<br /><br />This speaks to a gap between applied research in academia and those who apply research in practice that must be closed.  This gap has been perpetuated by a lack of HPC practitioners (domain scientists and applied researchers directly attached to HPC facilities or technology providers) on the committees that evaluate the merit of research.  Thus, a more effective engagement model would involve coupling the academic research pipeline to HPC facilities and industry more closely.  This may range from something as informal as increasing the diversity of review panels and program committees to include representatives from facilities and industry to a formal requirement that successful research proposals have a clearly defined connection to a specific industry or facility partner.  Regardless of the solution though, funding applied research that will be “thrown over the wall” to HPC facilities and vendors without their input is not compatible with SC.<br /><br /><h3>Question 3. How do we develop and nurture the capable workforce with the necessary skill and competencies to ensure American leadership in SC? 
What are effective nontraditional approaches to lowering the barriers to knowledge transfer?</h3><br />Although virtually every report discussing strategic directions and future requirements of HPC calls for knowledge transfer and building a larger workforce through training and outreach (e.g., see the complete set of <a href="https://exascaleage.org/">DOE Exascale Requirements Reviews</a>), such reports generally neglect two critical realities of employing and retaining a talented workforce at production HPC facilities and in industry.<br /><br />The first reality is that the problems intrinsic to modern HPC (solving problems at extreme scales) are no longer exclusive to HPC.  The ubiquity of technology in modern life now means that the entire technology industry must deal with problems at scale as a matter of course.  As such, the HPC community is now competing with well-capitalized commercial entities that have increased the absolute value of a skilled engineer to levels that the scientific research community simply cannot afford.<br /><br />Thus, the perceived lack of skilled workforce in HPC is not a failing of the workforce development strategy in place; in fact, it may be a great indicator of its success, as it has created a workforce whose skills have a value that far outstrips the investment put into workforce development.  However, this also means that the talented individuals who eschew the higher pay and amenities of working in the larger technology industry do so for non-monetary reasons (work-life balance, attraction to the science mission, geographic locality).  It is therefore critically important that strategic computing identify these motivators and build upon them to the greatest possible degree to maintain an edge in an extremely competitive hiring landscape.<br /><br />The second reality is that the key to an exceptional workforce is not simply a matter of technical knowledge.  There is no shortage of individuals who understand parallel programming in the world, and it is of little strategic value to pursue workforce development strategies that prioritize knowledge transfer as the principal outcome.  Rather, strategic computing requires a workforce that is capable of critical thinking and has a natural drive to solve problems that have never been solved before.  These traits should be emphasized to a far greater degree than the current pedagogical emphasis on material that can be learned from a manual by anyone with a curious mind.<br /><br />By definition, very few people in the world have prior experience in world-class HPC.  There are very limited opportunities to build a credible work history in extreme-scale HPC for individuals who are ineligible for student internships or postdoctoral appointments.  
As a result, world-class HPC facilities rarely see qualified applicants for open positions when “qualified” is defined on the basis of relevant work experience; a mid-career developer or systems engineer working in a campus-scale HPC organization simply has no opportunities to demonstrate his or her intellectual capability in a way that is outstanding to the facilities that deliver strategic computing resources.<br /><br />Thus, an integrative approach to workforce development that (1) emphasizes problem-based learning rather than rote reiteration of manuals and standards documents in an environment where (2) representatives from NSCI constituent agencies can engage with trainees (i.e., potential employees) in a fashion with less formality and pretense than a typical “CV-phone screen-interview” pipeline may reveal a much broader potential workforce whose strengths more closely align with strategic computing.  Such an approach may manifest in the form of intensive boot camps such as the DOE ATPESC program, grants for mid-career retraining in partnership with a leadership computing facility, or sabbatical support for technical staff at the nation’s mid-scale computing facilities.<br /><br /><h3>Question 4. How can technical advances in SC and other large government and private initiatives, including infrastructure advances, provide new knowledge and mechanisms for executing next generation research?</h3><br />No response.<br /><br /><h3>Question 5. What are the future national-level use cases that will drive new computing paradigms, and how will new computing paradigms yield new use cases?</h3>It is easy to claim that artificial intelligence will be the most important future national use case to drive new computing paradigms.  However, this is a very dangerous statement to make without qualification, as the actual level of readiness for applying AI to solve scientific problems is very low, and the actual scales, aggregate demand, and algorithmic motifs required by such workloads for scientific discovery remain poorly defined.  More generally, the requirements of AI workloads at large remain uncertain; for example, Facebook uses a variety of AI techniques in production and has found that each application area requires different computational, storage, and network resources (see <i><a href="https://research.fb.com/wp-content/uploads/2017/12/hpca-2018-facebook.pdf">Applied Machine Learning at Facebook: A Datacenter Infrastructure Perspective</a></i>).  Outside of the large hyperscale datacenters, industry consensus suggests that production AI workloads remain largely at single-server scales.  As such, it is difficult to confidently assert what the rate of scale-out AI will be for strategic computing.<br /><br />The current leading technique for AI at scale is deep learning, yet scientific discovery is at odds with the black-box nature of this method.  Alternative methods such as decision trees offer much more insight into why a trained model behaves as it does and are more compatible with imposing the physical constraints that govern the systems being modeled (e.g., see <i><a href="https://doi.org/10.1073/pnas.1711236115">Iterative random forests to discover predictive and stable high-order interactions</a></i>).  However, the relative importance of such non-black-box learning techniques in HPC is completely unknown, as are the general optimization points for such techniques in the context of scientific computing.  
There is a danger that the similarities between deep learning and many HPC problems (GEMM-heavy workloads) place an artificially high importance on the role of deep learning in SC.  It may be the case that deep learning is the most effective method for applying AI to address problems in scientific computing, but caution must be taken to ensure that major challenges in SC do not all look like deep-learning nails simply because GPUs are a very effective hammer.<br /><br />From a domain science perspective, there are very few domain sciences where AI can replace traditional simulation-driven workflows wholesale.  As such, the role of AI in SC will be largely supplementary; scientific workflows may integrate an AI component to generate starting conditions, replace humans in the loop during steering, or identify areas of interest in the results of a primary simulation.  However, it is very unlikely that AI will grow to be of greater significance to scientific computing than modeling and simulation.  Instead, it will be the source of new computational resource requirements that simply did not exist in the past because those tasks were carried out by humans.  The road towards integrating AI into scientific workflows will also be a long and tortuous one, as the field is evolving far more rapidly in industry than scientific computing traditionally has.  Care must be taken that SC not tie itself too closely to a method (and its associated hardware configurations) that may be deprecated in short order.<br /><br /><h3>Question 6. What areas of research or topics of the 2016 NSCI Strategic Plan should continue to be a priority for federally funded research and require continued Federal R&amp;D investments? What areas of research or topics of the 2016 Strategic Plan no longer need to be prioritized for federally funded research?</h3><br />The five objectives outlined in the 2016 NSCI Strategic Plan all contain elements that require continued federal R&amp;D investment, but they require realignment with the technological, scientific, and economic landscape as it exists now.<br /><br /><h4>Objective 1: accelerating the development of capable exascale by the mid-2020s</h4>The 2016 NSCI report correctly stated that capable exascale technologies would not be available until the mid-2020s, but DOE pulled its exascale system deliveries into the early 2020s.  As a result, the delivery of exascale had to be accelerated at significantly higher costs: there have been significant capital costs (the first US exascale systems will cost between 2x and 10x their immediate predecessors, either setting a new bar for the cost of future leadership HPC systems or resulting in a bubble in funding for all post-exascale machines), operational costs (the power budgets may exceed the original 20 MW goal by 50%), and opportunity costs (only two of the three CORAL labs actually deployed a CORAL-1 machine).<br /><br />Notably absent here is a commensurate increase (2x-10x, 1.5x, or 1.3x as above) in R&amp;D efforts towards making these exascale systems widely accessible to applications that do not fall under the umbrella of ECP funding.  
As such, NSCI must continue to emphasize the importance of funding R&amp;D to enable the “capable” component of this objective through the mid-2020s at minimum.<br /><br /><h4>Objective 2: Developing a coherent platform for modeling, simulation, and data analytics</h4>The convergence of HPC and Big Data was a popular point of discussion when the 2016 report was written, but there has yet to be a compelling, quantitative analysis that demonstrates the difference between a “Big Data” system and an “HPC” system despite the best efforts of several leadership-scale HPC facilities.  The challenge is not one of technology and system architecture; rather, the principal design point for “Big Data” systems outside of the HPC world has simply been one of cost (e.g., scaling out cheap hardware over a cheap network for a very well-defined bulk data access pattern) over performance.  There is absolutely nothing that stops the typical “Big Data” application stacks, both old (e.g., Hadoop and Spark; see <a href="https://doi.org/10.1109/BigData.2016.7840606">this paper</a>) and new (e.g., TensorFlow; see <a href="https://dl.acm.org/doi/10.5555/3291656.3291724">this paper</a>) from running at scale on any modern HPC system, and both have been demonstrated at scale on systems that were sensibly designed.<br /><br />As such, this objective need not be emphasized in the future.  Rather, engineering work is required to enable the “Big Data” stacks in use outside of HPC to work efficiently on the HPC systems of tomorrow.  This remains a software, not architectural, problem, and very much an engineering, not research, challenge.<br /><br /><h4>Objective 3: R&amp;D towards post-CMOS technologies and new paradigms</h4>It is not the role of NSCI constituent agencies to fund the development of new materials systems explicitly for post-CMOS computing, because these agencies, their review committees, and the academic researchers they fund do not have the insight into the realities of logistics, material costs, and manufacturing required to predict what combination of materials and microarchitectures could actually be turned into a marketable product that can be sustained by the larger technology industry.  In the absence of this insight, R&amp;D towards post-CMOS technologies is likely to produce interesting demonstrations that are impractical for the purposes of actually developing leadership-scale computing systems.  Instead, such research should be funded using facility-industry partnerships as discussed previously in Question 2.<br /><br />Investing in R&amp;D towards new paradigms in computing should also be considered not with respect to enabling new scientific applications, but rather with respect to accelerating existing scientific workloads that are incompatible with exascale technologies (GPUs).  As discussed in response to Question 1, there is a very real risk of leaving entire domains of computational science behind as the definition of leadership computing (when equated to exascale) becomes increasingly narrow in scope.  
Developing new accelerator technologies that benefit complex application workflows (e.g., multiphysics simulations) is of critical importance in the coming years, lest missions such as stockpile stewardship and climate science fall by the wayside.<br /><br /><h4>Objective 4: Improving application development and workforce development</h4>The DOE Exascale Computing Project (ECP) has demonstrated a highly effective way of integrating researchers, application code teams, and facilities towards improving application development.  Providing a coherent ecosystem of recommended methods (such as its IDEAS project; e.g., see <a href="https://ideas-productivity.org/ideas-ecp/">ECP-IDEAS</a>), development tools (funded under its Software Technologies area), algorithm-application partnerships (through its co-design centers), and application integration efforts (funded under its Hardware and Integration area) is an excellent blueprint for improving application development.  Developing a more generic model for establishing and supporting this style of development beyond the timeline of the ECP funding should be pursued.<br /><br />Workforce development should focus less on basic technical training and more on improving critical thinking, as described in the response to Question 3 above.<br /><br /><h4>Objective 5: Broadening public-private partnership</h4>As described in the response to Question 2 above, public-private partnership is absolutely critical to sustain SC in the coming years.  The financial incentives driving technology development from the world outside of HPC have come to outstrip the resources available for HPC to exist independently.  SC efforts must engage with both technology providers and the primary market forces (the enterprise and hyperscale computing industries) to better understand where technologies, solutions, and opportunities can be pursued in partnership rather than in parallel.<br /><br /><h3>Question 7. What challenges or objectives not included in the 2016 NSCI Strategic Plan should be strategic priorities for the federally funded SC R&amp;D? Discuss what new capabilities would be desired, what objectives should guide such research, and why those capabilities and objectives should be strategic priorities?</h3>The mission of providing capable exascale as described in the 2016 NSCI Strategic Plan is proving not to be a sustainable long-term path.  As described in the response to Question 1 above, the first exascale machines stand to accelerate scientific problems that can be cast as dense matrix-matrix multiplication problems, but there are large swaths of scientific problems to which this does not apply.  If one considers the Graph500 BFS list, three of the top five systems are over seven years old and will be retired in 2019.  While graph problems are not prolific in SC, the fact that so little progress has been made in accelerating extreme-scale graph traversal during the seven years that exascale has been aggressively pursued is indicative of some classes of HPC problems being abjectly left behind.<br /><br />Thus, a primary objective towards capable exascale must be examining the opportunity costs of the current strategic direction.  
If it is determined that there is simply no way to bring forward those types of computational problems that are incompatible with GPU-based acceleration, then a clearer strategy must be formulated to ensure that the scientific challenges being solved by those computational problems do not stagnate.  As it stands, the public discourse surrounding the first-generation US exascale architectures is not universally positive because of this perceived scientific exclusivity of the chosen architectures, and such exclusivity is at odds with both capable computing and computing leadership.</p> + + + + + COBOL, Imperial College, Bursty Maintenance, and Sustained Scientific Software + + 2020-04-18T01:00:00-06:00 + https://hpc.social/2020/cobol-imperial-college-bursty-maintenance-and-sustained-scientific-software + <p>We’ve all read about the huge rise in unemployment claims causing +unprecedented loads on US state software systems, with the situation +so dire that the governor of New Jersey put out <a href="https://qz.com/1832988/covid-19-results-in-new-jersey-desperately-needing-cobol-coders/">an urgent call +for COBOL programmers</a>. +It’s worth looking at this from the point of view of research +software, where we need software to be sustainable and reproducible +for long periods of time.</p> + +<p>The systems that suddenly need COBOL developers have often +been chugging away with maintenance and tweaks for 40–50 +years. This is an almost unfathomable success in the world of +software. So the current issue clearly isn’t with the quality of +the software itself <em>per se</em>.</p> + +<p>Is COBOL being “obsolete” the problem? I mean, look +at that record of success again. COBOL is a proven, <a href="https://hackernoon.com/i-took-a-cobol-course-and-it-wasnt-the-worst-z1ba3yrp">perfectly +serviceable</a>, +domain-specific language for these sorts of batch tasks. There are +ways to connect to tools and services written in other languages, +so it can coexist with other systems. The lack of (say) a vibrant and +rapidly-evolving ecosystem of third-party packages isn’t necessarily +a bad thing here. (How innovative and cutting-edge do you want the +system that sends out your pension cheques to be, exactly, when the +time comes? Do you really want someone to accidentally +<a href="https://qz.com/646467/how-one-programmer-broke-the-internet-by-deleting-a-tiny-piece-of-code/">leftpad</a> +your bank account?)</p> + +<p>Yes, people coming in to maintain the software for the first time +will have to familiarize themselves with a new, old, language. But +people in research or open-source software learn an unfamiliar language to +contribute to a code base every day. Even if they knew the language, +they would still have to learn the codebase itself, the idioms, and +the problem domain. All of those things can be quickly learned by +new developers if there is documentation and tests, and especially +if there are people who have recently been maintaining the code +base to help. And that’s the issue here.</p> + +<p>These COBOL systems weren’t poorly designed, or obsolete, or a bad +match to their requirements. Easily handling 100x the previously +expected maximum rate of applications wouldn’t have been a feature; it would have been a symptom +of giddy overengineering. The requirements just changed suddenly. 
+And when that happened, the people, procedures, and resources weren’t +in place to do the necessary maintenance.</p> + +<p>There is no such thing as infrastructure which does not require +maintenance, and the need for that maintenance is often quite bursty. +This is just as true in research software as it is in governmental +systems. Research software which goes into production needs to be +written in a maintainable fashion, but that’s not enough. There +has to be funding support to keep in place the people, procedures, +and resources necessary to maintain that software, likely in bursts. +And those resources have to remain in place between bursts.</p> + +<p>The bursty nature of necessary maintenance has also come up in +research software, in the saga of the <a href="https://twitter.com/neil_ferguson/status/1241835454707699713">Imperial College epidemic +modelling +software</a>. +When COVID-19 arrived, this tool suddenly moved from a mildly +interesting research code to a key input into UK domestic policy. +Transparency and flexibility leapt from being nice-to-haves to key +requirements, and the people, procedures, documentation, tests, and +resources weren’t in place to add them.</p> + +<p>The importance and urgency of epidemic modelling meant that expertise +and resources from many places were made available to extend and +eventually rewrite the code. But this isn’t a sustainable model for +research computing software, any more than it is for unemployment +application processing systems.</p> + +<p>We still genuinely don’t know how to reliably provide maintenance, bursty +or otherwise, for software, shared databases, or systems in +our research communities. Our funding models are all built around +supporting experiments, observations, or theoretical works — +short-term projects which start, proceed, result in publications +and other research outputs, and are then done. Mechanisms for ongoing support of evolving +research <em>inputs</em> aren’t even a work in progress — they’re absent.</p> + +<p>If work on experimental methods develops new kinds of equipment or +reagents which are useful to other researchers, then a vendor starts +manufacturing and selling those items to researchers, with money +that comes out of their grants — and that’s the sustainability +model. We don’t have that for ongoing efforts in software, databases, +or even reliably for hardware shared at a larger scale than a single +organization yet.</p> + +<p>For software undergoing active development, there are at least +plausible approaches proposed. Some of them look, +reasonably enough, like the research equipment model above. Add a +modest amount of money to grants earmarked for distribution to +software, databases, or systems that the research group relies on. +Maybe that would work! But it would almost certainly preferentially +fund projects that are being actively worked on, taking feature +requests and bug reports for software or new submissions for +databases.</p> + +<p>For mature, quiescent resources that “just work” and +so fade into the background, the tools that don’t need development +until they suddenly do, we need other solutions. 
Likely we need +centres of expertise in research computing, populated by professionals +as advocated by <a href="https://society-rse.org">RSE societies</a> <a href="https://us-rse.org">around +the world</a>, with named maintainers even for +research tools actively used but not actively developed.</p> + +<p>People — +<a href="https://bssw.io/blog_posts/maintainers-drive-software-sustainability">maintainers</a>, +with the tools to do their job — are what drive software +sustainability, not language choices or technologies. As a research +community we need to find and sustain funding to retain, develop, +and empower those people to do their work. Otherwise we’re going +to waste time and effort urgently re-learning and re-creating tools +when individually unforeseeable but collectively predictable bursts +in maintenance are needed.</p> + + + + + When you got time on your side - create something + + 2020-04-01T13:04:28-06:00 + https://hpc.social/2020/when-you-got-time-on-your-side-create-something + <p>As we all settle down into the new norm of being housebound during this global epidemic, it&rsquo;s given us the opportunity to work on projects which would have +remained on the back burner for an indefinite period.</p> + +<p>As the eternal tinkerer, I&rsquo;ve lately turned my attention to the Arduino community and all of the very interesting projects and possibilities that exist. +One wearable electronics project that caught my eye a number of months back was a wristwatch project which I spotted on the Adafruit site <a href="https://learn.adafruit.com/flora-geo-watch">here</a>. Of course, ordering the parts was the easy part. What I found in the meantime was that my soldering iron was also <em>kaput</em> and +I could not for the life of me find any of the wires, solder and other electronics tools. So alongside the box full of electronics components, I ordered a +shiny new soldering iron, essentials for soldering and a few different types of glue.</p> + +<p>And the last important piece of this jigsaw puzzle was the watch band. I had been scouting around for some time for a suitable band - something high quality, +yet fashionable. I managed to purchase a fantastic Kapital (Japan) indigo velcro band from Grailed.</p> + +<p>With all of the pieces finally in my hands, what was missing was time. This past weekend, I was able to devote some time to prototyping and ultimately +soldering together all of the pieces with some younger helping hands. Definitely my soldering skills were not what they used to be. But there was something +special about sitting on my back porch in the spring sunshine stripping wires, and soldering. The most challenging part for me was not assembling the watch; rather, it was gluing the straps to the back of the watch face in order to mount it to the watch band. I had to try a few different glues with a lot of patience. I wasn&rsquo;t keen on using E6000 glue due to its toxicity&hellip; and opted instead to use a non-toxic glue from Aleene&rsquo;s. Not sure how it will hold up in the long term though - time will tell (pun intended).</p>
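+
+<p>Bringing the watch to life is then just a matter of loading a sketch onto the FLORA. The real code comes from the Adafruit tutorial linked above (it reads the time from the GPS module); purely as an illustrative sketch of the idea (the pin number, pixel count and colour here are my assumptions, not the tutorial&rsquo;s), something like this will sweep a single &ldquo;hand&rdquo; around a 12-pixel NeoPixel ring:</p>
+
+<pre><code>#include "Adafruit_NeoPixel.h"  // Adafruit's NeoPixel library
+
+#define RING_PIN    6   // assumed data pin for the NeoPixel ring
+#define RING_PIXELS 12  // a 12-pixel ring maps nicely onto a clock face
+
+Adafruit_NeoPixel ring(RING_PIXELS, RING_PIN, NEO_GRB + NEO_KHZ800);
+
+void setup() {
+  ring.begin();            // initialize the NeoPixel driver
+  ring.setBrightness(20);  // keep it dim; it's a watch, not a torch
+  ring.show();             // start with all pixels off
+}
+
+void loop() {
+  // Light one pixel and step it one position every five seconds, so a
+  // full revolution takes a minute. The real watch would set the
+  // position from the GPS (or an RTC) time instead.
+  int pos = (millis() / 5000) % RING_PIXELS;
+  ring.clear();
+  ring.setPixelColor(pos, ring.Color(0, 0, 255));  // a blue "hand"
+  ring.show();
+}
+</code></pre>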
<p>Above is a photo of the watch connected to its USB &ldquo;umbilical cord&rdquo; for power and to load the sketch (code).</p> + +<p>And this is how it looks on my arm running off of a mini LiPo battery (also courtesy of Adafruit).</p> + +<figure><img src="https://www.gaborsamu.com/images/flora_kapital2.jpg" /> +</figure> + +<p>Tinkering is fun!</p> + + + + + How To Quickly Start One-on-Ones with your Research Computing Team- A One-Week Plan of Action + + 2020-03-24T01:00:00-06:00 + https://hpc.social/2020/how-to-quickly-start-one-on-ones-with-your-research-computing-team-a-one-week-plan-of-action + <p>Research computing teams around the world are suddenly finding themselves working completely remotely. As a manager, you’ve gotten over the first hump and made sure everyone has the tools they need - software, VPN access, accounts on whatever chat and videoconferencing tools you’ll need. Now what?</p> + +<p>We all know that <a href="https://www.extension.harvard.edu/professional-development/blog/challenges-managing-virtual-teams-and-how-overcome-them">remote teams need more communication</a> than on-site teams, so you’ll need to start communicating more. This is a perfect time to start doing one-on-ones if you haven’t been doing them already.</p> + +<p>What follows is a one-week plan to get started doing one-on-ones with your newly-remote research computing team. For each weekday, there’s about 10 minutes of reading and another 10-15 minutes of homework to do to get you started doing one-on-ones with your team starting one week from when you begin. There are follow-up activities in weeks two and three to take stock, make tweaks, and start thinking about tools that will help.</p> + +<p>This document is available in <a href="https://www.dursi.ca/assets/quickstart-one-on-ones/quickstart-one-on-one.pdf">pdf</a> and <a href="https://www.dursi.ca/assets/quickstart-one-on-ones/quickstart-one-on-one.epub">epub</a> formats. You can also sign up below to get +the material sent to you one day at a time in a series (your email won’t +be used for <em>anything</em> else except sending you the material below.)</p> + +<h1 id="day-1---background-and-planning">Day 1 - Background and Planning</h1> + +<p>Even on-site, one of the most important things a manager can do with their teams is to have regular one-on-one meetings with each of their team members. This practice is almost ubiquitous in tech companies and many other industries. The fact that there are tools, websites, podcasts, and videos about it might lead you to think they’re complicated; they’re not. They’re super simple. Those resources all exist because one-on-ones are important and people are trying to help. Some of those resources are quite good, and I’ll provide some pointers to some of them that I think are particularly relevant in our research computing context; but you don’t <em>need</em> any of them.</p> + +<p>One-on-ones are just meetings with each individual team member and you; they get a half hour of your completely undivided attention, every week (or at <strong>worst</strong>, every other week). 
The basic principles of successful one-on-one meetings are:</p> + +<ul> + <li>The meeting is scheduled and at a regular time every week.</li> + <li>This is about building working relationships.</li> + <li>This isn’t a status update: the meeting is about your team member, not you.</li> + <li>So, the team member and their agenda go first, every time.</li> + <li>Take notes in a way that shows you’re paying attention.</li> + <li>Followup is crucial.</li> + <li>When in doubt, imagine having one-on-ones with someone you report to.</li> +</ul> + +<p>And that’s it. There are no tricks or particularly hard parts! If you follow the principles above when making decisions, and are disciplined enough to keep the meetings going even (especially) when things are busy, pay attention to what you’re hearing, and follow up, you’re going to have successful one-on-ones.</p> + +<p>Simple as they might be, these meetings are going to be the most effective way to achieve four important things:</p> + +<ul> + <li>Understand your team member better and so build solid working relationships.</li> + <li>Build trust with your team member.</li> + <li>Make your team member feel more important and engaged, and likely to raise issues with you.</li> + <li>Learn <em>much</em> more about what’s actually going on with the work your team is doing.</li> +</ul> + +<p>On top of those benefits, most managers find that these meetings actually save them time - people will save up questions they have for the one-on-ones rather than asking them as they come up, you’ll be able to better match people to tasks (and be able to find out how better to direct them on those tasks), and if anything starts to go sideways you’ll find out about it faster.</p> + +<p><strong>Your assignment:</strong> Let’s get things started by finding potential one-on-one slots on your calendar starting a week from today; in a couple of days you’ll be sending the list of those timeslots out to your team members to sign up for their one-on-ones. Look for a few more slots than team members - if you have 8 team members, aim to find say 12 slots. Identify 30-minute slots on your calendar for that week, ideally with a bit of padding on either side to prepare and review your notes. Prioritize slots that are normally free in coming weeks, and start doing what you can to fence those times off for a bit. Normally we’d be doing this for three weeks out or so when our calendars are mostly empty except for recurring meetings - here we’re doing them in a hurry, and I know you already have a bunch of things lined up for next week. But this is important, so if you have to, consider rescheduling or cancelling some other low priority meetings next week to make room. 
List the slots in a google spreadsheet, or start a doodle poll with them as the entries, and in a couple of days we’ll get people to sign up for them.</p> + +<p>Also: if you’re having a weekly team meeting today, give them a heads up that because you’re now all remote you’d like to start meeting with them one-on-one every week, and you’ll be sending an email out.</p> + +<h2 id="resources">Resources</h2> +<ul> + <li><a href="https://medium.com/@stephen_younge/your-team-just-went-remote-now-what-b643e58fad61">Your Team Just Went Remote, Now What</a> by Stephen Younge talks about the importance of communications and one-on-ones in the current moment</li> + <li><a href="http://shop.oreilly.com/product/0636920056843.do">The Manager’s Path</a> and <a href="https://www.goodreads.com/book/show/324750.High_Output_Management">High Output Management</a> are new and classic tech-industry management books respectively that talk extensively, and early on, about one-on-ones</li> +</ul> + +<h2 id="faqs">FAQs</h2> + +<p><strong>Q: I have 12 people directly reporting to me - I just don’t think I can find 6 hours a week to spend on this.</strong><br /> +A: I promise you that this is one of the most productive ways you can be spending your management time — certainly in difficult periods like this, but even when things get back to normal. You don’t have to take my word for it - ask around if you know anyone doing one-on-ones in their team, and see what they say. Doing these meetings will mean you’ll be less stressed as a manager, have a better handle on what’s going on in your team, be able to put fires out sooner, have a forum for giving your team members feedback and coaching, be better able to guide your team members’ skills and career development, and your team members will know that you’re listening to them. If you’re still skeptical, phase it in - start every-other week and move to weekly after you have a few rounds under your belt.</p> + +<p><strong>Q: Ok, 12, sure, but I’m a PI with a lab of 30 people. How’s that supposed to work?</strong><br /> +A: Thirty people is just too many people to give effective management to - you aren’t meaningfully tracking the needs, career development, and progress of thirty people <em>now</em> on top of everything else you need to do, and your lab members already know it. One-on-ones aren’t magic; they can’t fix that. So you’ll have to pick and choose between some options.</p> + +<p>Perhaps you’ll prioritize trainees and some senior staff: have one-on-ones with them, and after a couple of rounds, once they understand how it works, have the senior staff start one-on-ones with more junior staff, even if there’s no formal reporting relationship there. That’s not as good as them having one-on-one time with you, but it’s better than no one having one-on-one time with anyone, and it starts preparing senior staff or trainees for future management duties. Every so often you could make sure you have “skip-level one-on-ones” with the staff or trainees who are having one-on-ones with these senior lab members - individually or as a group meeting - to make sure things are going well.</p> + +<p>Alternately, you could just have bi-weekly one-on-ones with everyone; that’s 7.5 hours a week providing direct hands-on management with your team members. 
Again, it’s not as good as weekly one-on-ones but it is significantly better than not having them.</p> + +<p><strong>Q: So if biweekly is ok, can I do these monthly?</strong><br /> +A: A useful way to think about one-on-ones from your team member’s point of view is to imagine the situation with you having one-on-ones with someone more senior to you - your boss if you’re a staff manager, or a department chair or dean if you’re a PI. How would you treat a regular monthly half-hour one-on-one meeting with someone you report to?</p> + +<p>I don’t think that this is a stretch to imagine. You’d want this precious 30 min a month to go as well as possible. You’d spend some time preparing a dog-and-pony show, probably some slides or something, and prioritize and hone a small number of questions you need answers to. It would be A Big Deal, and so kind of stressful, each time.</p> + +<p>Your boss would get a sanitized view of what’s going on. You’d get a chance to look good to a boss who’s been around a while and recognizes this as the highly polished view that it is. You’d maybe get a few answers you needed, or a promise to get back to you on those questions, which is good - but at the cost of significant preparation and stress.</p> + +<p>Monthly <em>just isn’t worth doing</em>. Bi-weekly isn’t great - you won’t save as much time on interruptions and questions, because short questions that can wait three days for an answer often can’t wait a week or more - but it’s not bad. Weekly is the best.</p> + +<p><strong>Q: My schedule gets changed at the last minute a lot - I’m not sure I can always keep the same time every week for these meetings!</strong><br /> +A: It’s ok! Your team members know you’re busy. Stuff gets rescheduled. They understand, really. The important thing isn’t that you’re meeting Frieda at 2pm every Thursday come what may; the important thing is that Frieda knows she always has 30 minutes on your schedule every week. Just reschedule them as needed, just like you’d do with any other important meeting - for the same week, and as soon as you know something’s coming up.</p> + +<h1 id="day-2---understanding-what-the-one-on-ones-will-look-like">Day 2 - Understanding What The One-on-Ones will Look Like</h1> + +<p>You know the basic principles of each one-on-one meeting now; what does this mean for how the meetings will go?</p> + +<p>First, let’s talk about the medium. This is about building working relationships, and while it’s probably not literally impossible to do that over text-based chat, it’s hugely slower and more prone to missteps. On top of that, there are going to eventually be sensitive topics discussed in these one-on-ones: you want the team member to be able to tell you about problems they’re having with co-workers, or that they didn’t do so great at something, or the stress of working at home with the uncertainty of the pandemic, and they might reasonably be reluctant to put these concerns into writing and send them off into the internet.</p> + +<p>So the options are some kind of video/teleconference, or just phone calls. Videoconferencing is better, because it lets you show facial expression and some body language; that goes a long way towards conveying intent and reducing miscommunications. You probably already use some tools for videoconferencing, and whatever you use is fine. 
I personally have no problem recommending:</p> + +<ul> + <li><a href="https://zoom.us">Zoom.us</a> - rock solid, requires installation of an app on desktop or mobile, free for 40-minute one-on-one calls which works perfectly here.</li> + <li><a href="https://slack.com/video-conferencing">Slack videoconference</a> - requires a paid plan which is extremely reasonably priced for academic or non-profit work; less solid but works reasonably well and if you’re already using slack it’s one fewer application to juggle. A downside is that you can’t do video on mobile.</li> + <li><a href="https://whereby.com/user">Whereby</a> - Free for up to four people, works in browser on desktop or on mobile via free apps; pretty solid, only requires the host to register an account.</li> +</ul> + +<p>But really, if you already use something else and your team members have access to it, just use it, it’s fine. If you’re not already using something, pick whereby or zoom. And if for some reason none of those are available, just plan to make phone calls.</p> + +<p>So that’s the medium, what’s the message? There are a number of suggested high-level agendas for one-on-ones out there. I’m going to recommend going with the <a href="https://www.manager-tools.com/map-universe/basics">Manager Tools One-on-Ones</a> agenda: it’s super simple, I’ve seen it work very well in a number of research computing contexts, it’s well motivated by empirical data, and I think it makes the best starting point. If you’ve seen and used a different agenda that’s worked well, feel free to use it instead; otherwise, use theirs.</p> + +<p>The Manager Tools agenda is:</p> + +<ul> + <li>10 minutes for what your team member wants to talk about</li> + <li>10 minutes for items on your agenda</li> + <li>10 minutes to focus on their career or skills development</li> +</ul> + +<p>Let’s tackle these in order:</p> + +<p><strong>Their agenda:</strong> 10 minutes for what they want to talk about.</p> + +<p>It’s hard to give you much information here, since your various team members will have different topics they want to cover. A number will misunderstand and try to give updates on their tasks; try to gently discourage that and nudge them toward higher-level topics.</p> + +<p>They will also likely have questions about the effort, their role, what’s coming next; this is a great opportunity for discussion. They’ll have questions about the current situation that you are unlikely to be able to answer, and you’ll just have to be honest about what you do and don’t know. They’ll talk about how things are currently working with the team and may bring up things you want to follow up on.</p> + +<p>But there will also be a lot of not particularly work-related discussion they want to have with you. Maybe it’ll be about their new home-office setup, or their pets, or their family, or the world situation. They may want to lament the loss of sports (and talk about their favourite team), or talk about a new dish they’re learning to cook now that they can’t go to one of their neighbourhood restaurants. Remember that the purpose of this meeting is to build a working relationship with your team member, to understand them better and to open up lines of communications. It’s a privilege to have them sharing this with you. 
Take notes and ask lots of questions — you’ll learn a lot from the answers — and share in kind if you feel comfortable doing so.</p> + +<p><strong>Your agenda:</strong> 10 minutes for your topics.</p> + +<p>Remember, the one-on-one isn’t a status update meeting, and using it as one wastes an opportunity. You’re the boss; you can ask for status updates any time you want, and it’s something easily covered over chat or email. These meetings are better for higher level topics - talking about <em>why</em> they’re having troubles, or great successes, with particular tasks, what’s coming next, and the like. If you want, you can routinely check on the status of tasks before the one-on-one, and then that can inform the discussion. The goal here isn’t to get a weekly update on what tasks are slipping behind - the goal here is in the medium term to have fewer tasks slipping behind because you better understand the work and your team members.</p> + +<p>This is a good opportunity to give positive or corrective feedback on things you’ve seen over the past few days; in times of change like now, this feedback can be a useful way to reinforce expectations around things like code reviews or meetings that may look a little different now than they did several weeks ago when there was more in-person interaction.</p> + +<p>You might well want to follow up on things from previous meetings, or things that have come up from the past week’s work; to ask questions about things you saw. In general, even during your time, the more questions you can be asking that draw information from the team member, rather than just telling them stuff, the better.</p> + +<p>It’s also a great time to share some updates about the big-picture view of how things are going in your department or effort, and how it will affect them and their work, or opportunities it might give them in the future. It’s a great opportunity to gauge their level of enthusiasm for new aspects of upcoming work.</p> + +<p>Don’t use one-on-ones to share information that you’re going to be sharing with several members of the team, though.</p> + +<ol> + <li>It’s not a good use of this time. This meeting is about the individual team member, so it should cover things specifically about them. (Covering how something you’ve updated the entire team about will affect <em>them in particular</em>, however, is completely on-topic.)</li> + <li>It’s not a good use of your time. If it’s something several team members need to know about, cover it at your weekly team meeting and save yourself some time.</li> + <li>It’s surprisingly prone to causing problems. Efficiency aside, you’re probably going to accidentally convey slightly different messages each time - at the very least, they’ll each hear slightly different things. The <em>best</em> case is that this leads to unnecessary confusion.</li> +</ol> + +<p><strong>Professional Development:</strong> 10 minutes</p> + +<p>This isn’t something that comes out of your time or their time, because professional development is a shared responsibility between the two of you. A first one-on-one is a great chance to update your baseline on what their long-term career goals are, and give them any guidance or resources they ask for; and to find out if there are particular aspects of the research they’d like to get more involved in or new technologies they’d like to learn. 
In future one-on-ones, you can cover upcoming conferences or presentation opportunities, try to find work opportunities that will help them build the experience they’re looking for, or coach them on particular skill development.</p> + +<p>Once you’ve been doing those for a while, you’ll find that there usually isn’t going to be 10 minutes’ worth of things to say about their career or skills development - their development needs or long-term career goals just aren’t going to change much week-to-week. So after a while it’ll more typically be 15 minutes for them, 15 minutes for you. But it’s still hugely valuable to have a recurring slot for these topics to be discussed, and in your first few one-on-ones there’ll probably be more than enough topics to take up the time.</p> + +<p><strong>Your assignment:</strong> For each of your team members, start a document (or a page in a document) listing them, what you know of their career plans, what they’re working on, things you’ve been meaning to talk with them about, your current understanding of their strengths and weaknesses, how you imagine their role might change over the coming year… anything that would be useful to touch on in a one-on-one. It doesn’t need to be anything like comprehensive - it’ll get added to over the course of many one-on-ones - but it’ll be a good starting point to preparing for the first round of meetings.</p> + +<p>Also: if today’s your weekly team meeting, give them a heads up that because you’re now all remote you’d like to start meeting with them one-on-one every week, and you’ll be sending an email out.</p> + +<h2 id="resources-1">Resources</h2> +<ul> + <li>The entire Manager Tools Basics series, and especially their <a href="https://www.manager-tools.com/map-universe/basics">one-on-ones podcasts</a>, are extremely widely followed and are definitely worth listening to.</li> +</ul> + +<h2 id="faqs-1">FAQs</h2> + +<p><strong>Q: What if they don’t have anything to say or ask for their 10-15 minutes?</strong><br /> +A: Team members that are kind of distrustful about this new practice might be kind of reticent to talk or ask questions. It may take them several meetings to really start trusting this whole one-on-one thing and start opening up. That’s ok; one of the purposes of having these meetings is exactly to open up lines of communications.</p> + +<p>Long silences can be helpful here; ask them what they’d like to talk about, smile patiently and look at the camera, and count out 10 seconds in your head. That 10 seconds will feel like a while to you, and an absolute eternity to them, but it’s very convincing that you’re interested in waiting to hear what they have to say. If they’re still not coming up with anything, you can try prompting them with very broad questions — “How is this whole remote work going for you?” “How are you dealing with the COVID-19 situation?” “Do you feel our approach to working remote is going well?”. It’s better if the topics and questions come unprompted, but that may take some time. It’s alright.</p> + +<p>Frankly though, by far the more common issue is:</p> + +<p><strong>Q: What if they go long?</strong><br /> +A: This is <em>way</em> more common than there being not enough to say, especially at the beginning. If they’re not used to having your undivided attention, they’re going to have a lot to say, especially for the first few meetings. My suggestion is just to let them go long for the first few. 
If there are things you really want to cover on your agenda, gently interrupt them towards the end with time to cover your priorities - but remember, you’re the boss, you can talk to them whenever you want about stuff; this is their half hour to bring things to you. If they keep going long week after week, start giving them feedback about that and gradually over the weeks get them to 10-15 minutes. But don’t be too forceful about it; these meetings are about opening channels of communication.</p> + +<p><strong>Q: I just have something quick to ask, can we start with that before we get into their stuff?</strong><br /> +A: Absolutely, categorically, no. I don’t blame you, I’ve slipped and done this myself, but it sabotages the whole meeting - now it’s just another meeting with the boss quizzing the team member about stuff they want to know about. Seriously, you can wait 15 minutes - or just ask them on slack (or whatever) before the meeting.</p> + +<p><strong>Q: Can I ask about the status of their tasks at all?</strong><br /> +A: Talking about tasks’ status isn’t completely off-limits, but it’s <em>really easy</em> for status questions to slowly take over the one-on-ones. If there’s a task or project you two haven’t talked about for a while, by all means you can take this opportunity to ask for a quick update, but try to make sure that’s the exception, not the rule. This is a better venue for identifying problems that keep coming up and coaching them on dealing with those, or building on strengths they’ve shown in dealing with other tasks - high level things rather than quick updates. Again, you’re the boss - you can ask for status updates any time you want. This time is for them.</p> + +<p><strong>Q: What if they offer status updates during their time?</strong><br /> +A: Listen, and take notes; and pay attention to anything they’re saying beyond the status update (are they pointing out things they had to overcome that you didn’t know about? Are they talking in a way that suggests they’re not normally getting enough credit?). If it is really just a status update, thank them but explain that this isn’t a status update meeting, you really want to focus on higher-level issues — whether they have what they need to succeed, whether there are things they want to know about the program as a whole, how things are going with their coworkers, what have they learned working on project X, are there things that they’re concerned about for the coming weeks. If a few weeks of gentle redirection isn’t enough, you can be more direct (or you can try to short-circuit things by directly asking for status updates before the one-on-one).</p> + +<p><strong>Q: What if I don’t have a full 10-15 minutes of things to cover?</strong><br /> +A: That’s ok! The main purpose of this meeting is for them to talk. We’ll introduce later some general questions you can raise in your time if you don’t have specific things you need to address; but otherwise, if your list is short this week, give them a heads up so they can use more time this time; and if the meetings are short sometimes, it’s fine.</p> + +<h1 id="day-3---sending-out-the-invitations">Day 3 - Sending Out The Invitations</h1> + +<p>You’ve done the hardest parts of preparation now - found spots in your calendar, and identified the starting points for discussions with each of your team members. Today’s easy, but it’s the key - you send out the email (or slack message, or…) 
announcing that this is coming and get people to start signing up for the slots you set aside two days ago.</p>
+
+<p>Make sure the times you set aside two days ago are ready to be signed up for. Pick your favourite way of doing that, in rough order of least work-for-you to most:</p>
+
+<ul>
+  <li><strong>Google doc sign-up spreadsheet</strong>: Set up a google doc spreadsheet (or Office 365 Excel sheet or whatever you use) with the times in one column and the next column being who gets that slot. Make it nice-looking if you want; you now have a signup sheet.</li>
+  <li><strong>Doodle poll</strong> - In Doodle under Poll Settings you can “Limit the number of choices per option” to one, so the first person to pick the option gets it.</li>
+  <li><strong>Email</strong> - just have them email you with their preferred time.</li>
+</ul>
+
+<p>With that done you’re ready to send off the announcement and begin the signups!</p>
+
+<p>Here’s a perfectly good email announcing the one-on-ones and asking for signups. If the letter strikes you as good enough, then fill in the blanks and send it off. If you want to rework or rewrite it to be in your own voice, absolutely do so: but send it off.</p>
+
+<p><em>Hi team:</em></p>
+
+<p><em>With us working remotely now, we don’t get as many opportunities to talk with each other, hear how things are going, and ask questions of each other.</em></p>
+
+<p><em>So I’d like to start having weekly half-hour one-on-one meetings with each of you individually. These aren’t status update meetings; it’s a meeting for you to tell me how things are going, ask questions about our projects, let me know what you need help with, or to tell me things you need from me. I’ll have some questions for you about how things are going, and will give any input I have on what’s gone on in the past week. And we’ll have an opportunity to spend some time each meeting talking about your professional development and career goals.</em></p>
+
+<p><em>We’ll have the same agenda each meeting:</em></p>
+
+<p><em>First 10 minutes - your agenda. Whatever you want to talk about or ask questions about. If there’s a question I don’t know the answer to offhand, I’ll have it for you by the next meeting. If you want to ask me about something you’re doing and it would help to screen share a plot or diagram, by all means - but please, this isn’t something to prepare slides for. It’s just us talking.</em></p>
+
+<p><em>Second 10 minutes - my topics for the week. I’ll talk about things that have come up in the past week, share updates on efforts that have specific relevance to the work you’re doing, and follow up on things we discussed in earlier one-on-ones.</em></p>
+
+<p><em>Third 10 minutes - your professional and career development. We’ll talk about your career goals and what we can both do to help you get there. Maybe there are areas of research you’d like to be more involved in, or technologies you’d like to learn more about; if we’re discussing these regularly then I’ll be better able to take them into account when opportunities come up. After our first few meetings this may not be something we have items to discuss every single week; for those weeks it’ll be 15 minutes/15 minutes.</em></p>
+
+<p><em>We’ll have these meetings over [videoconference]. [Details].</em></p>
+
+<p><em>I’ve set up a sign-up sheet for time slots at [google doc/doodle poll/here in the rest of the email]; let me know your first choice for times (first come, first served!)
After a couple weeks we can adjust the times if we need to.</em></p>
+
+<p><em>We’ll begin on [start date]; I’m looking forward to starting these conversations with you.</em></p>
+
+<p><em>Let me know if you have any questions.</em></p>
+
+<p><em>Best wishes,</em>
+ <em>[You]</em></p>
+
+<p><strong>Your assignment</strong>: If today is your weekly team meeting, cover the content of the letter before sending it out. Then get the times signup ready in whatever format you like. Rework the email as necessary, and send it off. You’re done!</p>
+
+<h2 id="resources-2">Resources</h2>
+<ul>
+  <li><a href="https://letterstoanewdeveloper.com/2020/03/16/how-to-manage-one-to-ones/">How to manage one to ones</a> - One-on-ones from the team member’s point of view; you could include this in the email if you wanted.</li>
+</ul>
+
+<h2 id="faqs-2">FAQs</h2>
+
+<p><strong>Q: Isn’t this… not a lot of notice for my team?</strong><br />
+A: Honestly, yes - we’re ramping this up in a week total. Normally I’d suggest starting a couple of weeks out to socialize the idea and answer questions (and so your calendar would have been less full to begin with), but you’re working remotely now and need this a little more quickly.</p>
+
+<p>And it’s not really <em>that</em> sudden. There’s a 60% chance your weekly team meeting has already happened and you’ve gotten a chance to give them a heads-up it’s coming; otherwise you’ll have one coming up and have a chance to talk with them about it.</p>
+
+<p>Finally - your team knows that things are very uncertain right now, and there have been (and will continue to be) a lot of changes coming up. That both makes this more reasonable and means your team will be motivated to want more face-to-face time with you.</p>
+
+<p>If you <em>really</em> think it’s going to be an issue, bump the first few meeting times to the following week to give you and your team an extra couple of days to talk it through. But it’s not like they have to prepare a presentation or something for the first meeting. It’s you and each of them talking.</p>
+
+<p><strong>Q: What if one of my team members won’t sign up?</strong><br />
+A: So imagine you - possibly younger you - getting an offer for more and regular face time with your boss, especially during a time of uncertainty. Do you run away, or jump on it instantly?</p>
+
+<p>Once you send this out, most if not all of your team members are going to sign up before the electrons in your email have cooled down. We’ll talk tomorrow and the next day about how to handle the theoretically-possible occasional reluctant team member; but remember that asking your team members to attend one half-hour meeting a week is a <em>perfectly reasonable thing to ask,</em> even with short notice, and even under completely normal circumstances.</p>
+
+<p><strong>Q: I’m still not sure about this scheduled meetings thing - can’t I just play it by ear every week and schedule them on the fly?</strong><br />
+A: I mean, you can <strong>do</strong> anything you want.
But if you want valuable one-on-one meetings, this is the way to do it.</p>
+
+<p>“Playing it by ear” just isn’t efficient; if you thought finding gaps in your calendar and matching them to people took some time during this process, imagine doing it every single week.</p>
+
+<p>But more importantly, these meetings are about your team members, and what matters to them is the certainty of knowing they have time on your schedule.</p>
+
+<p>Yes, your schedule might get moved around if things get busy — but when things are busy, stuff that’s scheduled probably gets done (even if it gets done at a different time than originally planned), and stuff that isn’t on your schedule likely just gets dropped.</p>
+
+<p>These meetings are for and about your team members, and for them to be valuable, your team members need the certainty of having first dibs on a slot in your calendar. Catch-as-catch-can is probably how you’re doing things now; the value in this is setting up something more regular.</p>
+
+<h1 id="day-4---answering-signups-and-preparing-notes">Day 4 - Answering Signups and Preparing Notes</h1>
+
+<p>So you sent out the email yesterday, and you’re starting to get answers and maybe some questions. Today you’ll respond to some of those questions and get ready to start.</p>
+
+<p>First, notice how many of your slots are already filled. People by and large want time with you, and are happy to have it. If you’re using google docs for a signup sheet, check out in “version history” the time that first slot got snapped up. Fast, right?</p>
+
+<p>Start putting the signups in your calendar as recurring events, with no end date. (You can always move around individual events, or add an end date later). If you use email invitations yourself, include the team member on the event and send them an invite, even if you don’t usually do that internally, so they know they’re on your calendar. Thank them for signing up so quickly, and tell them you look forward to your conversation.</p>
+
+<p>You might be getting some questions from some team members: what to expect from these meetings since they’re so new, and whether they need to prepare anything. Those could be over email/slack, or if you have your weekly team meeting today it could be in person. These questions are (qualitative) data! Make note of them in the document you started two days ago for each team member. These are team members who might need help with change — and there’s a lot of change going on right now, so that’s good to know about — or maybe they are team members you’ll need to work a little harder at developing trust with, and the one-on-ones will help with that. On the other hand, they were comfortable enough to raise the question with you directly. Don’t try to interpret the data yet - just note that they had questions about the one-on-ones and what they were.</p>
+
+<p>Answer the questions that you get (and if you get several versions of the same question, it might also be useful to send out the answer to the whole team). Also, it might be good to send out a reminder note to nudge the stragglers.</p>
+
+<p>The other thing to do today is to get some note-taking forms ready for use during the one-on-one. The details of the form don’t matter much - there needs to be a place to take notes on what they tell you, a place for reminders of things you want to talk about, and a place to write down things that have to be done for followup (either by you or by them).
We’ll populate the “things you want to talk about” portion tomorrow.</p>
+
+<p>The key to taking the notes once you’re in the meeting is to (a) take notes like it’s a meeting, not like it’s a class - more about this in a minute - and (b) take notes on paper.</p>
+
+<p>Yes, on paper. Like an animal.</p>
+
+<p>Or on electronic paper - I use my iPad + stylus.</p>
+
+<p>The key here is to take notes in a way that shows you’re paying attention. Even remotely via teleconference, typing notes in a document doesn’t do that. Again, when in doubt, imagine yourself in one-on-ones with someone you report to. You’re talking to them, they’re nodding and typing along; you stop talking, and they keep typing for a minute or so, then look up. How paid-attention-to do you feel? I mean, sure, they’re <em>probably</em> taking notes - or maybe they’re firing off a quick response to an email or sending someone a slack message.</p>
+
+<p>Remember, a principal goal of these meetings is the team member understanding that they have your undivided attention for a non-trivial amount of time every week. You leaning off to the side, scribbling something down, and coming up with a pen in your hand occasionally is completely unambiguous.</p>
+
+<p>On my iPad, I have a notebook for each team member (I use Goodnotes; there are a lot of other good ones out there too), and before each meeting I set up one page with the date on it for stuff they’re telling me about, one page for me where I can put things that I want to talk about, and I use the bottom of each page for follow-up items.</p>
+
+<p>The other key note-taking thing is to take notes like you would for a meeting, <strong>not</strong> like you would for a class. We in academia-adjacent fields can be really bad about this. We’ve spent a lot of time in class and lectures and symposia and colloquia writing down all the information being presented like any of it might be on the exam. That’s easier when you’re <em>doing</em> the note taking - you don’t have to make decisions about what’s important and what’s not - but it means that it’s more work to <em>use</em> the notes later, since everything’s there no matter how important it was. And in our line of work we know that stuff is read many more times than it’s written.</p>
+
+<p>So while you’re taking notes, try to focus on the important things: specific things they say or do that you want to remember, things that need to be followed up on, surprises, etc. This will make you a more careful listener, too. If you’re not sure if something was important, ask a followup question about it and you’ll find out pretty quickly. Useful multi-purpose followup prompts every research computing manager should have in their toolbox include: “Tell me more about that.” “That sounds tricky.” and “What are some options for handling that?”</p>
+
+<p><strong>Your assignment</strong>: As above; add the meetings to your calendar as the signups come in, and answer any questions that you get. If there are people who haven’t signed up, send a reminder message. Get some note-taking forms ready for your meetings; we’ll populate them with some initial questions and topics tomorrow.
Just make sure there’s some room at the <strong>start</strong> of the form for what they tell or ask you (because they go first), a place for you to write down things you want to talk about (we’ll put stuff there tomorrow), and a place to take note of things that need to be followed up on, by either of you.</p>
+
+<h2 id="resources-3">Resources</h2>
+
+<p><a href="https://www.manager-tools.com/2005/07/the-single-most-effective-management-tool-part-1">Manager-Tools podcast</a> - At the end of the page (look for downloads) there’s a template form for taking notes and question prompts: the “1-on-1 Key Points and Prep Form”, in PDF or Word. It’s a good starting point for your notes form if you want to use it.</p>
+
+<h2 id="faqs-3">FAQs</h2>
+
+<p><strong>Q: Seriously, on paper?</strong><br />
+A: Like an animal, yes.</p>
+
+<p><strong>Q: C’mon.</strong><br />
+A: You c’mon.</p>
+
+<p><strong>Q: Ok, fine. What if one of the filled slots has already been stomped on by a meeting that’s come up for me?</strong><br />
+A: It happens, and there’s no sense in pretending it won’t. Just let the team member know; it’ll give them a model of what might happen in the future. “Hey Lawrence - I’ve got you in my calendar for 11:30am on Tuesdays now - thanks for signing up! Something just came up for our slot this week - it’s a one-off and I tried to have it rescheduled but I can’t avoid it. Can we do 3pm on Thursday for just this week?” This is one of the reasons you made sure there were a few extra slots. Maybe X-out that replacement slot from the signup sheet if people are still signing up.</p>
+
+<p><strong>Q: Should we set up some kind of shared agenda so we can each see what topics we each have?</strong><br />
+A: Some teams find that extremely useful, and some don’t. Tools inevitably shape the work they’re used for, so you and your team need to figure out the best way to run these for your particular work first. Hold off on integrating tooling into these meetings until you have a better sense of what you need. After a couple of weeks of one-on-ones is a good time to take stock and see what might be helpful.</p>
+
+<h1 id="day-5---preparing-for-the-first-week-of-one-on-ones">Day 5 - Preparing for the First Week of One-On-Ones</h1>
+
+<p>This is almost it! All, or at least most, of your team has signed up for one-on-ones; by the end of the day you’ll have had your weekly team meeting at some point this week, where you either gave the team members a heads-up or answered some questions; and you have note sheets ready to be filled out. Today you’ll handle anyone who hasn’t signed up yet, and figure out how to fill out the forms with your topics for the first one-on-ones.</p>
+
+<p>It’s really unlikely that, more than a full day after you’ve answered questions and sent out a reminder, you still have someone who hasn’t signed up for a one-on-one slot. I’m including what to do if it <em>does</em> happen, because people will ask otherwise, but understand that this isn’t the common case.</p>
+
+<p>If you do have someone dragging their feet, this is the time to follow up with them. (And the fact that you have to follow up with them is also data worth recording. Maybe it’s reluctance, maybe they’re overwhelmed, maybe it’s a lot of things — you don’t know yet, but there’s <em>something</em> there).
Find out if they have questions; be genuinely curious about their reasons for not signing up, and don’t assume a reason (this is a time of a lot of change and disruption; we’re all going through a lot right now). Try to get to the reason it hasn’t been done yet, do what you can to address it, and directly ask them to choose one of the remaining times by the end of the day.</p>
+
+<p>If by the end of the day they still haven’t signed up, sign them up for one of the remaining slots - the latest one that’s still available, ideally - and send them the invitation. Yes, this is a little heavy-handed, but you’ve asked them three times now within 48 hours to complete a very simple and reasonable task. Either they’re reluctant for work-related reasons, or overwhelmed for potentially non-work-related reasons; either way, having individual conversations with them is the right thing to do, and your duty as their manager.</p>
+
+<p>Ok, so by the end of the day you will have everyone signed up for one-on-one slots. Now it’s time to make sure you know what you’ll be talking about in your time.</p>
+
+<p>Pull out the document you wrote three days ago covering each team member, and the forms from yesterday, and pick a few easy things you’d like to cover in your first one-on-one with each of the team members. Maybe it’s about something coming up that’s relevant to them, maybe it’s a check-in on a project that hasn’t come up in a while, maybe you’ve been thinking about getting them to give more presentations and you wonder if that’s something they’d be up for. Make a note to ask how they’re doing, whether they think the team is working well in this new mode, and if they have any suggestions for things the team could do differently or start doing.</p>
+
+<p>You may also want to review some one-on-one question lists (there are some in the resources section below) to get ideas for big-picture questions you could ask that would draw out information that would help you. Open-ended questions that can lead into followup questions are often very eye-opening, and you’ll find that ten minutes flies by.</p>
+
+<p>Note that this is really <strong>not</strong> the time to bring up that thing that really bugs you but you haven’t mentioned yet because you’ve been waiting to have “the talk”. This is not the time for “the talk”. If you start a new practice like one-on-ones and then immediately drop something like that on them, especially if it’s from a while ago and they had no idea, they’ll feel ambushed — and they won’t be wrong.</p>
+
+<p>Now, I’m a big believer in giving corrective feedback (even though I still find it <em>really</em> hard!), and in research computing, not giving enough negative feedback is a much bigger problem than giving too much. But this meeting series is about them, and about developing a solid working relationship and trust. When that’s done, it will among other things be easier for you to give, and for them to receive, respectful, helpful negative feedback, and have it taken in the spirit it’s intended. But building that relationship and trust, especially with everything else changing around us, will take time.</p>
+
+<p>Lean <em>way</em> into the positive for now. (Again, I’m a big believer in the usefulness of good negative feedback, but people often forget how powerful and effective positive feedback is in encouraging the behaviour that the team needs and helping a team member grow to reach their potential.)
If they’ve been working at anything even approaching their normal productivity these past weeks, there’s lots to give positive feedback about! It’s ok to point out some small negative things you saw in the last week or so — nudge them about being better with “mute” on Zoom, remind them about that code review that’s still pending, ask them to be on time to the next team call, whatever — but don’t poison these meetings for the team member by introducing some big negative topic early on, and do <em>not</em> dig up anything that’s more than a couple of weeks old.</p>
+
+<p>For this week, preparing for the career development section is really easy. Unless you’ve had this conversation very recently, just make a note to ask them what their medium- and long-term career goals are now, and what skills and experience they’d like to develop to get there.</p>
+
+<p>And that’s it. Everyone’s signed up; the one-on-one forms are ready and waiting; next week the one-on-ones start. You’re all ready — you can do this!</p>
+
+<h2 id="resources-4">Resources</h2>
+
+<p><a href="https://knowyourteam.com/blog/2020/02/19/how-to-coach-employees-ask-these-1-on-1-meeting-questions/">How to Coach Employees? Ask these One-on-One Meeting Questions</a> - Claire Lew, KnowYourTeam<br />
+<a href="https://www.peoplebox.ai/t/one-on-one-meeting-template-manager-questions-list">The Ultimate 1-on-1 Meeting Questions Template</a> - PeopleBox<br />
+<a href="https://one-on-ones.app">One-on-Ones app</a> - Random one-on-one questions</p>
+
+<h2 id="faqs-4">FAQs</h2>
+
+<p><strong>Q: More of a comment than a question - This whole thing is a lot of work?</strong><br />
+A: Kind of, yeah. But it’s a lot more work getting them started than keeping them going. Once everything is set up, it only takes a few minutes a week per team member in addition to the meetings to get all of the benefits of one-on-ones — which will help your team members and help you.</p>
+
+<p><strong>Q: I’ve gone back and forth with one team member and answered their questions and they still seem reluctant; what should I say?</strong><br />
+A: “I look forward to speaking more about this with you Wednesdays at 1pm.”</p>
+
+<p>Seriously, there are <em>very few times</em> when “because I’m the boss” is a good enough reason; this sort of process detail about how the team will work together, now that you’re all suddenly working remotely, is <em>exactly</em> one of those times. It’s good to hear their concerns if they have any, you should respect those concerns, and you should expect them to show up for the one-on-one.</p>
+
+<h1 id="day-6---last-minute-reminders">Day 6 - Last-Minute Reminders</h1>
+
+<p>Congratulations - this is the day the one-on-ones start! You’ve done the hardest part, trust me.</p>
+
+<p>The first few one-on-one meetings may seem a little awkward and stilted, but they’ll quickly grow more comfortable as you get the hang of them.</p>
+
+<p>Keep in mind the principles:</p>
+
+<ul>
+  <li>The meeting is scheduled and at a regular time every week.</li>
+  <li>This is about building working relationships.</li>
+  <li>This isn’t a status update: the meeting is about your team member, not you.</li>
+  <li>So the team member and their agenda go first, every time.</li>
+  <li>Take notes in a way that shows you’re paying attention.</li>
+  <li>Followup is crucial.</li>
+  <li>When in doubt, imagine having one-on-ones with someone you report to.</li>
+</ul>
+
+<p>You’ve already got the first one done, by creating the schedule, and you’ve got the note-taking sorted.
Now you just have conversations with your team members.</p>
+
+<p>If I could recommend any tactics for the conversations, I’d just say:</p>
+
+<ol>
+  <li>They go first. Kick off with “What would you like to talk about?” or something similar to hand over the agenda to them.</li>
+  <li>Listen a lot more than you speak, and ask a lot of high-level questions and followup questions.</li>
+  <li>Let them go long for their part if they want to this time. The career development conversation can wait another week. If it gets to 20-25 minutes and you really want to cover your topics this week, see if you can gently interrupt; but remember, you’re the boss, you can talk to them about your stuff any time you want.</li>
+  <li>Take notes and highlight the things to follow up on. Make sure those followup items end up on your todo list.</li>
+  <li>Focus mostly on the positive for the first few meetings.</li>
+</ol>
+
+<p><strong>Your assignment</strong>: Have some one-on-ones!</p>
+
+<h1 id="day-8---hows-it-going">Day 8 - How’s It Going?</h1>
+
+<p>Hey, it’s the middle of one-on-ones week — congratulations! You got them started, and fast! How’s it going?</p>
+
+<p>This is a good time to glance through the notes from the first few one-on-ones. What are you learning that you didn’t know this time last week? Have you already helped some of your team members solve a problem they were having?</p>
+
+<p>Be sure to add the things you said you’d follow up on to whatever task list system you use. Having the conversations with your team members builds a good working relationship; but it’s following up on the things you said you’d do that builds trust. Did the team member ask you for some information? To contact someone for them? To get something un-stuck in administration? Add it to your list and get them done before the next one-on-one. That, more than anything else, will prove to them you were listening and care about what you heard.</p>
+
+<h1 id="day-10---reviewing-week-one---what-went-well-and-planning-for-week-two">Day 10 - Reviewing Week One - What Went Well, and Planning for Week Two</h1>
+
+<p>You’ve finished your first week of one-on-ones, just 10 work days after starting the process. Congratulations, this is a big milestone.</p>
+
+<p>So the benefits I listed on day one of getting started with one-on-ones were:</p>
+
+<ul>
+  <li>Understand your team member better and so build solid working relationships.</li>
+  <li>Build trust with your team member.</li>
+  <li>Make your team member feel more important and engaged, and more likely to raise issues with you.</li>
+  <li>Learn <em>much</em> more about what’s actually going on with the work your team is doing.</li>
+</ul>
+
+<p>Building trust will take more than a week; setting that one aside, do you feel that there are team members you already understand a bit better? How did the team members seem to react to having your attention for 30 minutes? Did you learn anything about the work being done that surprised you?</p>
+
+<p>Scan those one-on-one notes again and update the document on your team members with things you learned. It could be very work-related things like career goals, or it could be things like the names of their pets - if they told you about it, it’s important to them, so it’s important to you.</p>
+
+<p>Of the team members, who had a lot to say? Did some go better than others? How are you doing with the follow-up tasks?
Do those followup tasks suggest new topics of discussion for next week?</p>
+
+<p>Now start putting together the one-on-one notes for next week. Have you learned something - maybe a question - that worked really well with one team member and you want to try with others? What’s come up over the last week that you’d like to talk about?</p>
+
+<p>Congratulations - you’re done with week one, and already ready for week two! Preparing this does take a little time each week, and it will always take time, but it will get easier as the weeks go on.</p>
+
+<h1 id="day-15---reviewing-week-two---what-went-well-and-thinking-of-future-one-on-ones">Day 15 - Reviewing Week Two - What Went Well, and Thinking of Future One-on-Ones</h1>
+
+<p>Fantastic. Two weeks of one-on-ones!</p>
+
+<p>You’re now starting to get the hang of this, and seeing what works and what doesn’t.</p>
+
+<p>This might be a good time to take stock and see if there are things that would help the process go more smoothly. Have there been topics that have come up that some preparation would have been good for? Maybe it would be useful to have some kind of shared agenda. Some groups just have a google doc for each team member shared with the manager, and that can work nicely.</p>
+
+<p>If that would be useful, consider raising it at the next team meeting. But for heaven’s sake, before you put any item on it, think about it from the direct’s point of view. If your one-on-one is a Thursday, and on Monday you enter an agenda item like “Performance on Project X” or “Your future in the department”, your team member is going to have a <strong><em>very bad week.</em></strong> Be much more explicit and include context: “Project X review: 5 things that went well, one thing to tweak for next time”, or “Opportunities opening up elsewhere in the department”.</p>
+
+<p>If you’d like more specialized tools, there are a bunch of promising-seeming ones; I’ll list some I’ve heard of below in the resources.</p>
+
+<p>Those tools might be helpful to you, or might not; our team doesn’t even use a shared agenda, but research computing is incredibly diverse and your team’s needs will be different from ours. If there are any tools you find that make your team’s one-on-ones easier and more successful, by all means use them (and let me know!)</p>
+
+<p>You’re done! This is the end of the 3-week run of emails on starting one-on-ones in a hurry.
If this was valuable to you, consider signing up for the <a href="https://www.dursi.ca/newsletter.html">Research Computing Teams Newsletter</a>; any time something like this is posted it will show up in the newsletter, along with a weekly roundup of management, community, and technology news relevant to you as the manager of a research computing team.</p>
+
+<p>Congratulations again, and best of luck to you and your research computing team!</p>
+
+<h2 id="resources-5">Resources</h2>
+
+<ul>
+  <li><a href="https://soapboxhq.com/">https://soapboxhq.com/</a></li>
+  <li><a href="http://getlighthouse.com">getlighthouse.com</a></li>
+  <li><a href="https://knowyourteam.com/">https://knowyourteam.com/</a></li>
+  <li><a href="https://www.fellow.app/">https://www.fellow.app/</a></li>
+  <li><a href="https://teambit.io/">https://teambit.io/</a></li>
+  <li><a href="https://www.small-improvements.com/">https://www.small-improvements.com/</a></li>
+</ul>
+
+<h2 id="faqs-5">FAQs</h2>
+
+<p><strong>Q: So this isn’t as bad as I thought it was going to be, but I’m still not convinced. Should I just drop them?</strong><br />
+A: Do me a favour? Keep them going for two months. Have them become part of the routine way you manage. Get input from your team members. Then do what you think is best.</p>
+
+
+
+
+    GRACC Transition Visualization
+
+    2020-03-08T06:00:00-06:00
+    https://hpc.social/2020/gracc-transition-visualization
+    <p>The OSG is in the process of transitioning from an older ElasticSearch (ES) cluster to a new version. Part of this process is reindexing (copying) data from the old to the new. Unfortunately, it’s not easy to capture the status of this transition. For this, I have created the <a href="https://gracc-transition.herokuapp.com/">GRACC Transition page</a>.</p>
+
+<p>The goal is to transition when both the old and new ES have the same data. A simple measure of this is whether they share the same number of documents in all of the indexes.</p>
+
+<p>Source for this app is available on GitHub: <a href="https://github.com/djw8605/gracc-transition">GRACC Transition</a></p>
+
+<h2 id="data-collection">Data Collection</h2>
+
+<p>Data collection is performed by a probe on each of the new and old ElasticSearch clusters. Upload is performed with a POST to the gracc transition website. Authorization is performed with a shared random token between the probe and the website.</p>
+
+<p>The probe is very simple. It queries ES for all indexes, along with the number of documents and the data size inside each index; there’s a small sketch of this below.</p>
+
+<p>There are also many indexes that the OSG is not transitioning to the new ES. In order to ignore these indexes, a set of regular expressions is used to remove them from consideration. Those regular expressions are:</p>
+
+<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>/^osg.*/, // Start with osg.*
+/^ps_.*/, // Start with ps_*
+/^shrink\-ps_.*/, // Start with shrink-ps_*
+/^glidein.*/, // Start with glidein*
+/^\..*/, // Start with .
+/^ps\-itb.*/ // Start with ps-itb*
+</code></pre></div>
+</div>
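+
+<p>To make the probe’s shape concrete, here is a minimal, hypothetical sketch of such a probe in Python. It is not the actual probe (that lives in the GitHub repository above), and the report URL, token, and function name are placeholders:</p>
+
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>#!/usr/bin/env python
+# Hypothetical probe sketch; the endpoint URL, token and auth scheme are placeholders.
+import re
+import requests
+
+ES_URL = "http://localhost:9200"                        # assumed local ES endpoint
+REPORT_URL = "https://gracc-transition.example/upload"  # placeholder upload URL
+TOKEN = "shared-random-token"                           # shared secret with the website
+
+# The same exclusion patterns listed above
+EXCLUDE = [re.compile(p) for p in (
+    r"^osg.*", r"^ps_.*", r"^shrink\-ps_.*",
+    r"^glidein.*", r"^\..*", r"^ps\-itb.*")]
+
+def gather_index_stats():
+    """Ask ES for every index's document count and size (in bytes)."""
+    resp = requests.get(ES_URL + "/_cat/indices",
+                        params={"format": "json", "bytes": "b"})
+    resp.raise_for_status()
+    return {row["index"]: {"documents": int(row["docs.count"] or 0),
+                           "size": int(row["store.size"] or 0)}
+            for row in resp.json()
+            if not any(pat.match(row["index"]) for pat in EXCLUDE)}
+
+if __name__ == "__main__":
+    # Upload is just an authenticated POST of the JSON blob.
+    requests.post(REPORT_URL, json=gather_index_stats(),
+                  headers={"Authorization": "Bearer " + TOKEN})
+</code></pre></div></div>
+
+<p>The only ES API used here is the standard <code class="language-plaintext highlighter-rouge">_cat/indices</code> endpoint, which reports a document count and store size for every index in a single call.</p>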
<h2 id="the-website">The Website</h2>
+
+<p><img alt="GRACC Transition Website" src="https://derekweitzel.com/images/posts/gracc-transition/gracc-transition-website.png" /></p>
+
+<p>The gracc transition app is hosted on <a href="https://www.heroku.com/">Heroku</a>. I chose Heroku because it provides a simple hosting platform with a database for free.</p>
+
+<p>The website pushes a lot of the data processing to the client. The data is stored in the database as JSON and is sent to the client without any transformation. The client pulls the data from the website for both the new and old ES and processes the data within JavaScript.</p>
+
+<p>The website breaks the statistics into three visualizations:</p>
+
+<ol>
+  <li><strong>Progress Bars</strong>: Comparing the total documents and total data size of the old and new. The progress is defined as new / old. The bars provide a very good visualization of the progress of the transition, as they need to reach 100% before we are able to fully transition.</li>
+  <li><strong>Summary Statistics</strong>: The summary statistics show the raw number of either missing or mismatched indexes. If an index is in the old ES but is not in the new ES, it is counted as <strong>missing</strong>. If the index is a different size in the old vs. the new, it is counted as <strong>mismatched</strong>.</li>
+  <li><strong>Table of Indices</strong>: Finally, a table of indices is shown with the number of documents that are missing, or simply <strong>Missing</strong> if the index is missing in the new ES.</li>
+</ol>
+
+<p>In addition to the table, I also provide a button to download the list of indexes that are missing or mismatched. This can be useful for an administrator to make sure it matches what they expect, or to process with ElasticSearch.</p>
+
+<h2 id="improvements-and-future">Improvements and Future</h2>
+
+<p>In the future, I would like to generate a weekly or even daily email to show the progress of the transition. This would provide a constant reminder of the state of the transition.</p>
+
+
+
+
+    Bootup fun- dual-socket POWER9
+
+    2020-01-16T13:43:37-07:00
+    https://hpc.social/2020/bootup-fun-dual-socket-power9
+    <p>Well today it&rsquo;s going to be a short one. For those of you out there who are like me and enjoy
+watching systems boot, I&rsquo;ve recorded this brief (~3 minutes) bootup sequence of a dual-socket
+POWER9 based system. This was done through the CLI based OpenBMC console (<em>obmc-console-client</em>)
+and we see the system progressing through the bootup sequence to a running instance of CentOS 7.</p>
+
+<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;">
+
+</div>
+
+<p>And for something a bit more esoteric, another bootup video recorded a number of years back.
+This time a MIPS-based IBM Workpad z50 booting NetBSD. Definitely not a room heater, but
+probably the best keyboard I&rsquo;ve used on a small form factor laptop - ironically the form
+factor is referred to as &ldquo;hpc&rdquo;, which in this case stands for &ldquo;handheld pc&rdquo;.</p>
+
+<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;">
+
+</div>
+
+
+
+
+    NUMA on NINE with Spectrum LSF
+
+    2020-01-10T06:21:37-07:00
+    https://hpc.social/2020/numa-on-nine-with-spectrum-lsf
+    <p>NUMA (non-uniform memory access) has been written about ad nauseam. For those fans of POWER processors
+out there, we&rsquo;ll show briefly in this blog what the NUMA layout looks like on a dual-socket POWER9 development
+system. For those not familiar with NUMA, there are many resources to be found on the Internet
+describing NUMA systems in detail. In a nutshell, a NUMA system is made up of a single planar board
+(motherboard) with more than one CPU socket.
Each socket is directly connected to part of the
+system main memory (RAM), but it can also use the parts of main memory to which it is not
+directly attached, through a crossbar or interconnect. Of course there is a penalty in doing this -
+hence the &ldquo;non-uniform&rdquo; moniker.</p>
+
+<p>For the sake of performance, pinning tasks to specific CPUs is an important consideration. We&rsquo;ll
+look briefly at some of the processor affinity capabilities provided by the well-known IBM Spectrum
+LSF workload scheduler.</p>
+
+<p>Let&rsquo;s begin by looking at how many NUMA zones are on this POWER9 based system. We can do this
+using the <em>lscpu</em> command.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">[root@kilenc etc]# lscpu
+Architecture: ppc64le
+Byte Order: Little Endian
+CPU(s): 128
+On-line CPU(s) list: 0-127
+Thread(s) per core: 4
+Core(s) per socket: 16
+Socket(s): 2
+NUMA node(s): 2
+Model: 2.1 (pvr 004e 1201)
+Model name: POWER9 (raw), altivec supported
+CPU max MHz: 2902.0000
+CPU min MHz: 1821.0000
+L1d cache: 32K
+L1i cache: 32K
+L2 cache: 512K
+L3 cache: 10240K
+NUMA node0 CPU(s): 0-63
+NUMA node8 CPU(s): 64-127</code></pre></div>
+
+<p>So we can see above that there are two NUMA zones, each with 64 threads.</p>
+
+<p>The system has a (meager) total of 32GB RAM - we confirm this using the <em>free</em> command.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">[root@kilenc etc]# free
+ total used free shared buff/cache available
+Mem: 32244032 25006592 551360 440512 6686080 5384704
+Swap: 8257472 4987136 3270336</code></pre></div>
+
+<p>If we want to see how memory is attached to each NUMA, we can use the <em>numactl</em> command as follows.
+We confirm that there is 16GB RAM per NUMA. This also shows the distances (weights) between each NUMA.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">[root@kilenc etc]# numactl -H
+available: 2 nodes (0,8)
+node 0 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
+node 0 size: 15245 MB
+node 0 free: 599 MB
+node 8 cpus: 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
+node 8 size: 16242 MB
+node 8 free: 983 MB
+node distances:
+node 0 8
+ 0: 10 40
+ 8: 40 10 </code></pre></div>
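+
+<p>As an aside, bindings can also be applied by hand, outside of any workload scheduler, with <em>numactl</em>. A quick sketch (the binary name here is just a stand-in):</p>
+
+<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc ~]$ numactl --cpunodebind=0 --membind=0 ./myapp</code></pre></div>
+
+<p>This launches the process with both its CPUs and its memory allocations restricted to NUMA node 0 - similar in spirit to what we&rsquo;ll ask Spectrum LSF to do on our behalf further below.</p>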
+<p>Linux on PowerLE (Little Endian) includes the <em>ppc64_cpu</em> command which allows you to display characteristics and settings
+of CPUs including SMT (Simultaneous multithreading), etc.</p>
+
+<p>We see above a total of 128 threads on the system. This is because SMT 4 is enabled. So we have 16
+cores per socket for a total of 32 cores. Multiplied by 4 (SMT=4), we get the value of 128.</p>
+
+<p>And the output from <em>ppc64_cpu</em> showing cores 0 - 31, each with 4 threads. The
+* beside each thread denotes that it&rsquo;s active.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">[root@kilenc etc]# ppc64_cpu --info
+Core 0: 0* 1* 2* 3*
+Core 1: 4* 5* 6* 7*
+Core 2: 8* 9* 10* 11*
+Core 3: 12* 13* 14* 15*
+Core 4: 16* 17* 18* 19*
+Core 5: 20* 21* 22* 23*
+Core 6: 24* 25* 26* 27*
+Core 7: 28* 29* 30* 31*
+Core 8: 32* 33* 34* 35*
+Core 9: 36* 37* 38* 39*
+Core 10: 40* 41* 42* 43*
+Core 11: 44* 45* 46* 47*
+Core 12: 48* 49* 50* 51*
+Core 13: 52* 53* 54* 55*
+Core 14: 56* 57* 58* 59*
+Core 15: 60* 61* 62* 63*
+Core 16: 64* 65* 66* 67*
+Core 17: 68* 69* 70* 71*
+Core 18: 72* 73* 74* 75*
+Core 19: 76* 77* 78* 79*
+Core 20: 80* 81* 82* 83*
+Core 21: 84* 85* 86* 87*
+Core 22: 88* 89* 90* 91*
+Core 23: 92* 93* 94* 95*
+Core 24: 96* 97* 98* 99*
+Core 25: 100* 101* 102* 103*
+Core 26: 104* 105* 106* 107*
+Core 27: 108* 109* 110* 111*
+Core 28: 112* 113* 114* 115*
+Core 29: 116* 117* 118* 119*
+Core 30: 120* 121* 122* 123*
+Core 31: 124* 125* 126* 127* </code></pre></div>
+
+<p>We can turn off SMT quite easily as follows:</p>
+
+<div class="highlight"><pre><code class="language-plaintext">[root@kilenc etc]# ppc64_cpu --smt=off
+
+[root@kilenc etc]# ppc64_cpu --smt
+SMT is off</code></pre></div>
+
+<p>Now when we run <em>ppc64_cpu --info</em> we see that SMT is disabled. Note that
+we only see one * per row now.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">[root@kilenc etc]# ppc64_cpu --info
+Core 0: 0* 1 2 3
+Core 1: 4* 5 6 7
+Core 2: 8* 9 10 11
+Core 3: 12* 13 14 15
+Core 4: 16* 17 18 19
+Core 5: 20* 21 22 23
+Core 6: 24* 25 26 27
+Core 7: 28* 29 30 31
+Core 8: 32* 33 34 35
+Core 9: 36* 37 38 39
+Core 10: 40* 41 42 43
+Core 11: 44* 45 46 47
+Core 12: 48* 49 50 51
+Core 13: 52* 53 54 55
+Core 14: 56* 57 58 59
+Core 15: 60* 61 62 63
+Core 16: 64* 65 66 67
+Core 17: 68* 69 70 71
+Core 18: 72* 73 74 75
+Core 19: 76* 77 78 79
+Core 20: 80* 81 82 83
+Core 21: 84* 85 86 87
+Core 22: 88* 89 90 91
+Core 23: 92* 93 94 95
+Core 24: 96* 97 98 99
+Core 25: 100* 101 102 103
+Core 26: 104* 105 106 107
+Core 27: 108* 109 110 111
+Core 28: 112* 113 114 115
+Core 29: 116* 117 118 119
+Core 30: 120* 121 122 123
+Core 31: 124* 125 126 127 </code></pre></div>
+
+<p>Now that we have our system running with SMT off we have a total of 16 cores per CPU, each with 16GB
+RAM attached directly.
We can also use the system <em>lstopo</em> command which is part of the <em>hwloc</em> package +to display information about the system topology.</p> + +<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc ~]$ lstopo +Machine (31GB total) + NUMANode L#0 (P#0 15GB) + Package L#0 + L3 L#0 (10MB) + L2 L#0 (512KB) + L1d L#0 (32KB) + L1i L#0 (32KB) + Core L#0 + PU L#0 (P#0) + L1d L#1 (32KB) + L1i L#1 (32KB) + Core L#1 + PU L#1 (P#4) + L3 L#1 (10MB) + L2 L#1 (512KB) + L1d L#2 (32KB) + L1i L#2 (32KB) + Core L#2 + PU L#2 (P#8) + L1d L#3 (32KB) + L1i L#3 (32KB) + Core L#3 + PU L#3 (P#12) + L3 L#2 (10MB) + L2 L#2 (512KB) + L1d L#4 (32KB) + L1i L#4 (32KB) + Core L#4 + PU L#4 (P#16) + L1d L#5 (32KB) + L1i L#5 (32KB) + Core L#5 + PU L#5 (P#20) + L3 L#3 (10MB) + L2 L#3 (512KB) + L1d L#6 (32KB) + L1i L#6 (32KB) + Core L#6 + PU L#6 (P#24) + L1d L#7 (32KB) + L1i L#7 (32KB) + Core L#7 + PU L#7 (P#28) + L3 L#4 (10MB) + L2 L#4 (512KB) + L1d L#8 (32KB) + L1i L#8 (32KB) + Core L#8 + PU L#8 (P#32) + L1d L#9 (32KB) + L1i L#9 (32KB) + Core L#9 + PU L#9 (P#36) + L3 L#5 (10MB) + L2 L#5 (512KB) + L1d L#10 (32KB) + L1i L#10 (32KB) + Core L#10 + PU L#10 (P#40) + L1d L#11 (32KB) + L1i L#11 (32KB) + Core L#11 + PU L#11 (P#44) + L3 L#6 (10MB) + L2 L#6 (512KB) + L1d L#12 (32KB) + L1i L#12 (32KB) + Core L#12 + PU L#12 (P#48) + L1d L#13 (32KB) + L1i L#13 (32KB) + Core L#13 + PU L#13 (P#52) + L3 L#7 (10MB) + L2 L#7 (512KB) + L1d L#14 (32KB) + L1i L#14 (32KB) + Core L#14 + PU L#14 (P#56) + L1d L#15 (32KB) + L1i L#15 (32KB) + Core L#15 + PU L#15 (P#60) + HostBridge L#0 + PCIBridge + PCI 9005:028d + Block(Disk) L#0 "sda" + HostBridge L#2 + PCIBridge + PCI 14e4:1657 + Net L#1 "enP4p1s0f0" + PCI 14e4:1657 + Net L#2 "enP4p1s0f1" + HostBridge L#4 + PCIBridge + PCIBridge + PCI 1a03:2000 + GPU L#3 "controlD64" + GPU L#4 "card0" + NUMANode L#1 (P#8 16GB) + Package L#1 + L3 L#8 (10MB) + L2 L#8 (512KB) + L1d L#16 (32KB) + L1i L#16 (32KB) + Core L#16 + PU L#16 (P#64) + L1d L#17 (32KB) + L1i L#17 (32KB) + Core L#17 + PU L#17 (P#68) + L3 L#9 (10MB) + L2 L#9 (512KB) + L1d L#18 (32KB) + L1i L#18 (32KB) + Core L#18 + PU L#18 (P#72) + L1d L#19 (32KB) + L1i L#19 (32KB) + Core L#19 + PU L#19 (P#76) + L3 L#10 (10MB) + L2 L#10 (512KB) + L1d L#20 (32KB) + L1i L#20 (32KB) + Core L#20 + PU L#20 (P#80) + L1d L#21 (32KB) + L1i L#21 (32KB) + Core L#21 + PU L#21 (P#84) + L3 L#11 (10MB) + L2 L#11 (512KB) + L1d L#22 (32KB) + L1i L#22 (32KB) + Core L#22 + PU L#22 (P#88) + L1d L#23 (32KB) + L1i L#23 (32KB) + Core L#23 + PU L#23 (P#92) + L3 L#12 (10MB) + L2 L#12 (512KB) + L1d L#24 (32KB) + L1i L#24 (32KB) + Core L#24 + PU L#24 (P#96) + L1d L#25 (32KB) + L1i L#25 (32KB) + Core L#25 + PU L#25 (P#100) + L3 L#13 (10MB) + L2 L#13 (512KB) + L1d L#26 (32KB) + L1i L#26 (32KB) + Core L#26 + PU L#26 (P#104) + L1d L#27 (32KB) + L1i L#27 (32KB) + Core L#27 + PU L#27 (P#108) + L3 L#14 (10MB) + L2 L#14 (512KB) + L1d L#28 (32KB) + L1i L#28 (32KB) + Core L#28 + PU L#28 (P#112) + L1d L#29 (32KB) + L1i L#29 (32KB) + Core L#29 + PU L#29 (P#116) + L3 L#15 (10MB) + L2 L#15 (512KB) + L1d L#30 (32KB) + L1i L#30 (32KB) + Core L#30 + PU L#30 (P#120) + L1d L#31 (32KB) + L1i L#31 (32KB) + Core L#31 + PU L#31 (P#124) + HostBridge L#7 + PCIBridge + PCI 10de:1db6 + GPU L#5 "renderD128" + GPU L#6 "card1"</code></pre></div> + +<p>Next, we will look at how affinity can be controlled using the Spectrum LSF workload scheduler from IBM.</p> + +<p><strong>Note here that we are using the LINPACK benchmark, but I&rsquo;ve not taken steps to do any optimizations.</strong></p> + 
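+<p>Before diving in, it may help to decode the affinity resource requirement string used in the submission below. The annotation is my own paraphrase; the full grammar of affinity strings is in the LSF documentation:</p>
+
+<div class="highlight"><pre><code class="language-plaintext">affinity[core(1,same=numa):cpubind=numa:membind=localonly]
+         core(1)            one core per task
+         same=numa          all allocated cores drawn from the same NUMA node
+         cpubind=numa       bind each task's CPUs at the NUMA node level
+         membind=localonly  allocate memory only from the local NUMA node</code></pre></div>
+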
+<p>Spectrum LSF is the workload scheduler installed on this system. It provides rich capabilities for CPU +and memory affinity.</p> + +<p>Here we submit a run of HPL requesting 16 processor cores all on the same NUMA node and binds the tasks +on the NUMA node with memory binding.</p> + +<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ bsub -n 16 -q normal -o /home/gsamu/%J.out +-R "affinity[core(1,same=numa):cpubind=numa:membind=localonly]" mpirun ./xhpl +Job &lt;101579&gt; is submitted to queue &lt;normal&gt;.</code></pre></div> + +<p>After the job starts, we can see it the list of processes (PIDs), as well as the memory utilization.</p> + +<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ bjobs -l 101579 + +Job &lt;101579&gt;, User &lt;gsamu&gt;, Project &lt;default&gt;, Status &lt;RUN&gt;, Queue &lt;normal&gt;, Co + mmand &lt;mpirun ./xhpl&gt;, Share group charged &lt;/gsamu&gt; +Fri Jan 10 18:46:21: Submitted from host &lt;kilenc&gt;, CWD &lt;$HOME/hpl-2.3/testing&gt;, + Output File &lt;/home/gsamu/101579.out&gt;, 16 Task(s), Request + ed Resources &lt;affinity[core(1,same=numa):cpubind=numa:memb + ind=localonly]&gt;; +Fri Jan 10 18:46:21: Started 16 Task(s) on Host(s) &lt;16*kilenc&gt;, Allocated 16 Sl + ot(s) on Host(s) &lt;16*kilenc&gt;, Execution Home &lt;/home/gsamu&gt; + , Execution CWD &lt;/home/gsamu/hpl-2.3/testing&gt;; +Fri Jan 10 18:46:30: Resource usage collected. + The CPU time used is 24 seconds. + MEM: 2.9 Gbytes; SWAP: 1 Mbytes; NTHREAD: 55 + PGID: 80693; PIDs: 80693 80700 80702 + PGID: 80707; PIDs: 80707 + PGID: 80708; PIDs: 80708 + PGID: 80709; PIDs: 80709 + PGID: 80710; PIDs: 80710 + PGID: 80711; PIDs: 80711 + PGID: 80712; PIDs: 80712 + PGID: 80713; PIDs: 80713 + PGID: 80714; PIDs: 80714 + PGID: 80715; PIDs: 80715 + PGID: 80716; PIDs: 80716 + PGID: 80717; PIDs: 80717 + PGID: 80718; PIDs: 80718 + PGID: 80719; PIDs: 80719 + PGID: 80720; PIDs: 80720 + PGID: 80721; PIDs: 80721 + PGID: 80722; PIDs: 80722 + + + MEMORY USAGE: + MAX MEM: 2.9 Gbytes; AVG MEM: 1.4 Gbytes + + GPFSIO DATA: + READ: ~0 bytes; WRITE: ~0 bytes + + SCHEDULING PARAMETERS: + r15s r1m r15m ut pg io ls it tmp swp mem + loadSched - - - - - - - - - - - + loadStop - - - - - - - - - - - + + RESOURCE REQUIREMENT DETAILS: + Combined: select[type == local] order[r15s:pg] affinity[core(1,same=numa)*1:cp + ubind=numa:membind=localonly] + Effective: select[type == local] order[r15s:pg] affinity[core(1,same=numa)*1:c + pubind=numa:membind=localonly] </code></pre></div> + +<p>During the job runtime, we use the <em>ps</em> command to check which processor cores the <em>xhpl</em> processes +are bound to (see PSR column). It should be noted that Spectrum LSF also creates a cgroup cpuset +for this job.</p> + +<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ ps -Fae | grep xhpl +UID PID PPID C SZ RSS PSR STIME TTY TIME CMD +gsamu 80702 80700 0 2884 31936 36 18:46 ? 00:00:00 mpirun ./xhpl +gsamu 80707 80702 97 10471 387072 4 18:46 ? 00:00:21 ./xhpl +gsamu 80708 80702 97 10619 396672 20 18:46 ? 00:00:21 ./xhpl +gsamu 80709 80702 97 10345 378816 56 18:46 ? 00:00:21 ./xhpl +gsamu 80710 80702 97 10596 395200 8 18:46 ? 00:00:21 ./xhpl +gsamu 80711 80702 97 10470 387072 48 18:46 ? 00:00:21 ./xhpl +gsamu 80712 80702 97 10619 396672 44 18:46 ? 00:00:21 ./xhpl +gsamu 80713 80702 97 10351 379136 12 18:46 ? 00:00:21 ./xhpl +gsamu 80714 80702 97 10322 377472 24 18:46 ? 
00:00:21 ./xhpl
+gsamu 80715 80702 97 10350 379328 0 18:46 ? 00:00:21 ./xhpl
+gsamu 80716 80702 97 10494 388736 60 18:46 ? 00:00:21 ./xhpl
+gsamu 80717 80702 97 10232 371648 40 18:46 ? 00:00:21 ./xhpl
+gsamu 80718 80702 97 10205 370048 28 18:46 ? 00:00:21 ./xhpl
+gsamu 80719 80702 97 10321 377536 52 18:46 ? 00:00:21 ./xhpl
+gsamu 80720 80702 97 10465 387008 36 18:46 ? 00:00:21 ./xhpl
+gsamu 80721 80702 97 10200 369664 16 18:46 ? 00:00:21 ./xhpl
+gsamu 80722 80702 96 10461 386560 32 18:46 ? 00:00:21 ./xhpl
+gsamu 80879 36562 0 1736 2816 80 18:46 pts/1 00:00:00 grep --color=auto xhpl</code></pre></div>
+
+<p>Cross-referencing the above list of CPU cores with the output of <em>numactl</em>, we see that the job
+is running on NUMA node 0.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ numactl -H
+available: 2 nodes (0,8)
+node 0 cpus: 0 4 8 12 16 20 24 28 32 36 40 44 48 52 56 60
+node 0 size: 15245 MB
+node 0 free: 944 MB
+node 8 cpus: 64 68 72 76 80 84 88 92 96 100 104 108 112 116 120 124
+node 8 size: 16242 MB
+node 8 free: 5607 MB
+node distances:
+node 0 8
+ 0: 10 40
+ 8: 40 10 </code></pre></div>
+
+<p>The Spectrum LSF <em>bhosts</em> command also provides an affinity option (<em>--aff</em>) to display the NUMA
+bindings. Here is the output from that command. The * denotes that there are tasks pinned.</p>
+
+<div class="highlight"><pre><code class="language-plaintext">[gsamu@kilenc testing]$ bhosts -aff
+Host[30.7G] kilenc
+ NUMA[0: 0M / 14.8G]
+ Socket0
+ core0(*0)
+ core4(*4)
+ core24(*8)
+ core28(*12)
+ core32(*16)
+ core36(*20)
+ core40(*24)
+ core44(*28)
+ core48(*32)
+ core52(*36)
+ core56(*40)
+ core60(*44)
+ core72(*48)
+ core76(*52)
+ core80(*56)
+ core84(*60)
+ NUMA[8: 0M / 15.8G]
+ Socket8
+ core2064(64)
+ core2068(68)
+ core2072(72)
+ core2076(76)
+ core2080(80)
+ core2084(84)
+ core2088(88)
+ core2092(92)
+ core2096(96)
+ core2100(100)
+ core2104(104)
+ core2108(108)
+ core2112(112)
+ core2116(116)
+ core2120(120)
+ core2124(124)</code></pre></div>
+
+<p>This is just a quick example of affinity jobs in IBM Spectrum LSF. You can find out much more about the
+capabilities of Spectrum LSF in the documentation which is available on the <a href="https://www.ibm.com/support/knowledgecenter/SSWRJV_10.1.0/lsf_welcome/lsf_welcome.html">IBM Knowledge Center</a></p>
+
+
+
+
+    Intelligent HPC - Keeping Hard Work at Bay(es)
+
+    2019-12-19T02:21:37-07:00
+    https://hpc.social/2019/intelligent-hpc-keeping-hard-work-at-bay-es-
+    <p>Since the dawn of time, humans have looked for ways to make their lives easier. Over the centuries human ingenuity has given us inventions such as the wheel and simple machines – which help greatly with tasks that would otherwise be extremely laborious. Over time, we’ve learned there are often alternatives to brute force ways of doing things. It’s this human reasoning that has driven the advancement we find in our world today.</p>
+
+<p>Fast forward to this century, where computer-driven simulations have been developed as the third branch of the scientific method, supplementing theory and experimentation. For decades, simulation and modelling have delivered unprecedented capabilities to drive innovation for the betterment of the world. The need to run more simulations faster has spurred the development of ever faster processors, networking and storage. The approach to speeding up simulations has been one of brute force: faster computing to deliver faster results.
But the insatiable desire to perform simulations faster has very real implications in today’s world – such as managing the power requirements of future supercomputers. It’s time in high performance computing to revisit the brute force approaches to achieve the next level of performance.</p>
+
+<p><strong>Lessons from the past</strong></p>
+
+<p>We sometimes forget that it’s important to look at lessons from the past in order to create a better future. HPC simulations today are computationally intensive – and as the fidelity of models increases, so does the number of calculations and the time to solution. Rethinking this laborious method for simulations, are there ways that we can cut down on the number of calculations performed? A calculation avoided is time saved. Our lesson takes us back to 1763, when Thomas Bayes authored <em>“An Essay towards solving a Problem in the Doctrine of Chances”</em>, from which Bayes’ Theorem was developed.</p>
+
+<p>In simple terms, Bayes’ theorem can be used to predict the probability of an outcome based upon prior knowledge or information. What if Bayes’ theorem could be applied to computational simulations, to determine the likelihood that a given iteration of a simulation will provide a useful outcome, and to discard those iterations that likely won’t? A calculation avoided is time saved. As it turns out, applying Bayesian methods to HPC design can dramatically reduce the time to optimal product specification.</p>
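+
+<p>For reference, the theorem itself fits on a single line. Writing H for a hypothesis and E for observed evidence:</p>
+
+<p><em>P(H|E) = P(E|H) &times; P(H) / P(E)</em></p>
+
+<p>In words: the probability of a hypothesis given new evidence (the posterior) is proportional to how well the hypothesis predicts that evidence (the likelihood), weighted by how plausible the hypothesis was beforehand (the prior). In essence, a Bayesian-optimized workflow applies this update repeatedly to judge which candidate configuration is most worth simulating next.</p>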
+<p><strong>Bayesian optimization at work</strong></p>
+
+<p>To put Bayesian methods to the test, the engineers of the IBM Systems High Speed Bus Signal Integrity (HSB-SI) Team used software based upon the principles of Bayesian statistics called IBM Bayesian Optimization (IBO), developed by IBM Research. IBO was designed to accelerate computational workflows through the application of sophisticated algorithms. The HSB-SI team’s challenge is to minimize the time needed for design validation simulation analysis of high-speed interfaces for the purpose of choosing an optimal configuration point, while maintaining or increasing the fidelity of the solution. In testing IBO, they wanted to reduce the number of simulations needed to reach the optimal configuration point for chip-to-chip communication.</p>
+
+<blockquote>
+<p><em>“Our team is taking advantage of state-of-the-art machine learning to design computer systems of the future.”</em>
+<strong>Dale Becker, Ph.D., Chief Engineer Electrical Packaging Integration, IBM</strong></p>
+
+</blockquote>
+<p><a href="https://tci.taborcommunications.com/l/21812/2019-12-18/6kxfd4">The results were dramatic</a>. They achieved a 140x faster time to solution with higher accuracy than their legacy method. They used 99% fewer cores to arrive at a higher-confidence solution with less than a 1% error rate using IBO.</p>
+
+<p>With time to solution being a critical element of competitive advantage, the adoption of sophisticated statistical methods and machine learning to accelerate simulation workflows is destined to grow quickly. In our next article about innovations in HPC we will highlight multiple use cases where Bayesian-optimized workflows are transforming HPC simulation-driven innovation.</p>
+
+<p><strong>Originally published on HPCwire IBM Solution Channel on December 18, 2019</strong></p>
+
+
+
+
+    SC'19 Recap
+
+    2019-11-27T09:59:00-07:00
+    https://hpc.social/2019/sc-19-recap
+    <p>Last week was the annual <a href="https://sc19.supercomputing.org/">Supercomputing conference, held this year in Denver</a>, and it was its usual whirlwind of big product announcements, research presentations, vendor meetings, and catching up with old colleagues.  As is the case every year, SC was both too short and too long; there is a long list of colleagues and vendors with whom I did not get a chance to meet, yet at the same time I left Denver on Friday feeling like I had been put through a meat grinder.<br /><br />All in all it was a great conference, but it felt like it had the same <a href="https://glennklockwood.blogspot.com/2019/06/isc19-recap.html">anticipatory undertone I felt at ISC 2019</a>.  There were no major changes to the Top 500 list (strangely, that <a href="https://www.scmp.com/tech/policy/article/3015997/china-has-decided-not-fan-flames-super-computing-rivalry-amid-us">mysterious 300+ PF Sugon machine that was supposed to debut at ISC</a> did not make an appearance in Denver).  AMD Rome and memory-channel Optane are beginning to ship, but it seems like everyone’s got their nose to the grindstone in pursuit of achieving capable exascale by 2021.<br /><br />As with every major HPC conference, I approached SC this year with the following broad objectives:<br /><ol><li><b>Sharing knowledge and ideas</b> by contributing to the technical program and its workshops, tutorials, and BOFs with the goal of getting more momentum behind good ideas and steering research and roadmaps in a direction best aligned with where I think the HPC industry needs to go</li><li><b>Gathering intelligence</b> across different technologies and market verticals to stay ahead of where technology and the community may be driving as a result of other parallel industries</li><li><b>Contributing to community development</b> amongst storage and I/O researchers and practitioners with the goal of broadening the community and bringing more people and ideas to the table</li><li><b>Building and maintaining relationships</b> with individual vendor representatives and peers so that I know to whom I can turn when new opportunities or challenges come up</li></ol>The things I took away from the conference are colored by these goals and the fact that I mostly work in high-performance storage systems design.  If I missed any major themes or topics in this recap post, it was likely a reflection of the above goals and perspective.<br /><br /><h2 id="before">Before the conference</h2>SC’19 started back in the early spring for me since I served on the technical papers committee and co-chaired the Parallel Data Systems Workshop this year.
That all amounted to a predictable amount of work throughout the year, but there were two surprises that came up in October with respect to SC that are worth mentioning before we dive into the technical contents of the conference.<br /><br />&lt;h3&gt;The “I am HPC Guru” campaign&lt;/h3&gt;<a href="https://twitter.com/JimCownie">Jim Cownie</a> had the brilliant idea in early October to launch a covert campaign to create “I am HPC Guru” pins for SC, and he enlisted a group of willing members of the HPC Twitter community to pitch in.  I was fortunate enough to be invited to participate in the fun, and judging by the reach of the <a href="https://twitter.com/search?q=%23IAmHPCGuru">#IAmHPCGuru</a> tag on Twitter during the conference, it was a wild success.<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td&gt;<a href="https://1.bp.blogspot.com/-oP5ANNrwSMM/XdsQydbfYAI/AAAAAAABHeE/Wuf7R75LQK4UPrl7vyyNDWSDQQGn0iU9QCLcBGAsYHQ/s1600/293D77AB-1BCA-44F6-AC92-F883782F4534_1_201_a.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="320" src="https://1.bp.blogspot.com/-oP5ANNrwSMM/XdsQydbfYAI/AAAAAAABHeE/Wuf7R75LQK4UPrl7vyyNDWSDQQGn0iU9QCLcBGAsYHQ/s320/293D77AB-1BCA-44F6-AC92-F883782F4534_1_201_a.jpeg" width="240" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="font-size: 12.800000190734863px;"&gt;An allotment of “I am HPC Guru” pins.  People who pitched in also got a commemorative larger-sized pin (shown outside the bag above) which was a calling card for members of the secret society.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;<br />Hats off to Jim for conceiving this great idea, seeing through the design and shipment of the pins, and being so inclusive with the whole effort.  There are now hundreds of HPC_Guru pins all over the world thanks to Jim’s efforts (and a couple dozen still with me here in California…), and I think it was a really positive way to build the Twitter-HPC community.<br /><br />&lt;h3&gt;The new job&lt;/h3&gt;Life also threw me a bit of a curve ball in late October when I took on a new set of responsibilities at NERSC and changed from contributing to an R&amp;D group to leading an operational storage team.  This meant that, in addition to all the pre-conference commitments I had made with an eye towards longer-term storage technology strategy, I suddenly had to contextualize my goals with respect to a completely new role in tactical planning and deployment.<br /><br />Whereas I’ve historically written off sales-oriented meetings at SC, having good relationships with vendor sales teams in addition to their engineers and product managers is now an essential component of my new position.  As a result of wearing these two hats instead of one, the number of hard commitments I had over the course of the conference roughly doubled over what it had been in years past.  About half of these meetings were private (and not things about which I could write), and they also reduced the time I could’ve otherwise spent getting into the weeds about upcoming technologies.<br /><br />Because the conference was so broken up into private and public meetings for me this year, a chronological recounting of the conference (as I did for <a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html">my SC’18 recap</a>) would be full of odd gaps and not make a whole lot of sense.  
Instead, I will focus on a few of the juiciest topics I took away from the conference:<br />&lt;ol&gt;&lt;li&gt;<a href="https://glennklockwood.blogspot.com/2019/11/sc19-recap.html#trends">High-level trends that seemed to pop up repeatedly over the week</a>&lt;/li&gt;&lt;li&gt;<a href="https://glennklockwood.blogspot.com/2019/11/sc19-recap.html#splash">Intel’s disclosures around the Aurora/A21 system</a>&lt;/li&gt;&lt;li&gt;<a href="https://glennklockwood.blogspot.com/2019/11/sc19-recap.html#pdsw">Outcomes from the 2019 Parallel Data Systems Workshop (PDSW 2019)</a>&lt;/li&gt;&lt;li&gt;<a href="https://glennklockwood.blogspot.com/2019/11/sc19-recap.html#e1kf">The Perlmutter all-NVMe storage node architecture</a>&lt;/li&gt;&lt;li&gt;<a href="https://glennklockwood.blogspot.com/2019/11/sc19-recap.html#daos">DAOS and the 2019 DAOS User Group meeting</a>&lt;/li&gt;&lt;li&gt;<a href="https://glennklockwood.blogspot.com/2019/11/sc19-recap.html#else">Everything else</a>&lt;/li&gt;&lt;/ol&gt;&lt;div&gt;<br />&lt;/div&gt;</p> +<h2 id="trends">High-level trends</h2> +<p>It’s difficult to group together all of the disparate things I heard and learned over the week into crisp bundles that I would consider emerging trends, but there were a few broad topics that kept popping up that suggested the following:<br /><br /><b>#1 - Memory-channel 3D XPoint is now out in the wild at sufficient scale that a picture is beginning to form around where it fits in the I/O stack</b>.  The <a href="http://nextgenio.eu/">NEXTGenIO project</a> and <a href="https://daos-stack.github.io/">Intel DAOS</a> both demonstrated the performance achievable when 3D XPoint is integrated into larger systems this year, and the acceleration it offers can be staggering when a sensible software framework is built around persistent memory to bridge it with other media (like flash) and higher-level functionality (like parallel storage).  Michèle Weiland and Adrian Jackson presented their successes with the NEXTGenIO project throughout the week, most notably in the technical papers track (see “<a href="https://dl.acm.org/citation.cfm?id=3356159">An early evaluation of Intel’s Optane DC persistent memory module and its impact on high-performance scientific applications</a>”) and across several smaller events (e.g., Adrian presented performance results, <a href="https://www.epcc.ed.ac.uk/blog/2019/10/30/precision-persistent-programming">detailed in his EPCC blog post</a>, at the Multi-Level Memory BOF).  DAOS also made a splash on IO-500; more on this below.<br /><br /><b>#2 - The I/O ecosystem developed in preparation for the manycore era is making the transition from pure research to practical engineering effort.</b>  As the first generation of 7nm CPUs hit the market with KNL-like core counts and massive scale-up GPU node architectures are being announced by every major HPC silicon provider, latency-hiding techniques for I/O are becoming a hot topic.  Asynchronous I/O—that is, techniques that allow an application to continue computing while a write I/O operation is still happening—came up a few times, and this technique is also moving up in the software stack from system software (such as DAOS, WekaIO, and VAST) into middleware (MPI-IO and HDF5).  I touch on this in the PDSW section below.<br /><br /><b>#3 - Innovation in HPC storage is moving away from the data plane and towards the full data life cycle.  
</b>Whereas focus in HPC I/O has traditionally revolved around making I/O systems as fast as possible, research and product announcements this year seemed to gravitate towards data management—that is, how to manage the placement of data before, during, and after I/O.  Proprietary frameworks for data migration, policy management, tiering, and system-level analytics and intelligence (backed by serious vendor investment; see <a href="https://investors.cray.com/news-releases/news-release-details/cray-introduces-clusterstor-e1000-storage-fuel-converged">Cray ClusterStor Data Services</a> and <a href="https://www.ddn.com/press-releases/ddn-unveils-exa5-hpc-big-data-ai-acceleration-multicloud-data-management-isc19/">DDN STRATAGEM</a>) are popping up across the storage appliance market as a differentiator atop open-source software like Lustre, and applying AI to optimize data placement is maturing from novel research into product engineering.<br /><br /><b>#4 - Scientific workflows—and the parallels they have with enterprise and hyperscale markets—are starting to be taken seriously by technology providers.  </b>Vendors have begun to take ownership of the data movement challenges that exist <i>between</i> bursts of compute-intensive jobs. Advances aimed at edge computing are becoming surprisingly relevant to HPC since decentralized data that is far away from compute is, in a sense, how HPC has done storage for decades.  Whether they be sensors distributed across billions of cell phones, thousands of non-volatile storage media distributed across an exascale computing system, or detectors deployed at giant telescopes relying on a supercomputer for image processing, there is a common set of data management, movement, and remote processing challenges whose solutions can be applied across the board.<br /><br />&lt;h2 id="splash"&gt;Intel’s big splash&lt;/h2&gt;Following on their big system-level disclosures at ISC’19, Intel’s disclosure of the ALCF exascale system node architecture and the unveiling of their software strategy seemed to be the biggest splash of SC’19.  
I was not actually at the Intel DevCon keynote where Raja Koduri made the announcements, but his slides on <a href="https://s21.q4cdn.com/600692695/files/doc_presentations/2019/11/DEVCON-2019_16x9_v13_FINAL.pdf">Xe and oneAPI are available online</a>.<br /><br />The node architecture is, at a glance, very similar to the Summit node architecture today:<br />&lt;blockquote class="twitter-tweet"&gt;&lt;div dir="ltr" lang="en"&gt;Aurora <a href="https://twitter.com/hashtag/supercomputer?src=hash&amp;ref_src=twsrc%5Etfw">#supercomputer</a> <a href="https://twitter.com/argonne?ref_src=twsrc%5Etfw">@argonne</a> will have nodes with 2 Sapphire Rapids CPUs and 6 Ponte Vecchio GPUs with unified memory architecture<a href="https://twitter.com/hashtag/SC19?src=hash&amp;ref_src=twsrc%5Etfw">#SC19</a> <a href="https://twitter.com/hashtag/HPC?src=hash&amp;ref_src=twsrc%5Etfw">#HPC</a> <a href="https://twitter.com/hashtag/AI?src=hash&amp;ref_src=twsrc%5Etfw">#AI</a> <a href="https://twitter.com/hashtag/Exascale?src=hash&amp;ref_src=twsrc%5Etfw">#Exascale</a> <a href="https://twitter.com/hashtag/GPU?src=hash&amp;ref_src=twsrc%5Etfw">#GPU</a> <a href="https://t.co/HTGMnYh7AY">pic.twitter.com/HTGMnYh7AY</a>&lt;/div&gt; +— HPC Guru (@HPC_Guru) <a href="https://twitter.com/HPC_Guru/status/1196219238328881152?ref_src=twsrc%5Etfw">November 18, 2019</a>&lt;/blockquote&gt;From the slide and accompanying discussion on Twitter, there was quite a lot unveiled about the node architecture.  Each node will have:<br />&lt;ul&gt;&lt;li&gt;Two Sapphire Rapids Xeons (which appear to have 8 channels of DDR in the aforementioned slide) and six Ponte Vecchio Intel GPUs&lt;/li&gt;&lt;li&gt;A CXL-based “Xe Link” router provides all-to-all connectivity between the GPUs, presumably comparable to (but more standards-based than) NVLink/NVSwitch, for a unified memory space&lt;/li&gt;&lt;li&gt;Eight Slingshot NIC ports per node, which is 1.6 Tbit/sec of injection bandwidth&lt;/li&gt;&lt;li&gt;A “<a href="https://twitter.com/david_schor/status/1196216301716307968">Rambo Cache</a>” that sits between HBM, GPU, and CPU that presumably reduces NUMA effects for hot data that is being touched by many computing elements&lt;/li&gt;&lt;li&gt;A “<a href="https://twitter.com/david_schor/status/1196215105450496000">matrix engine</a>” (which sounds an awful lot like NVIDIA’s tensor cores) in each GPU&lt;/li&gt;&lt;/ul&gt;&lt;div&gt;This was an extremely daring release of information, as Intel has now publicly committed to a 7nm GPU part (comparable to TSMC’s 5nm process), along with a high-yield EMIB process (their chiplet interconnect for HBM integration) and Foveros (their 3D die stacking for Rambo integration), in 2021.&lt;/div&gt;</p> +<div><br /></div> +<div>Intel also released the beta version of their&nbsp;<a href="https://software.intel.com/oneapi">Intel oneAPI</a> which appears to be <a href="https://twitter.com/david_schor/status/1196212194339250176">a mixture of re-branded Intel developer products</a> (Fortran and C++ compilers, TBB, MKL, DAL, MPI, VTune, etc) with their new SYCL-based Data Parallel C++ compiler. &nbsp;The novelty here is that Intel is committing to supporting this entire stack for CPUs, GPUs, FPGAs, and matrix accelerators so that, for example, you could feasibly write a single application with a single set of tools that runs across all accelerator types.</div> +<div><br /></div> +<div>There was a lot of interest in SYCL at the Performance Portability and Productivity workshop, P3HPC, on Friday. 
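+<p>For those who have not seen it, SYCL is single-source modern C++: the host code and the device kernel live in the same translation unit, and the same kernel can be compiled for a CPU, GPU, or FPGA. A minimal sketch of my own, written in the unified-shared-memory style that Data Parallel C++ promotes (header names, namespaces, and the queue shortcut syntax vary by SYCL version and implementation):</p>
+
+<pre><code>#include &lt;CL/sycl.hpp&gt;   // &lt;sycl/sycl.hpp&gt; in newer implementations
+
+int main()
+{
+    sycl::queue q;       // binds to a default device: CPU, GPU, or FPGA
+    const size_t n = 1 &lt;&lt; 20;
+
+    // Unified shared memory, visible to both host and device
+    double *x = sycl::malloc_shared&lt;double&gt;(n, q);
+    for (size_t i = 0; i &lt; n; ++i) x[i] = 1.0;
+
+    // The lambda is the device kernel; no separate kernel language needed
+    q.parallel_for(sycl::range&lt;1&gt;(n), [=](sycl::id&lt;1&gt; i) {
+        x[i] *= 2.0;
+    }).wait();
+
+    sycl::free(x, q);
+    return 0;
+}
+</code></pre>
+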
&nbsp;There were two talks of particular interest in the parts I attended; the first, presented by Balint Joo of Jefferson Lab, showed the <a href="https://drive.google.com/file/d/1rBIzzdGWvVHrQKTwA44o8OLhOSA4nW2P/view?usp=sharing">performance of a quantum chromodynamics kernel when implemented using Kokkos, accelerator-specific libraries, and SYCL</a>:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://1.bp.blogspot.com/-YHXs6YL4WSo/Xd2KjpB4JII/AAAAAAABHg8/PPkgPTxcGtw-s7_vonpLPuuOFhNJofGLACLcBGAsYHQ/s1600/EC1521A9-3E0F-4BA6-AED1-AB51182E4C13_1_201_a.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-YHXs6YL4WSo/Xd2KjpB4JII/AAAAAAABHg8/PPkgPTxcGtw-s7_vonpLPuuOFhNJofGLACLcBGAsYHQ/s400/EC1521A9-3E0F-4BA6-AED1-AB51182E4C13_1_201_a.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">SYCL vs. Kokkos vs. native on NVIDIA and Intel architectures</td></tr></tbody></table><br />These early results are encouraging: with the exception of KNL, the SYCL ecosystem is already showing promise as a performance-portable framework. &nbsp;The same is generally true for more complex computational kernels as well, as presented by <a href="https://drive.google.com/file/d/12asEc4DddbEOJ1YQR9EV2QXZlBQs70si/view?usp=sharing">Istvan Reguly from Pázmány Péter Catholic University</a>:<br /><div class="separator" style="clear: both; text-align: center;"></div> +<br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://1.bp.blogspot.com/-YA4TpniLg4o/Xd2LqntEV1I/AAAAAAABHhE/q53qMlVGTvkW1FnnvtO1XTGNMHwreIqUwCLcBGAsYHQ/s1600/2FAA7FDA-5100-4F79-B9F5-B912BC43EFA0_1_201_a.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-YA4TpniLg4o/Xd2LqntEV1I/AAAAAAABHhE/q53qMlVGTvkW1FnnvtO1XTGNMHwreIqUwCLcBGAsYHQ/s400/2FAA7FDA-5100-4F79-B9F5-B912BC43EFA0_1_201_a.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Performance portability figure of merit for a complex kernel using different performance-portable parallel runtimes.</td></tr></tbody></table><br />Intel's choice to back an open standard rather than develop its own proprietary APIs for each accelerator type was a very smart decision, as it looks like they are already making up lost ground against NVIDIA in building a robust software ecosystem around their accelerator technologies. &nbsp;The fact that these presentations were given by application scientists, not Intel engineers, really underscores this.</div> +<div><br /></div> +<div>Strangely, AMD kept a low profile at SC by comparison despite the fact that Rome is beginning to enter the market and, by all accounts I heard on the show floor, selling like gangbusters. 
&nbsp;One major procurement I heard about switched from an Intel CPU-based plan of record to an AMD processor as a result of a schedule slip by Intel; this wound up resulting in the system obtaining 50% more cores at the same cost (plus the added benefit of PCIe Gen4), which is a testament to the advantage that AMD currently has in the near term.</div> +<div><br /></div> +<div>By comparison, very few large HPC centers seem to be biting on Intel's Cascade Lake-AP despite Intel's&nbsp;<a href="https://medium.com/performance-at-intel/hpc-leadership-where-it-matters-real-world-performance-b16c47b11a01">very aggressive marketing against Rome</a>. &nbsp;Combined with the above observation that the Aurora architecture's Sapphire Rapids processors will have only eight memory channels per socket, this suggests that Cascade Lake-AP's 12-channel socket was likely released as a stopgap to have an answer to Rome while 10nm Xeon part production is scaling up.</div> +<p><br />&lt;h2 id="pdsw"&gt;PDSW 2019&lt;/h2&gt;This year I had the great honor of co-chairing the Parallel Data Systems Workshop, the premier data and storage workshop at SC, along with the esteemed Phil Carns (creator of <a href="https://www.mcs.anl.gov/research/projects/darshan/">Darshan</a> and <a href="https://en.wikipedia.org/wiki/OrangeFS">PVFS2/OrangeFS</a>, among other things).  We tried to broaden the scope of the workshop to be more inclusive of “cloudy” storage and data topics, and we also explicitly tried to build the program to include discussion about data management that ran tangential to traditional HPC-focused storage and I/O.<br /><br />The proceedings are already online in an <a href="https://conferences.computer.org/sc19w/2019/#!/toc/16">interim location hosted by ACM</a>, and the full proceedings will be published by IEEE TCHPC.  Slides are available on the <a href="http://www.pdsw.org/">PDSW website</a>, and I tried to tag my realtime thoughts using <a href="https://twitter.com/search?q=%23pdsw19">#pdsw19 on Twitter</a>.<br /><br />&lt;h3&gt;Alluxio Keynote&lt;/h3&gt;Our keynote speaker was Haoyuan Li, founder of <a href="https://www.alluxio.io/">Alluxio</a>, who gave a brilliant talk about the data orchestration framework he developed at <a href="https://amplab.cs.berkeley.edu/">AMPLab</a> and went on to commercialize.  It is an abstraction that stitches together different storage resources (file systems, object stores, etc) into a single namespace that applications can use to read and write data in a way that hides the complexity of tiered storage.  It was designed towards the beginning of the “Big Data revolution” with a specific eye towards providing a common interface for data accessibility; an application written against the Alluxio API would be made future-proof if the HDFS or S3 APIs fizzled, since Alluxio hides the specific API and semantics of each native storage interface from user applications.<br /><br />Had something like this existed in the early days of HPC, there’s a good chance that we would not be stuck using POSIX I/O as the least common denominator for data access.  That said, Alluxio does solve a slightly easier problem in that it targets analytics workloads that are read-intensive—for example, it does not provide a means for applications to do random writes, and so it provides only a subset of the full semantics that some more general-purpose I/O interfaces (such as file access) may provide.  
In making this trade-off though, it is able to aggressively cache data from any storage backend in a distributed memory space, and Alluxio has a configurable cache eviction policy for predictable workflows.<br /><br />In describing the motivation for the Alluxio design, Haoyuan had some interesting insights.  In particular, he pointed out that there is a growing movement away from the hyperconverged hardware architecture that motivated Hadoop and HDFS:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-2qbf6V-KeiM/XdokMSW19kI/AAAAAAABHcQ/6O8UfJFwx_sCnIdJl0rW7lXJ4PulFOSTgCLcBGAsYHQ/s1600/IMG_8272.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="298" src="https://1.bp.blogspot.com/-2qbf6V-KeiM/XdokMSW19kI/AAAAAAABHcQ/6O8UfJFwx_sCnIdJl0rW7lXJ4PulFOSTgCLcBGAsYHQ/s400/IMG_8272.jpeg" width="400" /></a>&lt;/div&gt; +<br />The whole “move compute to where the data is!” model for Hadoop has always struck me as rather fanciful in practice; it only works in single-tenant environments where there’s no chance of someone else’s compute already existing where your data is, and it imposes a strict coupling between how you scale data and analytics.  As it turns out, the data analytics industry is also waking up to that, and as Haoyuan’s slide above shows, separating storage from compute gives much more flexibility in how you scale compute with respect to data, but at the cost of increased complexity in data management.  The whole point of Alluxio is to minimize that cost of complexity by making data look and feel local through (1) providing a single namespace and API, and (2) using distributed memory caching to make data access perform as well as if compute and memory were colocated.<br /><br />This is a bit ironic since HPC has been disaggregating storage from compute for decades; HPC systems have tended to scale compute capability far faster than storage.  However, the HPC community has yet to address the added complexity of doing this, and we are still struggling to simplify storage tiering for our users.  This is only getting worse as some centers slide back into hyperconverged node designs by incorporating SSDs into each compute node.  This spreads data across multiple namespaces <i>and</i> further complicates data access since the semantics across those namespaces differ.  For example, it’s not sufficient to know that<br />&lt;ul&gt;&lt;li&gt;<span>/local</span> is the fastest tier&lt;/li&gt;&lt;li&gt;<span>/scratch</span> is less fast&lt;/li&gt;&lt;li&gt;<span>/home</span> is slow&lt;/li&gt;&lt;/ul&gt;since<br />&lt;ul&gt;&lt;li&gt;<span>/local</span> is only coherent with other processes sharing the same physical compute node&lt;/li&gt;&lt;li&gt;<span>/scratch</span> is globally coherent&lt;/li&gt;&lt;li&gt;<span>/home</span> is globally coherent&lt;/li&gt;&lt;/ul&gt;Alluxio is not the solution to this problem at present because it is optimized for write-once, read-many workloads whereas HPC does have to support random writes.  That said, HPC storage systems that incorporate the same design goals as Alluxio (connecting many types of storage under a single namespace, providing a restricted set of semantics, and applying aggressive caching to deliver local-like performance) hold a lot of promise.  
Perhaps it’s no surprise that every serious parallel file system on the market is beginning to implement features like this—think Lustre <a href="http://wiki.lustre.org/File_Level_Redundancy_Solution_Architecture">File-Level Redundancy (FLR)</a> and <a href="https://dl.acm.org/citation.cfm?id=3356139">Persistent Client Caching (LPCC)</a>, <a href="http://files.gpfsug.org/presentations/2017/Manchester/02-1_AFM.pdf">Spectrum Scale AFM</a>, and the core two-tier design of <a href="https://www.weka.io/pdf-content/wekaio-hpc-storage-architecture/">WekaIO</a>.<br /><br />Haoyuan also presented a few case studies that showcased the ability of Alluxio to ease the transition from on-premise infrastructure (like Hadoop with HDFS) to hybrid cloud (e.g., run Presto across datasets both in older on-prem HDFS and newer S3 buckets).  It seems to be very fashionable to run analytics directly against data in object stores in industry, and Alluxio essentially gives such data more dynamism by being the place where active data can be staged for processing on demand.  Because it is a stateless orchestration layer rather than a storage system itself, Alluxio also seems nicely compatible with dynamic provisioning of compute resources.  In this sense, it may be an interesting internship project to see if Alluxio could be deployed on an HPC system to bridge a large data analytics job with an off-system object store.  Get in touch with me if you know a student who may want to try this!<br /><br />&lt;h3&gt;Asynchronous I/O&lt;/h3&gt;Middleware for asynchronous I/O came up in two different papers this year.  The first, “<a href="https://sc19.supercomputing.org/proceedings/workshops/workshop_pages/ws_pdsw109.html">Enabling Transparent Asynchronous I/O using Background Threads</a>” by Tang et al., described a new pluggable runtime for HDF5 that processes standard HDF5 I/O requests asynchronously.  It does this by copying I/O requests and their metadata into a special buffer, putting those requests on a queue that is managed by the asynchronous runtime, building a directed graph of all requests’ dependencies, and dispatching I/Os alongside regular application execution using a lightweight (Argobots-based) asynchronous worker pool.<br /><br />What this amounts to is that a standard HDF5 write call wouldn’t block until the I/O has been committed to disk somewhere; instead, it returns immediately after the async runtime makes a copy of the data to be written into its own private memory buffer.  The application is then free to continue computing, while an Argobots thread begins buffering and dispatching outstanding asynchronous I/O calls.  
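+<p>Concretely, the application side of this can stay ordinary, blocking-looking HDF5 code. A sketch of my own (not code from the paper) of what a checkpoint routine might look like:</p>
+
+<pre><code>#include "hdf5.h"
+
+// An ordinary-looking checkpoint write.  Under the asynchronous VOL
+// connector described by Tang et al., these same calls return after the
+// runtime buffers each request, and an Argobots worker performs the
+// actual I/O while the application keeps computing.
+void checkpoint(const double *u, hsize_t n)
+{
+    hid_t file  = H5Fcreate("ckpt.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+    hid_t space = H5Screate_simple(1, &amp;n, NULL);
+    hid_t dset  = H5Dcreate2(file, "u", H5T_NATIVE_DOUBLE, space,
+                             H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+    H5Dwrite(dset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, u);
+    // with the async runtime, control returns here long before the
+    // bytes reach storage
+
+    H5Dclose(dset);
+    H5Sclose(space);
+    H5Fclose(file);
+}
+</code></pre>
+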
The performance that results from being able to overlap I/O with computation is remarkable:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-FsMoNOoV-Xc/XdrLI2LR_WI/AAAAAAABHcg/oYZFSLVLYIANmINOJSzxRGzaDU_diJuSACLcBGAsYHQ/s1600/169FBEAD-B394-48D4-9646-91A7CFD21747_1_201_a.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="298" src="https://1.bp.blogspot.com/-FsMoNOoV-Xc/XdrLI2LR_WI/AAAAAAABHcg/oYZFSLVLYIANmINOJSzxRGzaDU_diJuSACLcBGAsYHQ/s400/169FBEAD-B394-48D4-9646-91A7CFD21747_1_201_a.jpeg" width="400" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;I/O speedup at scale as a result of the asynchronous runtime backend for HDF5 presented by Tang et al.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;<br />What’s more impressive, though, is that this backend is almost entirely transparent to the user application; in its simplest form, it can be enabled by setting a single environment variable.<br /><br />Later in the day, Lucho Ionkov presented a much more novel (research-y?) asynchronous I/O runtime in his paper, “<a href="https://sc19.supercomputing.org/proceedings/workshops/workshop_pages/ws_pdsw101.html">A Foundation for Automated Placement of Data</a>”, which glued together <a href="https://github.com/lanl/DRepl">DRepl</a> (an abstraction layer between scientific applications and storage architectures, vaguely similar to what Alluxio aims to do), <a href="https://dl.acm.org/citation.cfm?id=3085484">TCASM</a> (a Linux kernel modification that allows processes to share memory), and <a href="https://link.springer.com/chapter/10.1007/978-3-319-20119-1_22">Hop</a> (an expressive key-value store with tunable performance/resilience requirements).  The resulting runtime provides a high-level interface for applications to express I/O and data placement as a series of attach, publish, and re-attach operations to logical regions of memory.  The runtime then manages the actual data movement (whether it be between nodes or to persistent storage) asynchronously.<br /><br />Again, the net result in speedup as the problem size scales up is impressive:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-S--IJmBfKj8/XdrRdtryjmI/AAAAAAABHcs/CoKVjetWhvYXjFHYGdwqOZf3BnI8FMDPQCLcBGAsYHQ/s1600/D8B1E642-9B22-484E-99F9-C83DE81B9AC4_1_201_a.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-S--IJmBfKj8/XdrRdtryjmI/AAAAAAABHcs/CoKVjetWhvYXjFHYGdwqOZf3BnI8FMDPQCLcBGAsYHQ/s400/D8B1E642-9B22-484E-99F9-C83DE81B9AC4_1_201_a.jpeg" width="400" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;I/O speedup at scale using the asynchronous I/O runtime presented by Ionkov in Otstott et al.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;As with the asynchronous HDF5 paper, performance gets better with scale as the increasing costs of doing I/O at scale are amortized by overlapping it with computation.  
In contrast to HDF5 though, this runtime comes with a completely new application API, so one would need to convert an application’s critical I/O routines to use this framework instead of POSIX I/O.  The runtime is also pretty heavyweight in that it requires a separate global data placement “nameserver,” a custom Linux kernel, and buy-in to the new memory model.  In that sense, this is a much more research-oriented framework, but the ideas it validates may someday appear in the design of a full framework that incorporates both an application runtime and a storage system.<br /><br /><b>Why is this important?</b>  These asynchronous I/O runtimes are making a lot more sense in the era of heterogeneous computing where accelerators (think GPUs) really aren’t good at driving a full kernel-based I/O pipeline.  Instead of running a full I/O stack and enforcing strict consistency (i.e., serializing I/O) on a lightweight accelerator core, having an asynchronous runtime running on a fat core that simply copies an I/O buffer from accelerator memory to slower memory before releasing program control back to the accelerator allows the accelerator to spend less time doing what it’s terrible at doing (ordering I/O operations) and more time computing.  At the same time, the fat core that is running the asynchronous I/O runtime can then operate on that copied I/O buffer on its own time, reorder and serialize operations to ensure consistency, and jump into and out of the kernel to enforce file permissions without interrupting the accelerator:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-egf3u1VCO1w/XdrhXLQcqxI/AAAAAAABHc4/V5AP2pTwnsUXzNKkMmMHCS1uWuBFXy6TgCLcBGAsYHQ/s1600/async-io-runtime.png" style="margin-left: auto; margin-right: auto;"><img border="0" src="https://1.bp.blogspot.com/-egf3u1VCO1w/XdrhXLQcqxI/AAAAAAABHc4/V5AP2pTwnsUXzNKkMmMHCS1uWuBFXy6TgCLcBGAsYHQ/s640/async-io-runtime.png" width="100%" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;Sketch of how an asynchronous I/O runtime might map to a heterogeneous node architecture&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;<br />Ron Oldfield did raise a really great consideration during PDSW about this though: at the end of the day, the asynchronous I/O runtime still has to share network resources with the application’s message passing runtime (e.g., MPI).  He alluded to work done a decade ago that found that asynchronous I/O was often stomping on MPI traffic since both MPI and I/O could happen at the same time.  Without some kind of awareness or coordination between the asynchronous I/O runtime and the application communication runtime, this sort of scheme is prone to self-interference when running a real application.<br /><br />Given this, the right place to integrate an asynchronous I/O runtime might be inside the message passing runtime itself (e.g., MPI-IO).  This way the asynchronous I/O scheduler could also consider the outstanding asynchronous messages it must pass and be smart about how many competing network transfers it dispatches at the same time.  Unfortunately this then places a complex burden of serialization and synchronization on the runtime, and this starts to look a lot like just throwing messages at the NIC and letting it figure out the correct ordering.  
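+<p>For reference, MPI-IO already exposes the application-facing hooks such an integrated scheduler would need. A sketch of my own using only standard calls, including a Lustre striping hint of the kind the autotuning work described below searches over:</p>
+
+<pre><code>#include &lt;mpi.h&gt;
+
+void async_dump(const double *buf, int count)
+{
+    MPI_File    fh;
+    MPI_Info    info;
+    MPI_Request req;
+
+    // Hints like this are exactly the knobs that the autotuning work
+    // discussed below searches over (ROMIO's Lustre driver maps
+    // striping_factor to the stripe count at file creation).
+    MPI_Info_create(&amp;info);
+    MPI_Info_set(info, "striping_factor", "8");
+
+    MPI_File_open(MPI_COMM_WORLD, "dump.bin",
+                  MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &amp;fh);
+
+    // Returns immediately; the MPI runtime is free to progress this write
+    // alongside (and, in principle, coordinated with) message traffic.
+    MPI_File_iwrite(fh, buf, count, MPI_DOUBLE, &amp;req);
+
+    /* ... compute ... */
+
+    MPI_Wait(&amp;req, MPI_STATUS_IGNORE);   // block only when durability matters
+    MPI_Info_free(&amp;info);
+    MPI_File_close(&amp;fh);
+}
+</code></pre>
+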
The principal advantage here would be that the runtime has a lot more visibility into user intent (and may have more spare processing capacity if most of the application time is spent on an accelerator), so it could afford to be smarter about how it builds its dependency graph.<br /><br />&lt;h3&gt;Analytics for Runtime and Operations&lt;/h3&gt;No computing-related workshop would be complete without a smattering of artificial intelligence and machine learning, and PDSW was no different this year.  Two papers were presented that attempted to use machine learning to predict parallel I/O performance in slightly different ways.<br /><br />Suren Byna presented “<a href="https://sc19.supercomputing.org/proceedings/workshops/workshop_pages/ws_pdsw103.html">Active Learning-based Automatic Tuning and Prediction of Parallel I/O Performance</a>”, in which the authors developed an approach for autotuning parallel I/O (specifically using MPI-IO hints and Lustre striping parameters) using active learning to predict the optimal values for their tuning parameters.  They used two different approaches, and the faster one uses predicted performance to infer optimal tuning values.  Given how many factors actually come into play in parallel I/O performance on production systems, their model was able to predict I/O performance quite well under a range of I/O patterns:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://1.bp.blogspot.com/--PIWB0B9dS4/Xdrl62VwRUI/AAAAAAABHdE/iAmCZ17la0ky3riAvdOd4hs6OjO5q1q3QCLcBGAsYHQ/s1600/87F85F2F-A8B5-40AC-A9AD-4A06D8AAECBA_1_201_a.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="300" src="https://1.bp.blogspot.com/--PIWB0B9dS4/Xdrl62VwRUI/AAAAAAABHdE/iAmCZ17la0ky3riAvdOd4hs6OjO5q1q3QCLcBGAsYHQ/s400/87F85F2F-A8B5-40AC-A9AD-4A06D8AAECBA_1_201_a.jpeg" width="400" /></a>&lt;/div&gt; +<br />Bing Xie et al. presented “<a href="https://sc19.supercomputing.org/proceedings/workshops/workshop_pages/ws_pdsw108.html">Applying Machine Learning to Understand Write Performance of Large-scale Parallel Filesystems</a>” which pursued a similar line of work—using machine learning to predict I/O performance—but with a slightly different goal.  Xie’s goal was to identify the factors which most strongly affect predicted I/O performance, and she found that write performance was most adversely affected by metadata load and load imbalance on Blue Gene/Q and GPFS, whereas Cray XK7 and Lustre were more affected by aggregate file system load and load imbalance.  This system-centric work laid out a more sophisticated blueprint for identifying causal relationships between poor I/O performance and system-level health events, and I think applying these approaches to the <a href="https://www.nersc.gov/research-and-development/tokio/a-year-in-the-life-of-a-parallel-file-system/">dataset I published last year with my Year in the Life of a Parallel File System paper</a> might identify some interesting emergent relationships between bad performance and the subtle factors to which it can be attributed.<br /><br /><b>Why is this important?</b>  Industry is beginning to take notice that it is no longer sufficient to just report the here-and-now of how parallel file systems are behaving, and more sophisticated analytics engines are being co-deployed with very large systems.  
For example, the <a href="https://www.hpcwire.com/2019/10/03/summit-has-real-time-analytics-heres-how-it-happened-and-whats-next/">Summit system at Oak Ridge made a splash in October by announcing the real-time analytics engine</a> that was implemented on top of it, and <a href="https://www.cray.com/products/storage/clusterstor/view">Cray View</a> is a similar analytics-capable engine built atop Lustre that Cray offers as a part of its ClusterStor lineup.  I’m not sure if DDN has something comparable, but their recent purchase of Tintri and its <a href="https://www.tintri.com/products/tintri-analytics">robust, enterprise-focused analytics engine</a> means that they hold IP that can undoubtedly be applied to their HPC-focused storage product portfolio.<br /><br />Being able to predict performance (and the conditions that cause it to degrade!) is the holy grail of parallel I/O systems management, and it’s a sure bet that all the HPC storage vendors are watching research in this area very closely to see what ideas they can pluck from the community to add value to their proprietary analytics engines.  The fact that AI is being applied to production system data and yielding useful and actionable outcomes gives legs to this general idea of AI for self-driving systems.  The talks at PDSW this year were only demonstrations, not hardened products, but these ad-hoc or small-scale demonstrations are moving us in the right direction.<br /><br />&lt;h3&gt;My Talk on Data Motion&lt;/h3&gt;I also coauthored and presented a paper at PDSW this year that was an exploratory study of how we can understand data movement throughout an entire data center.  The goal of the paper, “<a href="https://sc19.supercomputing.org/proceedings/workshops/workshop_pages/ws_pdsw106.html">Understanding Data Motion in the Modern HPC Data Center</a>,” was to generate this diagram that shows how much data flows between different systems at NERSC:</p> +<p><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-hUn48BmW3Cg/Xdr8OwIV87I/AAAAAAABHdw/cNkutk-zRR8DQNwW6Ac-fGjo65MgHEWKACLcBGAsYHQ/s1600/datacenter-graph.jpg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="300" src="https://1.bp.blogspot.com/-hUn48BmW3Cg/Xdr8OwIV87I/AAAAAAABHdw/cNkutk-zRR8DQNwW6Ac-fGjo65MgHEWKACLcBGAsYHQ/s400/datacenter-graph.jpg" width="400" /></a>&lt;/div&gt;</p> +<p><br />I won’t recount the technical content of the talk here, but the paper is open access for those interested.  We showed that it is possible to examine data motion beyond the context of individual jobs and begin tying together entire workflows, but there’s a lot of supporting work required to shore up the tools and telemetry from which this analysis draws.  
The paper was very much a long-form work in progress, and I’d love to hear from anyone who is interested in pursuing this work further.<br /><br />&lt;h2 id="e1kf"&gt;Scale-up highly available NVMe hardware&lt;/h2&gt;Although it didn’t make many headlines (as storage rarely does), <a href="https://investors.cray.com/news-releases/news-release-details/cray-introduces-clusterstor-e1000-storage-fuel-converged">Cray announced its new ClusterStor E1000 platform shortly before SC</a> and had some of their E1000-F all-NVMe enclosures on display at a few booths.  I normally don’t care too much about storage enclosures (it’s all just sheet metal, right?), but this announcement was special to me because it is the hardware platform that is going into NERSC’s Perlmutter system in 2020, and I’ve been involved with the different iterations of this hardware design for over a year now.<br /><br />It’s very gratifying to see something start out as a CAD drawing and a block diagram and grow up into actual hardware:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-mcihJuv4KHg/Xd1wvLcecuI/AAAAAAABHfU/FOZbYoHwzz0OsHQRd-WblLCOwKffw4XCgCLcBGAsYHQ/s1600/051E2FB3-E18E-4CB0-A128-C9643DAF38E8.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-mcihJuv4KHg/Xd1wvLcecuI/AAAAAAABHfU/FOZbYoHwzz0OsHQRd-WblLCOwKffw4XCgCLcBGAsYHQ/s400/051E2FB3-E18E-4CB0-A128-C9643DAF38E8.jpeg" width="400" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;The E1000-F all-NVMe enclosure&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;<br />Torben Kling Petersen gave a talk at the Exhibitor Forum disclosing the details of the hardware design on behalf of Cray, and it looks like they’ve made just about everything surrounding the E1000 public:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-uloI_lQ3Ff8/Xd1xGElhp0I/AAAAAAABHfc/4tnlBHFKMY4uBb4nMYORjYksZwJdBOl2gCLcBGAsYHQ/s1600/08FF9AF4-E0B9-4304-A510-C6505C65B31E_1_201_a.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="300" src="https://1.bp.blogspot.com/-uloI_lQ3Ff8/Xd1xGElhp0I/AAAAAAABHfc/4tnlBHFKMY4uBb4nMYORjYksZwJdBOl2gCLcBGAsYHQ/s400/08FF9AF4-E0B9-4304-A510-C6505C65B31E_1_201_a.jpeg" width="400" /></a>&lt;/div&gt; +<br />The foundation for this platform is the E1000-F high-availability enclosure as shown in the above slide.  It has two separate Rome-based servers (“controllers”) and 24 U.2 NVMe slots capable of PCIe Gen4.  
Each Rome controller has slots for up to three 200 Gbit NICs; doing the math (three 200 Gbit ports come to roughly 75 GB/s of injection bandwidth per controller, against an x2 Gen4 path to each of the 24 drives, or on the order of 95 GB/s of raw drive-side PCIe), this gives a very nicely balanced design that is implemented entirely without PCIe switches:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-pm3ADq968-c/Xd12_h5Yi8I/AAAAAAABHfo/IdytQRpXcPgHl_OfPmfezFd67wF9uQwvACLcBGAsYHQ/s1600/e1kf-cartoon.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="325" src="https://1.bp.blogspot.com/-pm3ADq968-c/Xd12_h5Yi8I/AAAAAAABHfo/IdytQRpXcPgHl_OfPmfezFd67wF9uQwvACLcBGAsYHQ/s400/e1kf-cartoon.png" width="400" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;Cartoon block diagram for one half of the E1000-F chassis.  Note that the NVMe read rates (violet text) are assumed based on Samsung PM1733 specs and performance projections that Petersen presented.  Also note that each NVMe drive is 2x2 PCIe Gen4 with multipath to the other Rome controller (not shown).&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;I visited the booth of the ODM with whom Cray worked to develop this node design and was fortunate enough to meet the node architects from both sides who gave me a really helpful breakdown of the design.  Physically, the 2U chassis is laid out something like this:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-O6Z1S401OCw/Xd164wyS9nI/AAAAAAABHf0/eFZZFOhQG6gKMkuHjRjQJrQVzjVhQlOoACLcBGAsYHQ/s1600/e1kf-layout.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="253" src="https://1.bp.blogspot.com/-O6Z1S401OCw/Xd164wyS9nI/AAAAAAABHf0/eFZZFOhQG6gKMkuHjRjQJrQVzjVhQlOoACLcBGAsYHQ/s400/e1kf-layout.png" width="400" /></a>&lt;/div&gt; +<br />Just about everything is both hot-swappable and fully redundant.  The entire system can be powered and cooled off of a single 1.2 kW(?) power supply, and all the fans are hot-swappable and configured in a 5+1:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-z-UKjbgSVGY/Xd17dG-hWRI/AAAAAAABHf8/fOlOtBODrxYlmjibKtES2P-47WZzSbrWgCLcBGAsYHQ/s1600/775BB1C2-4008-4DBA-86EA-3C8601803A90_1_201_a.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-z-UKjbgSVGY/Xd17dG-hWRI/AAAAAAABHf8/fOlOtBODrxYlmjibKtES2P-47WZzSbrWgCLcBGAsYHQ/s400/775BB1C2-4008-4DBA-86EA-3C8601803A90_1_201_a.jpeg" width="400" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;Fans are all individually replaceable and configured in 5+1.  You can also see the NVMe backplanes, attached to an active midplane (not shown), through the open fan slot.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;<br />All the fans are on the same pulse-width modulator (PWM), so they all operate at the same speed and provide even airflow as long as they are properly powered.  My recollection from what the architect told me is that the PWM signal is provided by an FPGA on the midplane which also handles drive power-up.  
Because there is only a single midplane and this power/cooling controller lives on it, this power/cooling FPGA is also configured redundantly as 1+1.  Thus, while the midplane itself is not redundant or field-replaceable, the active components on it are, and it would take physical damage (e.g., someone punching a hole through it and breaking the PCB traces) to knock the whole chassis offline.<br /><br />Each chassis has two independent node boards that are hot-pluggable and self-contained:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-SWMov-I2DwU/Xd180WGiAHI/AAAAAAABHgI/iFdEtpuQfdoF1GuyrQQH_ZZR07yjC6bRACLcBGAsYHQ/s1600/BD7CFE49-BDC8-46B6-B8FC-02D7BD22F9AC.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-SWMov-I2DwU/Xd180WGiAHI/AAAAAAABHgI/iFdEtpuQfdoF1GuyrQQH_ZZR07yjC6bRACLcBGAsYHQ/s400/BD7CFE49-BDC8-46B6-B8FC-02D7BD22F9AC.jpeg" width="400" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;One of the E1000-F node sleds with its cover popped off at the Cray booth&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;Each node board is wrapped in a sheet metal sled and has a screwed-on lid.  The whole node sled was designed by the ODM to be a field-replaceable unit (FRU), so doing something like a DIMM swap does require a screwdriver to remove the top cover.  However it’s ultimately up to OEMs to decide how to break down FRUs.<br /><br />The ODM had a bare controller board at its booth which looks like this:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;&lt;/div&gt;</p> +<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://1.bp.blogspot.com/-e-X_hhgvnlo/Xd2BNNo48MI/AAAAAAABHgc/i2w91323rtkhcshMkr6j1ebuWnu1k-CwACLcBGAsYHQ/s1600/IMG_8312.JPG" style="margin-left: auto; margin-right: auto;"><img border="0" height="178" src="https://1.bp.blogspot.com/-e-X_hhgvnlo/Xd2BNNo48MI/AAAAAAABHgc/i2w91323rtkhcshMkr6j1ebuWnu1k-CwACLcBGAsYHQ/s400/IMG_8312.JPG" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">E1000-F bare controller board</td></tr></tbody></table> +<p>There are two M.2 PCIe Gen4 slots for mirrored boot drives and a pair of big hot-plug block connectors in the front of the board for redundant power and 48 lanes of PCIe Gen4 for the 24x U.2 drives hanging off the midplane.  There’s a single riser slot for two standard HHHL PCIe add-in cards where two NICs plug in, and a third OCP-form factor slot where the third NIC can slot in.  
The rear of the controller sled shows this arrangement:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-S-Vk3p6O3iI/Xd2CElcLokI/AAAAAAABHgk/0fF_Uz6Ke2MgoO9M6Anrh0NEnSZw6FPAACLcBGAsYHQ/s1600/435EE91C-A15A-4FCD-AAFD-F88CD1D37A1B.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-S-Vk3p6O3iI/Xd2CElcLokI/AAAAAAABHgk/0fF_Uz6Ke2MgoO9M6Anrh0NEnSZw6FPAACLcBGAsYHQ/s400/435EE91C-A15A-4FCD-AAFD-F88CD1D37A1B.jpeg" width="400" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;Rear view of a single Rome controller&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;It looks like there’s a single RJ45 port (for LOM?), a power and reset button, a single USB-3, and a mini DisplayPort for crash carting.<br /><br />When Cray announced the E1000-F, <a href="https://www.hpcwire.com/2019/10/30/cray-debuts-clusterstor-e1000-finishing-remake-of-portfolio-for-exascale-era/">HPCwire ran a block diagram of the complete chassis design</a> that suggested that heartbeating would be done through a non-transparent bridge (NTB) implemented on the AMD Rome host interface.  This was a little worrisome since AMD has yet to release the proper drivers to enable this NTB for Linux in a functional way; this simple fact is leading other ODMs towards a more conservative node design where a third-party nonblocking PCIe switch is added simply to provide a functioning NTB.  When I asked the architect about this, though, he revealed that the E1000-F also has an internal gigabit Ethernet loop between both controllers for heartbeating which completely obviates the need to rely on any NTB for failover.<br /><br />Another interesting thing I learned while talking to the E1000-F designers is that the power supply configuration gives a lot of runway for the overall system design:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-9X6oywadOEw/Xd2EgIttYZI/AAAAAAABHgw/L22x4eng6YYI0T9YSwCbRr50y5SCA0-bwCLcBGAsYHQ/s1600/950DFDAE-C3E8-46CC-85AF-851023FB4EBB.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="400" src="https://1.bp.blogspot.com/-9X6oywadOEw/Xd2EgIttYZI/AAAAAAABHgw/L22x4eng6YYI0T9YSwCbRr50y5SCA0-bwCLcBGAsYHQ/s400/950DFDAE-C3E8-46CC-85AF-851023FB4EBB.jpeg" width="300" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;One of the two power supply sleds for the E1000-F chassis.  Lots of free real estate remains and is currently occupied by bus bars.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;The current power supply is (I believe) ~1200 W, and the carrier sled on which it is mounted is mostly empty space taken up by two fat bus bars that reach all the way to the front of it.  In leaving all of this space in the sled, it will be fully possible to build a physically compatible PSU sled that delivers significantly more power to the U.2 NVMe drives and host controllers if the power consumption of the controllers or the NVMe drives increases in the future.  
The ODM confirmed that the cooling fans have similar headroom and should allow the whole enclosure to support a higher power and thermal load by just upgrading the power and controller FRUs.<br /><br />This point is important because the performance of PCIe Gen4 SSDs is actually capped by their power consumption—if you look at product sheets for ruler SSDs (M.2, NF1, and E1.S), you will find that their performance is universally lower than that of their U.2 and HHHL variants due to the fact that the ruler standards limit power to 8-12W compared to U.2/HHHL’s ~25W.  This E1000-F chassis is designed as-is for 25W U.2 drives, but there are already <a href="https://146a55aca6f00848c565-a7635525d40ac1c70300198708936b4e.ssl.cf1.rackcdn.com/images/3e94b383b566fc8bfb7814ec6cad5dc88f2bad7c.pdf">proposals to push individual SSD power up to 40W</a> and beyond.  Given this trend and the high bandwidth available over a PCIe Gen4 x4 connector, it’s entirely possible that there will be a demand for higher-power NVMe enclosures as Gen4 matures and people want to drive Gen4 NVMe at line rate.<br /><br />&lt;h2 id="daos"&gt;DAOS User Group&lt;/h2&gt;The <a href="https://wiki.hpdd.intel.com/display/DC/DUG19">2019 DAOS User Group</a> was held on Wednesday in a hotel adjacent to the main convention center. In contrast to previous years in which I attended, this meeting felt like a real user group; there were presenters from several different organizations, none of whom directly contribute to or are contractual customers of DAOS.  There was also real performance data, which largely centered around the <a href="https://www.vi4io.org/io500/start">insanely high IO-500 benchmark score that DAOS posted earlier in the week</a>:<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-BkuxsxoIhbc/Xd36EaomTYI/AAAAAAABHhg/aEXJklOVKbIrI8W9U8m1B76hXL9KfNNrACLcBGAsYHQ/s1600/2A2C3168-663C-404A-A8BC-2644E7626D5D_1_201_a.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-BkuxsxoIhbc/Xd36EaomTYI/AAAAAAABHhg/aEXJklOVKbIrI8W9U8m1B76hXL9KfNNrACLcBGAsYHQ/s400/2A2C3168-663C-404A-A8BC-2644E7626D5D_1_201_a.jpeg" width="400" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;Bandwidth spread on the IO-500’s IOR test suite&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;These numbers were achieved with a pretty modest server environment and client count (<a href="https://twitter.com/geomark/status/1197297380535808000?s=21">24 DAOS servers, 26 client nodes, 28 ranks per client, dual-rail OPA100</a>) using the native DAOS API.  What I didn’t snap a photo of are the crazy metadata rates which posted a geometric mean of 4.7 million IOPS; by comparison, the 250 PB Alpine file system attached to the Summit supercomputer at Oak Ridge posted 1.2 million IOPS using more than 500 clients.  To the extent that it was meant to address the IOPS limitations intrinsic to traditional parallel file systems, the DAOS design is looking like a resounding success.<br /><br />According to the speaker, the metadata performance of this IO-500 run was not limited by any server-side resources, so adding more clients (like WekaIO’s top-scoring run with 345 clients) could have pushed this number higher.  
It was also stated that the staggering IOR read performance was limited by the aggregate Optane DIMM bandwidth which is a testament to how highly optimized the data path is.<br /><br />&lt;h3&gt;Actually using DAOS&lt;/h3&gt;This is all using the DAOS native API though, and unless you intend to rewrite all your <span>open()</span>s and <span>write()</span>s as <span>daos_pool_connect()</span> + <span>daos_cont_open()</span> + <span>daos_array_open()</span>s and <span>daos_array_write()</span>s, it’s hard to tell what this really means in terms of real-world performance.  Fortunately there was a great set of talks about the <a href="https://wiki.hpdd.intel.com/display/DC/DUG19?preview=/114950685/117310261/3_DUG19_middleware.pdf">DAOS POSIX compatibility layer and related middleware</a>.  I described the POSIX middleware a little in <a href="https://glennklockwood.blogspot.com/2019/06/isc19-recap.html">my recap of ISC’19</a>, but it’s much clearer now exactly how a POSIX application may be adapted to use DAOS.  Ultimately, there are three options that DAOS provides natively:<br /><br />&lt;ul&gt;&lt;li&gt;<b>libdfs</b>, which is a DAOS library that provides a POSIX-like (but not POSIX-compatible) API into DAOS.  You still have to connect to a pool and open a container, but instead of reading and writing to arrays, you read and write arbitrary buffers to byte offsets within file-like objects.  These objects exist in a hierarchical namespace, and there are functions provided by libdfs that map directly to POSIX operations like mkdir, rmdir, statfs, etc.  Using libdfs, you would still have to rewrite your POSIX I/O calls, but there would be a much smaller semantic gap since POSIX files and directories resemble the files and directories provided by libdfs.  A great example of what libdfs looks like can be found in the <a href="https://github.com/hpc/ior/blob/master/src/aiori-DFS.c">IOR DFS backend code</a>.&lt;/li&gt;&lt;li&gt;<b>dfuse</b>, which is a FUSE client written on top of libdfs.  With this, you literally get a file system mount point which POSIX applications can interact with natively.  Because this uses FUSE though, such accesses are still generating system calls and memory copies which come with steep latency penalties.&lt;/li&gt;&lt;li&gt;<b>libioil</b>, which is a POSIX interception library.  
This is what you'd <code>LD_PRELOAD</code> in front of a standard application, and it remaps genuine POSIX API calls into libdfs-native calls without ever going through the kernel.</li></ul>

Cedric Milesi from HPE presented benchmark slides showing that using the DFS (file-based) API over the native (array-based) API has no effect on performance:

<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-R12yuhpqwnw/Xd4F1kooVII/AAAAAAABHiE/m8Kp2vuhzz0xyG9hkhhG2OXGoycJPEqRgCLcBGAsYHQ/s1600/FD351A70-C5B7-4B43-B58F-427E991FA99F_1_201_a.jpeg"><img border="0" height="300" src="https://1.bp.blogspot.com/-R12yuhpqwnw/Xd4F1kooVII/AAAAAAABHiE/m8Kp2vuhzz0xyG9hkhhG2OXGoycJPEqRgCLcBGAsYHQ/s400/FD351A70-C5B7-4B43-B58F-427E991FA99F_1_201_a.jpeg" width="400" /></a><br /><i>Performance scaling of the native DAOS API (which encodes array objects) versus the DAOS DFS API (which encodes file and directory objects).  No discernible performance difference.</i></div>

Thus, as far as bandwidth is concerned, there is no performance difference whether you treat DAOS as an array store (its original design) or as a file/directory store (through the libdfs API).  This is excellent news: even though libdfs isn't a drop-in replacement for POSIX I/O, it implements the POSIX data model (data is stored as streams of bytes), which is a more comfortable look and feel for a storage system than storing typed arrays.  And since libioil is a shim atop libdfs, the above performance data suggest that POSIX applications won't pay significant bandwidth overheads by preloading the POSIX intercept library to get DAOS compatibility out of the box.

What's less clear is what the metadata overheads of libdfs are.  Because the whole metadata model of DFS (files and directories) is very different from native DAOS (arrays), it's impossible to do a head-to-head comparison of metadata performance.  That said, DFS metadata is only a subset of the full POSIX metadata, so it should be faster even on identical hardware.  For example, DAOS only enforces permissions when opening a container, so I would not expect DFS to have any notion of file-level or directory-level ownership or permissions bits.
As such, DFS would not incur the cost of doing an expensive recursive permission check on <code>dfs_open()</code>, and the open rate should be much higher than that of anything that adheres to POSIX.

<a href="https://wiki.hpdd.intel.com/display/DC/DUG19?preview=/114950685/117310295/5_DUG19_Argonne_harms_v2.pdf">Kevin Harms from ALCF also presented a really enlightening slide</a> containing very early performance tests from their internal DAOS testbed using dfuse and libioil:

<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-KKpue81Rplw/Xd4CH_zW1BI/AAAAAAABHh4/8Ycwv0QkgVs_rEO-D5wM6IWQ4cbFfPW4gCLcBGAsYHQ/s1600/E7AA229C-FE13-411A-BA27-2F843CDC7C0C_1_201_a.jpeg"><img border="0" height="300" src="https://1.bp.blogspot.com/-KKpue81Rplw/Xd4CH_zW1BI/AAAAAAABHh4/8Ycwv0QkgVs_rEO-D5wM6IWQ4cbFfPW4gCLcBGAsYHQ/s400/E7AA229C-FE13-411A-BA27-2F843CDC7C0C_1_201_a.jpeg" width="400" /></a></div>

This slide is a treasure trove of interesting information:

<ol><li>It implicitly confirms that the verbs provider for libfabric not only works, but works well.  Recall that the Intel testbed from which the IO-500 score was submitted used Intel Omni-Path 100, whereas the Argonne testbed uses a competitor's fabric, InfiniBand.</li><li>Single-stream performance of DAOS using the dfuse interface is 450 MB/sec, which isn't terrible.  For comparison, single-stream performance of Lustre on Cray Aries + FDR InfiniBand is about the same.</li><li>Using the libioil POSIX interface dramatically increases single-stream performance, which shines a light on how costly using the Linux VFS kernel interface (with FUSE on top) really is.  Not using FUSE, avoiding an expensive context switch into kernel mode, and avoiding a memcpy from a user buffer into a kernel buffer gives a 3x performance boost.</li></ol>

Again, in the sense that DAOS was meant to address the performance impacts of using a kernel-based storage system for I/O, it looks like DAOS is meeting expectations.

Finally, <a href="https://wiki.hpdd.intel.com/display/DC/DUG19?preview=/114950685/117310261/3_DUG19_middleware.pdf">Mohamad Chaarawi also spent some time talking about the Lustre/DAOS integration</a>, which uses DAOS dfuse to stitch together a Lustre namespace with DAOS DFS namespaces.  I mentioned this in my ISC recap, but there's now a pretty detailed slide about how this will look in practice:

<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-XNASGvs6FN8/Xd4SMLDN2iI/AAAAAAABHiQ/bBIeXxA2ySgV1Occ09dOgDlKTcqSnVuCgCLcBGAsYHQ/s1600/5FD6F079-D02B-4CD3-9D11-C63B599F53D7_1_201_a.jpeg"><img border="0" height="300" src="https://1.bp.blogspot.com/-XNASGvs6FN8/Xd4SMLDN2iI/AAAAAAABHiQ/bBIeXxA2ySgV1Occ09dOgDlKTcqSnVuCgCLcBGAsYHQ/s400/5FD6F079-D02B-4CD3-9D11-C63B599F53D7_1_201_a.jpeg" width="400" /></a></div>

This Lustre integration won't be quite as rosy as I described earlier, since DFS namespaces don't seamlessly merge into the Lustre namespace.  Instead, it looks like DFS namespaces will be mounted in a separate directory hierarchy governed by their pool UUID ("PUUID" in the above slide) and container UUID ("CUUID"), and the Lustre namespace will contain symlinks to the DFS mounts.
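
Here is the promised sketch of what the libdfs call sequence looks like for a simple create-and-write.  This is my own illustration, modeled loosely on the IOR DFS backend linked above rather than copied from it; the exact <code>dfs_*</code> argument lists have shifted between DAOS releases, so treat it as pseudocode.  It assumes the pool and container handles (<code>poh</code>, <code>coh</code>) were already obtained via <code>daos_pool_connect()</code> and <code>daos_cont_open()</code>, and error checking is omitted:

<pre><code>/* Hedged sketch of the POSIX-to-libdfs mapping -- pseudocode, not
 * verbatim DAOS API; signatures vary by release.  Error checks omitted. */
#include &lt;fcntl.h&gt;
#include &lt;sys/stat.h&gt;
#include &lt;daos.h&gt;
#include &lt;daos_fs.h&gt;

void write_example(daos_handle_t poh, daos_handle_t coh, void *buf, daos_size_t len)
{
    dfs_t *dfs;
    dfs_obj_t *obj;
    d_sg_list_t sgl;
    d_iov_t iov;

    /* wire the container's file/directory namespace into this process */
    dfs_mount(poh, coh, O_RDWR, &amp;dfs);

    /* open(2) analog: returns a DFS object handle, not a kernel fd */
    dfs_open(dfs, NULL /* parent: container root */, "checkpoint.dat",
             S_IFREG | 0644, O_CREAT | O_RDWR,
             0 /* default object class */, 0 /* default chunk size */,
             NULL, &amp;obj);

    /* write(2) analog: a scatter/gather list plus an explicit byte
     * offset, all in user space -- no system call on the I/O path */
    d_iov_set(&amp;iov, buf, len);
    sgl.sg_nr   = 1;
    sgl.sg_iovs = &amp;iov;
    dfs_write(dfs, obj, &amp;sgl, 0 /* offset */, NULL /* blocking */);

    dfs_release(obj);
    dfs_umount(dfs);
}</code></pre>

The shape of the calls is the point: files, directories, and byte offsets are all there, but nothing touches the kernel, and (per the permissions discussion above) nothing does a recursive permission walk on the way to the object.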

As for the Lustre side of this: what exactly creates and destroys those symlinks is unclear; in July it had sounded like Lustre foreign layouts would dynamically stitch DAOS objects into Lustre using the Lustre control plane, but now it sounds like DAOS will behave more like autofs on top of Lustre.

<h3>The burgeoning DAOS community</h3>

Although the progress and increasing tangibility of DAOS are impressive, I was most struck by the diversity of stakeholders represented at the DAOS User Group meeting.  In particular, the participation of HPE (the non-Cray part, no less!) and Lenovo was a surprise to me, since neither has an immediate interest in the Argonne exascale system which has been the biggest driver for DAOS development.  Lenovo in particular made the bold statement that they want to sell a DAOS appliance in 4Q2020/1Q2021 called the "DSS-D Integrated Solution with DAOS."

Oddly enough, the Cray part of HPE was not obviously present at the DAOS User Group despite their involvement in Argonne's Aurora system and their activity on the DAOS mailing lists.  This may just be a reflection of Cray's historic reluctance to send engineering staff to SC, but their absence was quite notable in contrast to Lenovo's head-first dive into announcing a DAOS appliance.  There were also no loud voices supporting all of the work that DAOS has put into integrating with Apache Spark, nor were there any vocal supporters of Intel's newly stated ambition to create a native <a href="https://en.wikipedia.org/wiki/SEG-Y">SEG-Y interface</a> (a format used by oil and gas) for DAOS.

<h2 id="else">Everything else</h2>

There were some interesting tidbits that I picked up at SC this year that don't fit neatly anywhere else in this post but are worth writing down.

<h3>Technical tidbits - the Cray Shasta cabinet</h3>

Much like the Cray E1000-F storage enclosure, I have also watched the Cray Shasta cabinet design evolve from a set of CAD diagrams to a living, breathing behemoth of sheet metal and coolant tubing.  SC'19 was the debut of a finished Cray Shasta compute cabinet, and it's a sight to behold:

<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-9uFvpqZqQVk/Xd47QZMmxsI/AAAAAAABHio/ZOEKu5Jn5tAnquxMm7W7qCRE7SLuQjJegCLcBGAsYHQ/s1600/DAE5AE65-2E30-4174-8B71-73CC32B5159E.jpeg"><img border="0" height="400" src="https://1.bp.blogspot.com/-9uFvpqZqQVk/Xd47QZMmxsI/AAAAAAABHio/ZOEKu5Jn5tAnquxMm7W7qCRE7SLuQjJegCLcBGAsYHQ/s400/DAE5AE65-2E30-4174-8B71-73CC32B5159E.jpeg" width="300" /></a><br /><i>The front end of the new Cray Shasta compute cabinet</i></div>

These new cabinets are all direct liquid cooled, and the water tubing from the center manifold to each blade is all done up in the above photo.  Compute blades slot in vertically, and each cabinet has French doors that open in opposite directions.
The back end is a little less neat at a glance:

<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-6s8gW6eBBmY/Xd475Yd4pUI/AAAAAAABHiw/5OlBsyNcNXAzcCZI2a388X2rMSzxAIS8wCLcBGAsYHQ/s1600/95BB7E4A-0691-4CA9-B4D5-27516D40CFA3.jpeg"><img border="0" height="400" src="https://1.bp.blogspot.com/-6s8gW6eBBmY/Xd475Yd4pUI/AAAAAAABHiw/5OlBsyNcNXAzcCZI2a388X2rMSzxAIS8wCLcBGAsYHQ/s400/95BB7E4A-0691-4CA9-B4D5-27516D40CFA3.jpeg" width="300" /></a><br /><i>The back end of the new Cray Shasta compute cabinet</i></div>

As with the front end, it opens up with French doors, and interestingly, the rear doors look identical to the front doors.  Although I didn't ask explicitly, my guess is that this means both the front and rear of the cabinets could feature giant cabinet graphics if so desired.

The rear cabling is almost all copper 200 Gb/s:

<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-ZeHIjtCDiAk/Xd48iKhreSI/AAAAAAABHi8/QF9rBirRDBgFCpPZIiGXthajR8c6OUKcwCLcBGAsYHQ/s1600/846CD644-BE1A-4ABE-BFC8-6DC43FE35B64.jpeg"><img border="0" height="300" src="https://1.bp.blogspot.com/-ZeHIjtCDiAk/Xd48iKhreSI/AAAAAAABHi8/QF9rBirRDBgFCpPZIiGXthajR8c6OUKcwCLcBGAsYHQ/s400/846CD644-BE1A-4ABE-BFC8-6DC43FE35B64.jpeg" width="400" /></a><br /><i>Cray Slingshot switch blade and Cray chassis management module</i></div>

And, in a departure from the XC and XT/XE lines, all of this copper cabling uses standard QSFP-DD connectors to carry 2x200 Gb/s.  In the above photo, you can see a genuine Cray Slingshot switch blade slotted in horizontally (cf. the vertically slotted compute blades) and the water couplings for the liquid-cooled switch blade and management module.  There are no fancy coolant waterfalls with Shasta, but that's probably not a bad thing.  As I've heard it told, the Cray-2 waterfall was a case of making lemonade from lemons; apparently Fluorinert reacts corrosively with curved plastic surfaces.

<h3>Less-technical tidbits</h3>

SC isn't purely about the technology, and truth be told, the personalities and community are the principal reason I attend every year.  It follows that a number of personal highlights for me weren't directly related to HPC at all but were nevertheless very valuable bits of information that I took away from Denver.

For example, I met two of the big marketing minds behind a major HPC company who really floored me by attributing value to my support of the HPC industry and community through social media.  Social media is really how I got my start in this industry (I started as a hobbyist), so it's gratifying to hear that I might be contributing in a way that is meaningful to kindred spirits who also got into the HPC field from unconventional paths.
It was also a reminder that there are always real people behind every corporate Twitter account, and you very well may meet them at a conference like SC.  When that happens, it can be a really positive experience ("Great to meet the person behind the handle!") or an embarrassing one ("I really did say that three years ago, didn't I?").  This year was the first time it became clear that, in trying to avoid the latter case as a matter of course, the former becomes more prevalent without a whole lot of added effort.

I also met what may have been the world's slickest corporate sales team, whose brilliantly staged choreography of chance encounters over drinks only became apparent to me as I was walking back to my hotel.  I know that plenty of people dislike interacting with sales, but being a great salesperson is really a craft in and of itself, and I respect people who are masters of their trade regardless of what it is.  And now if I ever find myself in a situation where I need to win someone over cold, I know from whom I can draw inspiration to unleash my inner "customer success manager."  It's a careful balance of drawing out concerns, driving open-ended complaints towards something actionable, and knowing where to cut through red tape and just get the right people talking.

Another non-technical area in which I was looking for information this year was management philosophy.  I've had the pleasure of working with and for some very talented managers who recognize management as a distinct vocation in and of itself, and I made it a point to get time with a few such people who've consistently built me up over the years.  One of the more pithy philosophies I took away from a colleague is that there are times when neither "asking for permission" nor "asking for forgiveness" is the right approach—rather, sometimes you have to "radiate intent."  I'd never heard this before, but it makes sense: it gives others the opportunity to say "no" and take explicit ownership of inaction, but it doesn't require anyone to say "yes" and take responsibility for the outcomes.

<h3>Staying organized</h3>

Finally, I am always trying to figure out the optimal "workflow" for keeping organized at SC, and this year was no different.  A few years ago I fully committed to not bringing my laptop to the conference venue every day in favor of a much lighter and more versatile iPad Pro, and this worked fine with two exceptions:

<ul><li>For the Parallel I/O in Practice tutorial I co-presented, I brought my laptop so that all four presenters could project from it, and I used my iPad for keeping realtime notes.</li><li>For PDSW, I brought my laptop just in case, knowing that I would be in the same room all day.
I wound up presenting from it simply because it provided a better viewing angle from the podium; the room arrangements in Denver were such that it was impossible for a speaker at the podium to see the slides being projected, so he or she had to rely on the device driving the projector to tell what content was actually on screen.</li></ul>

I did have to use the laptop at the hotel on Saturday night to make some final modifications to my PDSW talk (there are a few obscure features in PowerPoint that simply aren't exposed in the iOS version), but the rest of the conference (including a couple of BOF talks) was iPad-only.

For notetaking, I started storing all of my notes in <a href="https://agenda.com/">Agenda</a> and, where appropriate, used Agenda's feature to create a single note for each calendar entry corresponding to a formal meeting.  For unstructured conversations on the expo floor or between sessions, I kept one catch-all note per day in which I typed everything I could remember as soon as the conversation ended.  For example, the conversation I had with the designers of the E1000-F enclosure was saved as a combination of obscure written details I recorded as soon as I left the booth and photos I snapped during the conversation.

In places where typing on an iPad was not possible (e.g., in most technical sessions, where there were no tables), I used <a href="https://www.nebo.app/">Nebo</a> and an Apple Pencil to take handwritten notes.  As it turns out, handwriting on an iPad sitting on your knee is far more productive than either trying to type text letter-by-letter into the on-screen iPad keyboard or awkwardly balancing the folded-out iPad Pro keyboard on a lap or bag.  Nebo is really good at converting handwriting into ASCII, and that ASCII easily copies out and into an Agenda note.

This workflow supplanted my approach from last year, which relied exclusively on <a href="https://www.gingerlabs.com/">Notability</a> and hand-written notes with OCR.  In meetings where a table <i>was</i> available (i.e., vendor briefings), being able to type rather than handwrite was far more effective at capturing every nuance in spoken word.  I rarely ever get a copy of the slides shown at SC briefings, and quickly capturing exact hardware specs or release dates as someone tries to gloss over some unflattering details is really not possible when writing everything by hand.

For tracking action items, I've started using <a href="https://culturedcode.com/things/">Things 3</a>, which is admittedly crazy expensive but really good at capturing to-do items in under five seconds so that they can be more formally sorted, assigned start/completion dates, and so on at the end of the day or after the conference.

This all mostly worked, but I did run into a major issue with Agenda where all my ad-hoc notes vanished when I got home from Denver and my home computer decided to sync.  The good news is that Agenda uses internal versioning so the notes' contents weren't truly lost, and their support team was extremely responsive in both recovering my lost notes and releasing a fix within a week.
Not a great first experience with the app, but I'm not sure that'll stop me from using it.

<h2>Concluding thoughts</h2>

As always seems to be the case, the week of SC was over before I knew it.  There's a lot I know that I didn't get to see in terms of colleagues, exhibitors, and technical program sessions.  Of everything I <i>did</i> get to see, there's plenty that I wasn't sure I'd be allowed to write up.  So if you happened to get this far and are wondering why I didn't write about the most interesting thing that you got out of the conference this year, odds are that I didn't see it, or if I did, I wasn't sure I was allowed to write about it.  And if I <i>did</i> write about you and you won't get in trouble for being attributed by name, please let me know and I'd be happy to update this post to give you credit.

Denver was the city of the first SC I ever attended, so I was glad to be back.  I was also happy to get to see snow at least once this year:

<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-deMUJP9tRlc/Xd5GYZ0K3HI/AAAAAAABHjI/FiiiB_olwWkWTEakUzZbJZ5aQWFMAFz2wCLcBGAsYHQ/s1600/80A3C5C3-E02A-47E8-95A3-8A6A2ADE5241.jpeg"><img border="0" height="300" src="https://1.bp.blogspot.com/-deMUJP9tRlc/Xd5GYZ0K3HI/AAAAAAABHjI/FiiiB_olwWkWTEakUzZbJZ5aQWFMAFz2wCLcBGAsYHQ/s400/80A3C5C3-E02A-47E8-95A3-8A6A2ADE5241.jpeg" width="400" /></a></div>

and the convention center did an excellent job of providing space, AV support, catering, and gigantic coffee urns:

<div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-2oQTWgax8t8/Xd5HFhTgg4I/AAAAAAABHjQ/Mabh8dy-lHE-6Kd_8eVIS9nte6BP0kNfACLcBGAsYHQ/s1600/EE0A8732-B484-4FD1-A531-33B543EAA2B8.jpeg"><img border="0" height="300" src="https://1.bp.blogspot.com/-2oQTWgax8t8/Xd5HFhTgg4I/AAAAAAABHjQ/Mabh8dy-lHE-6Kd_8eVIS9nte6BP0kNfACLcBGAsYHQ/s400/EE0A8732-B484-4FD1-A531-33B543EAA2B8.jpeg" width="400" /></a></div>

I got less sleep on average this year than at any SC prior (around 6 hours a night), and yet I feel like I accomplished less of what was on my list than ever before.  I suppose that's just a sign that the conference (or perhaps my ambition!) continues to grow, and I should expect SC'20 to be even bigger, better, and more exhausting.

<h1>The Purpose of Research Computing is the Research, not the Computing</h1>

<p><em>2019-11-06 · <a href="https://hpc.social/2019/the-purpose-of-research-computing-is-the-research-not-the-computing">https://hpc.social/2019/the-purpose-of-research-computing-is-the-research-not-the-computing</a></em></p>

<p>Absolutely everyone in research computing will agree that supporting research is their centre's highest goal.  And they're not lying, but at many centres I've visited, they aren't really correct, either.</p>

<p>The day-to-day work in such a centre, naturally enough, is all about technical operations - keeping the computers running, updating software, making sure <code>/scratch</code> has enough space free, answering emails.  And of course, it has to be.
But without internal champions actively and continually turning the focus back to the <em>purpose</em> of those activities - the research outcomes that those activities are meant to support - the internal, technical activities <em>become</em> the purpose of the centre.</p>

<p>Pretty quickly, you end up with centres that are ranking their performance quarter to quarter with cluster utilization numbers, or having all researcher interactions occur via "tickets" and measuring the volume and mean time to closure of those tickets (because shorter conversations with researchers are better, right?)  And once that's happened, it becomes very hard to change; bytes and flops and closure rates have become the reason for coming to work.  It's baked into the reporting, the funding, the staff's annual performance reviews.  Sure, many of these same centres do collect and in some way report publications, but if publication rates resulting from work with the centre are down 5% last year because two productive groups need new capabilities but the centre has decided to grow current capability, no one is getting an uncomfortable call from the boss at these centres.  Ticket closure rates going down 5%, though… maybe you're getting a call.</p>

<p><img alt="Organizations that care about their clients make their offerings very clear." src="https://www.dursi.ca/assets/purpose_research_computing/pexels_inside-a-store-2199190_crop.jpg" style="float: left; width: 33%; padding: 15px 15px 15px 0px;" /></p>

<p>It doesn't take very long to spot centres like this, even from the outside.  On their websites, most prominently of all, are the statistics: their biggest cluster premiered at position X on the Top500, it has such-and-such much disk space, umpty-ump GPUs, and even more CPUs.  There are elaborate multi-stage sign-up procedures which make the centre's own reporting easy but make getting a graduate student started on the cluster tedious.  Their website will show a couple of dated research success stories, but if a researcher is visiting the website for the first time and wants to know basic facts relevant to them - things like "What is the list of services that the centre offers?", "Can you help my grad student do X, and if so, how long would it take?", or "What are the current wait times for resources and software installation?" - the researcher is out of luck; they're just directed to a "contact us" email address (which, of course, feeds into a ticket tracker).</p>

<p>(Have you ever visited a restaurant webpage and needed 4 or 5 clicks to get to the menu and the hours?  If the restaurant took the menu off the website entirely and you instead had to file a ticket so you could ask specifically whether they made spaghetti carbonara, that's what most research computing centre websites are like for researchers.  Organizations that care about their customers make their offerings very clear.)</p>

<p>The thing is, using metrics like utilization, tickets, storage, and the like to measure how much research support is happening is madness, and we all know it's madness.  You can goose utilization numbers by letting researchers run jobs inefficiently, by prioritizing job size distributions that may or may not represent researcher needs, or by having staff run a lot of benchmarks "just to make sure everything's still good."
You can keep ticket closure rates up by having something that should be clarified, automated, or fixed and instead leaving it vague, manual, or broken so that there's a stream of easily closed tickets coming in; or by irrelevantly dicing what could be a long, productive discussion with a researcher into a series of shorter "tickets."</p>

<p>It's madness because neither utilization, nor ticket closure rates, nor storage use, nor even training course enrolment are valuable to research <em>in and of themselves</em>.  They are <em>inputs</em> to the process of supporting research via computing; not the purpose, not the desired outcomes.  Being guided by metrics of those inputs and just hoping that, as long as those numbers stay good, the best possible science outcomes will happen of their own accord is an abdication of responsibility, and a squandering of scarce research-support resources.</p>

<p>And it's worse than that, of course.  Even a focus on inputs, if it were being honest, would focus on <em>all</em> the inputs, and certainly the most valuable and hardest-to-replace inputs - the technical staff.  What's the "utilization" of the staff?  What fraction of that Ph.D. chemist's time over there is spent actually enabling research projects, versus updating software packages or responding to "why is my job still in the queue" tickets?  How much time does our data centre monitoring expert spend swapping memory and cables?  Is that up this quarter, or down; and if it's down, why?  What fraction of the expertise of the support staff is being used?  What is the meaningful contribution rate?</p>

<p><img alt="Inputs produce outputs, which produce outcomes, which produce impact. The inputs are not what you should measure." src="https://www.dursi.ca/assets/purpose_research_computing/shutterstock_input_outcome.jpg" style="float: right; width: 50%; padding: 15px 0px 15px 15px;" /></p>

<p>The reason that those staff input metrics aren't being measured and others are is simple, and clarifying.  The hardware inputs aren't being used as metrics due to a (false) belief that they are meaningful in and of themselves, nor because of an (incorrect) understanding that they can be taken in a principled way as a proxy for the desired research outcomes.  They're used because they're easy to gather.  And they're comfortable to use because they don't really require centre managers to make any hard choices.</p>

<p>Focussing on the inputs instead of the outputs - or even better, outcomes - isn't only a research computing thing, of course.  It's an absolutely classic mistake in a lot of sectors; a google search for <a href="https://www.google.com/search?q=focus+on+outcomes%2C+not+inputs&amp;oq=focus+on+outcomes%2C+not+inputs">focus on outcomes, not inputs</a> returns 139 million results.</p>

<p>There are two prototypical reasons why it happens.  If I were feeling in a twitter-ranty mood again, I might be tempted to draw the analogy to the first case - lack of competition, due to private- or public-sector monopolies, reducing the urgency of focusing on customers' needs.  You see this in internal departments of large organizations, where the "customer base" is locked in, or in other places where there's no real competition (hello, most departments of motor vehicles, cable companies, and Google Mail support!).
These departments end up developing a relentless internal focus, having cryptic and opaque internal decision-making processes seemingly unrelated to what their clients actually want, and famously making clients jump through hoops to get their needs met.  This isn't caused by malevolence, or even indifference; it couldn't be, for it to be so widespread.  It's just that, absent any real driver to focus on <em>customer</em> outcomes, it is almost impossible to drive internal priorities towards anything other than internal efficiencies.  Those few companies in this situation that <em>do</em> manage to maintain a focus on client outcomes do so by constantly expending almost heroic levels of unseen effort inside the organization.</p>

<p>But I don't actually think that's what is driving some research computing centres' inputs focus when it comes to operations and technical decision making.  I think it comes almost from the other direction, the other classic case: that of small nonprofits, typically enormously concerned with their clients, who focus first on a very basic need and then don't know how to generalize beyond that as they grow.</p>

<p>Imagine a small nonprofit, passionately committed to helping people, that gets its start meeting a very basic need - let's say they're providing before-school breakfasts to children in or near poverty.  At that level, the activity <em>is</em> the outcome; they can count the number of breakfasts served, try to get better at serving breakfasts with a given amount of donations, work on raising money to fund more breakfasts, maybe expand to different schools or supply a wider range of breakfasts to be inclusive of students with particular dietary needs.  They are <em>super</em> committed to their clients.</p>

<p>But as that nonprofit starts expanding, it becomes clear their client base needs a wider range of services.  It starts partnering with food banks, to help fight student hunger at home; its staff participate in some after-school tutoring programs.  But it has no way to prioritize these activities.  Is it a hunger-fighting nonprofit?  Is it a help-underprivileged-students-succeed-at-school nonprofit?  If it could double the tutoring efforts at the cost of slowing the growth of the breakfast program next year, is that the right thing to do, or not?  How would it know?</p>

<p>This is a terrifying transition for a nonprofit to go through.  Before, it knew exactly what it was doing, and had very clear metrics for success.  In this intermediate stage, it probably has some earmarked resources to participate in the tutoring and foodbanks, and it touts that work, but it doesn't know how to do anything but report on school breakfasts.  To go beyond this means making choices about what it will prioritize - and, more scarily, what it will <em>not</em> prioritize - and working on program evaluation plans for the much more ambitious but more ambiguous goals of "fighting hunger" or "helping students succeed at school."  Many nonprofits never make that transition.  Some stay small and focussed, which works well but limits their impact; many stay in limbo in that uncomfortable intermediate state until they are overtaken by events or other organizations.</p>

<p>At most research computing centres, I think the story is more like that of the nonprofit.
Except, let's be honest: while providing breakfasts is inherently meaningful and has very few organizations willing to do it, providing cycles and storage isn't, and has many alternate providers.</p>

<p>But going beyond meeting the basic needs of providing research computing cycles and storage, which was a much greater need in the 90s than it is today, is genuinely hard.  It's very labour intensive - it requires going out to the entire research community you aim to support, including those you've never had a working relationship with, and understanding needs.  It's very uncomfortable - you have to then prioritize those needs based on their value to the larger enterprise and to where you can make the most difference, and that means having awkward conversations about <em>not</em> prioritizing other needs.  And it's incredibly uncertain - it means going from evaluations based on numbers on a dashboard that are largely under your control, to unfamiliar qualitative evaluations and the hard work of trying to measure research outcomes.</p>

<p>But there's a relatively straightforward approach to get there starting from where you are.  It takes some work, but just going through the process is clarifying.</p>

<ol>
  <li><strong>What do you do now?</strong>  You know, broadly, what services you offer to researchers; you've just never had to make it explicit.  Start to put together a very simple <a href="https://www.cherwell.com/library/blog/7-steps-to-defining-and-designing-an-effective-service-catalog/">service catalog</a>.  It doesn't have to be very complicated; figure out internally what services you offer, at quite a high level, in language that researchers would care about.  Get staff to offer suggestions.  For each service, for internal consumption only, figure out what's involved in providing it - the typical amount of hours involved, who has to coordinate with whom, <em>etc.</em></li>
  <li><strong>How do those services help researchers?</strong>  Again, you have a broad sense of this, but make it concrete.  Is it more publications?  Higher-impact publications?  <em>Faster</em> publication?  Better collaboration opportunities?  Higher funding success rates?  Better job prospects for students and postdocs?  More successful faculty or postdoc recruitment?  Friendly users or your VPR can help with this.  Come up with a handful that seem most important in your user community.</li>
  <li><strong>Connect services and benefits.</strong>  Come up with some concrete examples of how you've provided each of those benefits with the services you make available.  You may find benefits that you can't yet justify claiming you provide, or services you've forgotten about.</li>
  <li><strong>Refine your services and benefits lists.</strong>  Start talking about these benefits and services in talks at user groups or when doing outreach to departments, new hires, incoming graduate student training, and the like.  Find out which ones attract attention and which ones don't.  Ask for suggestions for new items for the lists, and new connections between the two.</li>
  <li><strong>Start thinking about indicators and evaluation.</strong>  Besides anecdotes, how could you convince your funder, or senior leadership at your institution, that you provide those benefits?  How could you show you were getting better?  How could you convince them that a 15% increase in funding would provide some meaningful improvement to the research institution?
The answer will depend on the benefits you've chosen, but there are lots of <a href="https://www.councilofnonprofits.org/tools-resources/evaluation-and-measurement-of-outcomes">resources</a> out <a href="https://managementhelp.org/evaluation/outcomes-evaluation-guide.htm">there</a> to help you with this.  Closer to home, I absolutely promise you there are people at your institution who will talk to you about program evaluation until you want to pass out just to enjoy some quiet.  What you come up with will seem quite different to you.  They won't be instruments with 3 decimal places of accuracy; they may be citation counts or randomly sampled surveys or qualitative interviews.  Measuring research is hard - but everyone in research knows and understands this.  Approaches like short surveys or interviews are labour intensive, but provide amazing information - a constant stream of incoming success stories that you can make use of, and less successful stories you can learn from.</li>
  <li><strong>Start thinking about rebalancing your service offerings.</strong>  Once you have these lists, and approaches to evaluations, then and only then do you have a principled way to make decisions about which services to invest more in, and which to invest less in.  And you'll have very convincing arguments to take to funders and leadership.</li>
</ol>

<p>I get it that going through this process - to the point where you can meaningfully ask what the right next thing to do to help research is - isn't easy.  It absolutely isn't.  It's a lot of work, and while it is useful in many different ways, it still doesn't make things easy - if anything, it forces you to confront tradeoffs and hard choices that focusing on inputs may have let you avoid.  A centre that hasn't been thinking this way for a while will have some low-hanging fruit that can be picked to start, but after that there will be multiple ways for a centre to be supporting research, and no clear answer as to which is the "best."  Making those choices will require knowing the strengths of the centre and knowing where those strengths are needed in the research community it serves — and not all research needs are the same!  But <em>those</em> are questions that team leaders need to be wrestling with.</p>

<p>The alternative, just running a set of computers for the same friendly user group of people year after year, isn't research support; it's a hobby.</p>

<h1>LetsEncrypt for Multiple Hosts</h1>

<p><em>2019-10-11 · <a href="https://hpc.social/2019/letsencrypt-for-multiple-hosts">https://hpc.social/2019/letsencrypt-for-multiple-hosts</a></em></p>

<p>Using <a href="https://letsencrypt.org/">LetsEncrypt</a> for certificate creation and management has made secure communications much easier.  Instead of contacting the IT department of your university to request a certificate, you can skip the middle man and generate your own certificate which is trusted around the world.</p>

<p>A common use case of certificates is to secure data transfers.  Data transfers that use the GridFTP, XRootD, or HTTPS transfer protocols can load balance between multiple servers to increase throughput.  <a href="https://www.keepalived.org/">keepalived</a> is used to load balance between multiple transfer servers.
The certificate provided to the clients needs to include the virtual host address of the load balancer as well as the hostname of each of the worker nodes.</p>

<ol>
  <li>Create a shared directory between the data transfer nodes</li>
  <li>Install httpd on each of the data transfer nodes</li>
  <li>Configure httpd to use the shared directory as the "webroot"</li>
  <li>Configure <code>keepalived</code> to virtualize port 80 to at least one of your data transfer nodes</li>
  <li>Run certbot with the webroot option, as well as the multiple hostnames of the data transfer nodes</li>
</ol>

<p>Create an NFS share that each of the data transfer nodes can read.  The steps for creating an NFS shared directory are outside the scope of this guide; here, the shared directory will be referred to as <code>/mnt/nfsshare</code>.  Next, install httpd on each of the data transfer nodes:</p>

<pre><code>root@host $ yum install httpd
</code></pre>

<p>Create a webroot directory within the shared directory on one of the nodes:</p>

<pre><code>root@host $ mkdir /mnt/nfsshare/webroot
</code></pre>

<p>Configure httpd to export the same webroot on each of the data transfer nodes:</p>

<pre><code>&lt;VirtualHost *:80&gt;
    DocumentRoot "/mnt/nfsshare/webroot"
    &lt;Directory "/mnt/nfsshare/webroot"&gt;
        Require all granted
    &lt;/Directory&gt;
&lt;/VirtualHost&gt;
</code></pre>

<p>Configure <code>keepalived</code> to virtualize port 80 to at least one of your data transfer nodes.  Add to your configuration:</p>

<pre><code>virtual_server &lt;VIRTUAL-IP-ADDRESS&gt; 80 {
    delay_loop 10
    lb_algo wlc
    lb_kind DR
    protocol tcp

    real_server &lt;GRIDFTP-SERVER-#1-IP-ADDRESS&gt; 80 {
        TCP_CHECK {
            connect_timeout 3
            connect_port 80
        }
    }
}
</code></pre>

<p>Run <code>certbot</code> with the webroot options on only one of the data nodes.  The first domain in the command line should be the virtual hostname:</p>

<pre><code>root@host $ certbot certonly -w /mnt/nfsshare/webroot -d &lt;VIRTUAL_HOSTNAME&gt; -d &lt;DATANODE_1&gt; -d &lt;DATANODE_N&gt;...
</code></pre>

<h1>Computational Science Collaborations Train Great Managers - But Trainees Might Need Help To Become Good Managers First</h1>

<p><em>2019-09-29 · <a href="https://hpc.social/2019/computational-science-collaborations-train-great-managers-but-trainees-might-need-help-to-become-good-managers-first">https://hpc.social/2019/computational-science-collaborations-train-great-managers-but-trainees-might-need-help-to-become-good-managers-first</a></em></p>

<blockquote>
  <p><em>What I write below likely applies to fields of theoretical and observational science that involve collaborations, too.  I think the experiences of trainees in laboratory science are likely significantly different, as are those of people who spent a large amount of time working in a single group on a well-defined large project.
I'd certainly like to hear from colleagues in those areas: are there similarities, or are things quite different?</em></p>
</blockquote>

<p>We don't like to talk about it much, but the natural career path in academia - from undergrad to research assistant to graduate student, postdoc, junior PI and beyond - involves a progression first towards and then away from the actual doing of science to the coordinating, orchestrating, planning of, and advocating for scientific projects; or, in other words, management (though we'd <em>never</em> call it that; academia is full of open disdain for anything that smacks of management, marketing, or other suspiciously real-world activities).</p>

<p>But computational-science academic environments are pretty particular places, with different approaches to working in a team than, say, in much of business.</p>

<p>First, academic work is largely performed by trainees like students, who have a very different relationship to their academic supervisor than an employee does to their manager.  At its best, when an academic lab is run by ethical and competent leadership<sup id="fnref:1"><a class="footnote" href="#fn:1" rel="footnote">1</a></sup>, significant effort is put into developing those trainees, giving them increasing responsibilities, and looking for opportunities for them to apply those emerging skills to new problems.</p>

<p>Second, since much of the work is on open research problems, it's very difficult to judge how long something "should" take, so deadlines on assigned tasks are relatively uncommon; updates tend to sound like "here's what I managed to get done this week," and it is what it is.</p>

<p>Third, due to the open-endedness, the trainee/mentor relationship, and modelling the extreme independence of senior academics, there is a norm of collegiality.  Directing someone's work comes across as very heavy-handed; the final work output can be ruthlessly assessed, but the path to get there, the work process, is somewhat off-limits.</p>

<p>Fourth, it's common - maybe even the norm - for projects to be tackled with others not only outside of the team, but in different institutions entirely.</p>

<p>Finally, the independence of researchers, the dynamic nature of research, and the fact that so many coworkers are elsewhere mean many working relationships are comparatively short-lived.</p>

<p>So imagine that you are a postdoc - the most senior trainee - in a computational lab, routinely working in multi-institutional collaborations, and this is where you're developing your people and project management chops.  You are directing some particular piece of research that will lead to publications key to your career.  You aim to be the lead author on one or more of those papers, you have a clear idea of the path to get there, and you're driven — you'll be on the job market next year, and you learned at a conference that a competitor's lab is looking at some of the same questions.</p>

<p>But your "project team" are peers, or even academics more senior than you, and many are outside your institution entirely; getting them to do anything is a matter of persuasion.  Your local, more junior trainees are just beginning their journey, and for them to contribute meaningfully you have to find ways to develop their skills and the best ways to benefit from those skills.
You want everyone to be invested, contributing, and growing their skills, and you don't have time to direct people in how to do their work even if you were so inclined.  And the clock is ticking.</p>

<p>What kind of skills are you developing as you're thrust into this situation?</p>

<hr />

<p>Much of the computing and technical community is teaching itself things about management that the rest of the world has known for a half-century.  Famously, Google's Project Oxygen, which was aimed at least in part at "proving" that management does not matter, started in 2008 looking at management across teams in the organization, and found that it really does.  (Surprise!)  Their original findings identified <a href="https://www.inc.com/marcel-schwantes/the-8-biggest-things-that-google-managers-do-to-su.html">8 characteristics of managers of successful teams</a>, and 3 pitfalls that managers in less successful teams fell into.</p>

<p>Those characteristics of good managers, in decreasing order of importance:</p>

<ol>
  <li>They're good coaches.</li>
  <li>They empower their team and don't micro-manage.</li>
  <li>They express interest in their team members' success and personal well-being.</li>
  <li>They're productive and results-oriented.</li>
  <li>They're good communicators and they listen to the team.</li>
  <li>They help employees with career development.</li>
  <li>They have a clear vision and strategy for the team.</li>
  <li>They have key technical skills that help them advise the team.<sup id="fnref:2"><a class="footnote" href="#fn:2" rel="footnote">2</a></sup></li>
</ol>

<p>How will our postdoc rate against those criteria?  Well:</p>

<ol>
  <li>They are going to be very concerned with skills development in their direct reports, encouraging them on to bigger and better things — so the postdoc learns to be a good coach;</li>
  <li>They certainly won't micromanage — they'll let team members decide how to approach their work;</li>
  <li>They'll be very aware of the need to support their team members' success, partly due to norms around credit, and partly due to expectations that people move up and on in their careers;</li>
  <li>They're very focussed on high-quality work and the steps to get there;</li>
  <li>Because they've never been able to rely on role power or organizational authority to coordinate getting work done, they will have developed the communication skills necessary to communicate with and listen to the team, at least around tasks;</li>
  <li>Thinking about team members' career advancement needs and goals is going to be second nature, although they might take it for granted that they know what those goals are;</li>
  <li>They'll have quite clear goals and a vision, even if they're not used to having to communicate them explicitly in an environment where everyone knows that the goal is discrete publications and can clearly see where their work fits in;</li>
  <li>They will typically be quite proficient in the relevant technical skills.</li>
</ol>

<p>That's just about a clean sweep!</p>

<p>So I claim that the sort of training I've seen people get on projects in computational (or observational or theoretical) science collaborations equips people with the advanced skills to become great managers.</p>

<p>But there's a downside.
The very hands-off approach to management (indeed, the refusal to countenance that "management" is even an appropriate thing for scientists to stoop to) means that some of the more basic, fundamental skills are lacking.  The same early work at Google pointed out key shortcomings of their less successful managers:</p>

<ol>
  <li>Have trouble making a transition to the team.</li>
  <li>Lack a consistent approach to performance management.</li>
  <li>Spend too little time managing and communicating.</li>
</ol>

<p>And those start to look like real sticking points for our postdoc.  Almost no one is taught management skills before their first management position, but scientists are more or less taught that management <em>shouldn't</em> be done; "we're not that sort of people."  So:</p>

<ol>
  <li>Making the transition to being the manager of the team is going to be doubly difficult for our postdoc — both in internalizing their role as a manager, and in putting in the time to develop really solid working relationships with the team members.</li>
  <li>Performance communications - giving people feedback (positive and negative) on their work often and regularly, rather than waiting weeks or months for some big sub-project to be done and then assessing the finished product — is going to be completely foreign, if not anathema, to them.</li>
  <li>Our postdoc is going to spend little to no time actually managing, or communicating about the things they haven't had to communicate about before, like sharing the vision and aims of the team, or finding out the specific career goals of individual team members.</li>
</ol>

<p>So while many or even all of the advanced skills might well be in place for scientist trainees to excel at management outside of the academy, the basic skills — or even models of what the basic skills would look like — are often going to be lacking.</p>

<p>But those basic skills are the easiest to address!  Anyone can learn them, and someone who's spent a good chunk of their career in the sciences <em>certainly</em> can.</p>

<p>So many computational scientists do end up becoming good — and then quickly become great — managers on their own, but it can take a lot of trial and error, and be stressful for all involved.  (My own transition towards becoming good has been… uneven.)</p>

<p>I don't think that transition <em>has</em> to be so difficult; today there are some fantastic resources out there to help.  And maybe it's where I've been looking or which materials resonate with me, but a lot of the strongest sources that really nail the basics come from people on the more technical side.  I'm a huge fan of <a href="https://www.manager-tools.com">Manager Tools</a>, which has amassed a huge library of "here's a number of steps you can start taking today" podcasts and a couple of books, with data to back them up.  A number of colleagues really like <a href="https://www.oreilly.com/library/view/the-managers-path/9781491973882/">The Manager's Path</a>, which takes a career-long view of stepping up the career ladder (set in the tech industry, but most of the material would carry over to other fields) and the different skills and responsibilities needed at each stage.
And Google's ongoing organizational research and resulting training materials at <a href="https://rework.withgoogle.com">rework.withgoogle.com</a> are well worth reading.</p>

<p>Scientists learn a lot of transferrable skills in their training, and the world needs more of their input in teams and projects across all sectors.  There's a stereotype of scientists as nerdy introverts, but collaborations across institutions build really quite advanced communication and management skills that can serve them very well almost anywhere.  And if they need a little help adjusting to the different skills needed for management of projects or teams outside of academia, there are resources out there now to help them succeed.  If there are some that have especially helped you, please do share them with me and I'll list them here.</p>

<div class="footnotes">
  <ol>
    <li id="fn:1">
      <p>Which, more visibly now than ever before, cannot be taken as a given.</p>
    </li>
    <li id="fn:2">
      <p>Note that at Google, technical skills are <em>dead last</em> as a skill for managers; but that largely results from Google having such a high technical bar for hiring.</p>
    </li>
  </ol>
</div>

<h1>ISC'19 Recap</h1>

<p><em>2019-06-27 · <a href="https://hpc.social/2019/isc-19-recap">https://hpc.social/2019/isc-19-recap</a></em></p>

I was fortunate enough to attend the <a href="https://www.isc-hpc.com/">ISC HPC conference</a> this year, and it was a delightful experience from which I learned quite a lot.  For the benefit of anyone interested in what they may have missed, I took the opportunity on the eleven-hour flight from Frankfurt to compile my notes and thoughts from the week.

I spent most of my time in and around the sessions, BOFs, and expo focusing on topics related to I/O and storage architecture, so that comprises the bulk of what I'll talk about below.  Rather than detail the conference chronologically as <a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html">I did for SC'18</a> though, I'll only mention a few cross-cutting observations and trends here.

I'll also not detail the magnificent <a href="https://hps.vi4io.org/events/2019/iodc">HPC I/O in the Data Center workshop</a> here, but anyone reading this who cares about storage or I/O should definitely flip through the slides on the <a href="https://hps.vi4io.org/events/2019/iodc#agenda">HPC-IODC workshop website</a>!  This year HPC-IODC and <a href="https://wopsss.org/">WOPSSS</a> merged their programs, resulting in a healthy mix of papers (in both CS research and applied research), expert talks, and fruitful discussion.

<h2>High-level observations</h2>

As is often the case for ISC, there were a few big unveilings early in the week.  Perhaps the largest was the disclosure of several key architectural details surrounding the <a href="https://aurora.alcf.anl.gov/">Aurora exascale system to be deployed at Argonne in 2021</a>.  <a href="https://www.tacc.utexas.edu/systems/frontera">TACC's Frontera system</a>, a gigantic Dell cluster stuffed with Intel Cascade Lake Xeons, made its debut on the Top500 list as well.  In this sense, Intel was in good form this year.
And Intel has to be, since only one of the handful of publicly disclosed pre-exascale (<a href="https://www.nextplatform.com/2018/10/30/berkeley-lab-first-in-line-for-cray-shasta-supercomputers/">Perlmutter</a> and <a href="https://www.nextplatform.com/2018/06/21/details-emerge-on-post-k-exascale-system-with-first-prototype/">Fugaku</a>) and exascale (<a href="https://www.nextplatform.com/2019/05/07/cray-amd-tag-team-on-1-5-exaflops-frontier-supercomputer/">Frontier</a>) systems will be using Intel parts.

The conference also had an anticipatory undertone as these pre-exascale and exascale systems begin coming into focus.  The promise of ARM as a viable HPC processor technology is becoming increasingly credible as <a href="https://vanguard.sandia.gov/astra/index.html">Sandia's Astra machine</a>, an all-ARM cluster integrated by HPE, appeared throughout the <a href="https://2019.isc-program.com/">ISC program</a>.  These results are paving the way for Fugaku (the "post-K" machine), which will prove ARM and its SVE instruction set at extreme scale.

Also contributing to the anticipatory undertone was a lot of whispering that occurred outside of the formal program.  The recently announced acquisition of Cray by HPE was the subject of a lot of discussion and conjecture, but it was clear that the dust was far from settled and nobody purported to have a clear understanding of how this would change the HPC market.  There was also some whispering about a new monster Chinese system that was on the cusp of making this year's ISC Top500.  Curiously, the Wuxi supercomputing center (home of the Sunway TaihuLight system) had a booth on the show floor, but it was completely vacant.

Also noticeably absent from the show floor was NVIDIA, although they certainly sent engineers to participate in the program.  By comparison, AMD was definitely present, although they were largely promoting the impending launch of Rome rather than their GPU lineup.  A number of HPC solutions providers were excited about Rome because of both high customer demand and promising early performance results, and there wasn't a single storage integrator with whom I spoke that wasn't interested in what doors will open with an x86 processor and a PCIe Gen4 host interface.

<h2>Intel disclosures about Aurora 2021</h2>

Perhaps the biggest news of the week was a "<a href="https://2019.isc-program.com/presentation/?id=inv_sp184&amp;sess=sess212">special event</a>" presentation given by Intel's Rajeeb Hazra which disclosed a number of significant architectural details around the Aurora exascale system being deployed at Argonne National Laboratory in 2021.

<h3>Nodes will be comprised of Intel Xeon CPUs and multiple Intel GPUs</h3>

Intel has confirmed that Aurora will be built on Intel-designed general-purpose GPUs based on the "Xe" architecture, with multiple GPUs per node.  With this disclosure and the knowledge that nodes will be connected with Cray's Slingshot interconnect, it is now possible to envision what a node might look like.
Furthermore, combining the disclosure of a high GPU:CPU ratio, the Aurora power budget, and some vague guessing at the throughput of a 2021 GPU narrows down the number of nodes that we may expect to see in Aurora.<br /><br />Although no specific features of the Intel GPUs were disclosed, Intel was also promoting their new <a href="https://en.wikichip.org/wiki/x86/avx512vnni">AVX512-VNNI instructions</a> to position their latest top-bin Xeon cores as the best option for inference workloads.  Coupled with what we can assume will be highly capable GPUs for training acceleration, Intel is building a compelling story around their end-to-end AI portfolio.  Interestingly, news that <a href="https://www.nextplatform.com/2019/06/17/nvidia-makes-arm-a-peer-to-x86-and-power-for-gpu-acceleration/">NVIDIA is partnering with ARM</a> dropped this past week, but NVIDIA’s noted absence from ISC prevented a comparable ARM-NVIDIA AI solution from shining through.<br /><br /><h3>System will have over 10 PB of system memory</h3>Aurora will have a significant amount of memory, presumably a combination of HBM, DDR, and/or Optane persistent memory.  The memory capacity is markedly higher than that of the AMD-based Frontier system, suggesting that Intel may be leveraging Optane persistent memory (which has a lower cost per bit than DDR) to supplement the HBM that is required to feed such a GPU-heavy architecture.<br /><br /><h3>The storage subsystem will deliver over 230 PB of capacity at over 25 TB/sec</h3>Perhaps the most interesting part of Aurora is its I/O subsystem, which will use an object store and an all-solid-state storage architecture instead of the traditional parallel file system.  This will amount to 230 PB of usable flash capacity that can operate in excess of 25 TB/sec.  Although I’ll describe this storage architecture in more depth below, combining the performance point of 25 TB/sec with the aforementioned high GPU:CPU ratio suggests that each compute node will be able to inject a considerable amount of I/O traffic into the fabric.  This points to very capable Xeon cores and very capable NICs.<br /><br /><h3>The programming model for the system will utilize SYCL</h3>Intel has announced that its “One API” relies on the <a href="https://www.khronos.org/sycl/">Khronos Group’s SYCL standard</a> for heterogeneous programming in C++ rather than the incumbent choices of OpenMP, OpenACC, or OpenCL.  This does not mean that OpenMP, OpenACC, and/or OpenCL won’t be supported, but it does reveal where Intel intends to put all of its effort in enabling its own GPUs and FPGAs for HPC.  They further emphasized their desire to keep these efforts open, standards-based, and portable, undoubtedly demonstrating stark contrast with the incumbent GPU vendors.  This is an interesting long-term differentiator, but time will tell whether SYCL is able to succeed where OpenCL has failed and gain a foothold in the HPC ecosystem.<br /><br /><h2>DAOS will be HPC’s gateway drug to object stores</h2>DAOS (the “Distributed Asynchronous Object Store,” pronounced like it’s spelled) is an object store that Intel has been developing for the <a href="https://www.theregister.co.uk/2012/07/11/doe_fastforward_amd_whamcloud/">better part of a decade in collaboration with the US Department of Energy</a>.
The DAOS name has become overloaded in recent years as a result of its changing scope, focus, and chief architects, and the current version is quite different from the original DAOS that was prototyped as a part of the DOE Fast Forward program (e.g., only <a href="https://www.snia.org/sites/default/files/SDC15_presentations/dist_sys/EricBarton_DAOS_Architecture_Extreme_Scale.pdf">one of three original DAOS components, DAOS-M, survives</a>).  A few key features remain the same, though:<br /><ul><li>It remains an object store at its core, but various middleware layers will be provided to expose alternate access APIs and semantics</li><li>It is specifically designed to leverage Intel Optane persistent memory and NAND-based flash to deliver extremely high IOPS in addition to high streaming bandwidth</li><li>It relies on user-space I/O via <a href="http://mercury-hpc.github.io/">Mercury</a> and <a href="https://spdk.io/">SPDK</a> to enable its extreme I/O rates</li><li>Its <a href="https://github.com/daos-stack/daos/blob/master/doc/storage_model.md#4.1">storage architecture</a> is still based on a hierarchy of servers, pools, containers, and objects</li></ul>Object stores have historically not found success in HPC due to HPC apps’ general dependence on POSIX-based file access for I/O, but the Aurora DAOS architecture cleverly bridges this gap.  I was lucky enough to run into Johann Lombardi, the DAOS chief architect, at the Intel booth, and he was kind enough to walk me through a lot of the details.<br /><br />DAOS will provide seamless integration with a POSIX namespace by using <a href="https://jira.whamcloud.com/browse/LU-11376">Lustre’s new foreign layout feature</a>, which <a href="https://www.eofs.eu/_media/events/lad18/15_johann_lombardi_intel_cross_tier_unified_namespace_v3.pdf">allows an entity in the Lustre namespace to be backed by something that is not managed by Lustre</a>.  In practice, a user will be able to navigate a traditional file namespace that looks like any old Lustre file system using the same old ls and cd commands.  However, some of the files or directories in that namespace may be <a href="https://github.com/daos-stack/daos/blob/master/src/client/dfs/README.md">special DAOS objects</a>, and navigating into a DAOS-backed object transparently switches the data path from one that uses the traditional Lustre client stack to one that uses the DAOS client stack.  In particular,<br /><ul><li>Navigating into a directory that is backed by a DAOS container will cause the local DAOS agent to mount that DAOS container as a POSIX namespace using FUSE and junction it into the Lustre namespace.  Files and subdirectories contained therein will behave as regular POSIX files and subdirectories for the most part, but they will only honor a subset of the POSIX consistency semantics.</li><li>Accessing a file that is backed by a DAOS container (such as an HDF5 file) will cause the client to access the contents of that object through whatever API and semantics the DAOS adapter for that container format provides.</li></ul>DAOS also includes a preloadable library which allows performance-sensitive applications to bypass the FUSE client entirely and map POSIX API calls to DAOS native API calls.  For applications that use middleware such as HDF5 or MPI-IO, I/O will be able to bypass the POSIX emulation layer entirely and get the highest performance through DAOS-optimized backends.  In the most extreme cases, applications can also write directly against the DAOS native object API to control I/O with the finest granularity, or use one of DAOS’s addon APIs that encapsulate other non-file access methods such as key-value or array operations.
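<p>To make this layering concrete, here is a rough Python sketch of what the two extremes might look like from an application’s perspective.  Everything below is a hypothetical stand-in of my own invention (the paths and the <code>daos_connect</code>/<code>open_container</code>/<code>key_value_store</code> names); the real client APIs live in the daos-stack repositories linked above:</p>

<pre><code># Hypothetical illustration only; these are NOT the real DAOS API names.

data = b"some checkpoint bytes"

# Path 1: plain POSIX access through the FUSE mount that is junctioned
# into the Lustre namespace; the DAOS agent handles everything behind
# the scenes, at the cost of FUSE overhead and relaxed POSIX semantics.
with open("/lustre/project/daos_container/output.dat", "wb") as f:  # made-up path
    f.write(data)

# Path 2: native object access that bypasses POSIX emulation entirely,
# e.g., through a key-value "addon" API.
pool = daos_connect(pool_label="prod_pool")            # hypothetical call
cont = pool.open_container("simulation_checkpoints")   # hypothetical call
kv = cont.key_value_store()                            # hypothetical call
kv.put(b"timestep-00042", data)  # no POSIX locking or path traversal in the way
</code></pre>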
A significant amount of this functionality is already implemented, and Intel was showing DAOS performance demos at its booth that used both IOR (using the DAOS-native backend) and Apache Spark:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-pQCEzVp5fKE/XRP6nbvlVrI/AAAAAAABFdE/VhnOgDCJmKUvdp6NQfuF29oCWvnZvGy5ACLcBGAs/s1600/IMG_6381.JPG" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="320" src="https://1.bp.blogspot.com/-pQCEzVp5fKE/XRP6nbvlVrI/AAAAAAABFdE/VhnOgDCJmKUvdp6NQfuF29oCWvnZvGy5ACLcBGAs/s320/IMG_6381.JPG" width="240" /></a></div><br /><br />The test hardware was a single DAOS server with Intel Optane DIMMs and two Intel QLC NAND SSDs, and it demonstrated over 3 GB/sec on writes and over a million read IOPS on tiny (256-byte) transfers.  Johann indicated that their testbed hardware is being scaled up dramatically to match their <a href="https://wiki.hpdd.intel.com/display/DC/roadmap">extremely aggressive development schedule</a>, and I fully expect to see performance scaling results at SC this November.<br /><br />This is all a far cry from the original Fast Forward DAOS, and this demo and discussion on the show floor were the first time I felt confident that DAOS was not only a good idea, but a solution that can realistically move HPC beyond the parallel file system.  Its POSIX compatibility features and Lustre namespace integration provide enough familiarity and interoperability to make it something usable for the advanced HPC users who will be using the first exascale machines.<br /><br />At the same time, it applies a number of new technologies in satisfying ways (Mercury for user-space network transport, <a href="https://www.pdl.cmu.edu/PDL-FTP/PDSI/CMU-PDL-08-110.pdf">GIGA+ for subtree sharding</a>, Optane to coalesce tiny I/Os, …) that, in most ways, put it at technological parity with other high-performance all-flash parallel storage systems like <a href="https://www.weka.io/">WekaIO</a> and <a href="https://www.vastdata.com/">VAST</a>.  It is also resourced at similar levels, with DOE and Intel investing money and people in DAOS at levels comparable to the venture capital that has funded the aforementioned competitors.  Unlike its competitors though, it is completely open source and relies on standard interfaces into hardware (<a href="https://ofiwg.github.io/libfabric/">libfabric</a>, <a href="http://spdk.io/">SPDK</a>), which gives it significant flexibility in deployment.<br /><br />As with everything exascale, only time will tell how DAOS works in practice.  There are plenty of considerations peripheral to performance (data management policies, system administration, and the like) that will also factor into the overall viability of DAOS as a production, high-performance storage system.  But so far DAOS seems to have made incredible progress in the last few years, and it is positioned to shake up the HPC I/O discussion come 2021.<br /><br /><h2>The Cloud is coming for us</h2>This ISC also marked the first time I felt that the major cloud providers were converging on a complete HPC solution that could begin eroding campus-level and mid-range HPC.
Although application performance in the cloud has historically been the focus of most HPC-vs-cloud debate, compute performance is largely a solved problem in the general sense.  Rather, data—its accessibility, performance, and manageability—has been the single largest barrier between most mid-range HPC users and the cloud.  The convenience of a high-capacity and persistent shared namespace is a requirement in any HPC environment, but there have historically been no painless ways to produce this environment in the cloud.<br /><br />AWS was the first to the table with a solution in <a href="https://aws.amazon.com/fsx/lustre/">Amazon FSx for Lustre</a>, a managed Lustre-as-a-service that makes it much easier to orchestrate an HPC workflow that relies on a high-performance, high-capacity, shared file system.  This has prompted the other two major cloud vendors to come up with competing solutions: Microsoft Azure’s partnership with Cray is resulting in a <a href="https://www.cray.com/solutions/supercomputing-as-a-service/cray-clusterstor-in-azure">ClusterStor Lustre appliance in the cloud</a>, and Google Cloud will be offering <a href="https://cloud.google.com/blog/products/storage-data-transfer/competing-with-supercomputers-hpc-in-the-cloud-becomes-reality">DDN’s EXAScaler Lustre appliances as a service</a>.  And Whamcloud, the company behind Lustre, offers its own <a href="https://wiki.whamcloud.com/display/PUB/Cloud+Edition+for+Lustre+Software">Lustre Cloud Edition</a> on all three major cloud platforms.<br /><br />In addition to the big three finally closing this gap, a startup called <a href="https://kmesh.io/">Kmesh</a> burst onto the I/O scene at ISC this year and is offering a cloud-agnostic solution for higher-touch parallel file system integration and management in the cloud for HPC.  Vinay Gaonkar, VP of Products at Kmesh, gave insightful presentations at several big I/O events during the week that spoke to the unique challenges of designing Lustre file systems in a cloud ecosystem.  While architects of on-prem storage for HPC are used to optimizing for price-performance on the basis of purchasing assets, optimizing price-performance from ephemeral instance types often defies conventional wisdom; he showed that instance types that may be considered slow on a computational basis can deliver peak I/O performance at a lower cost than the beefiest instance available:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-b12cCvM1gvo/XRQEGgHTWaI/AAAAAAABFdQ/1jrm3GjLeTkHsd-zJywV4KN1QAnVGkdSwCLcBGAs/s1600/IMG_6395.jpg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="239" src="https://1.bp.blogspot.com/-b12cCvM1gvo/XRQEGgHTWaI/AAAAAAABFdQ/1jrm3GjLeTkHsd-zJywV4KN1QAnVGkdSwCLcBGAs/s320/IMG_6395.jpg" width="320" /></a></div><br />Vinay’s slides are available online and <a href="https://hps.vi4io.org/_media/events/2019/hpciodc-hpc_on_public_clouds_vinay_gaonkar.pdf">offer a great set of performance data for high-performance storage in the public clouds</a>.
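<p>The arithmetic behind that observation is worth spelling out.  As a toy illustration (with made-up instance names, prices, and bandwidths, not Kmesh’s data), the “best” instance type for backing an ephemeral parallel file system is the one with the lowest cost per unit of delivered I/O bandwidth, which is often not the biggest VM:</p>

<pre><code># Toy example; all instance names, prices, and bandwidths are hypothetical.
instance_types = {
    # name: (hourly cost in dollars, sustained I/O bandwidth in GB/s)
    "big-compute-64vcpu":  (3.20, 1.2),
    "mid-storage-16vcpu":  (0.90, 0.8),
    "small-storage-8vcpu": (0.45, 0.5),
}

# Rank by cost per unit of delivered bandwidth, cheapest first.
for name, (cost, bw) in sorted(instance_types.items(),
                               key=lambda item: item[1][0] / item[1][1]):
    print(f"{name:22s} ${cost / bw:5.2f} per GB/s per hour")

# small-storage-8vcpu     $ 0.90 per GB/s per hour
# mid-storage-16vcpu      $ 1.12 per GB/s per hour
# big-compute-64vcpu      $ 2.67 per GB/s per hour
</code></pre>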
The fact that there is now sufficient market opportunity to drive these issues to the forefront of I/O discussion at ISC is an indicator that the cloud is becoming increasingly attractive to users who need more than simple high-throughput computing resources.<br /><br />Even with these sorts of parallel-file-system-as-a-service offerings though, there are still non-trivial data management challenges when moving on-premise HPC workloads into the cloud that result from the impedance mismatch between scientific workflows and the ephemeral workloads for which cloud infrastructure is generally designed.  At present, the cost of keeping active datasets on a persistent parallel file system in the cloud is prohibitive, so data must continually be staged between an ephemeral file-based working space and long-term object storage.  This is approximately analogous to moving datasets to tape after each step of a workflow, which is unduly burdensome to the majority of mid-scale HPC users.<br /><br />However, such staging and data management issues are no longer unique to the cloud; as I will discuss in the next section, executing workflows across multiple storage tiers is no longer a problem unique to the biggest HPC centers.  The solutions that address the burdens of data orchestration for on-premise HPC are likely to also ease the burden of moving modest-scale HPC workflows entirely into the cloud.<br /><br /><h2>Tiering is no longer only a problem of the rich and famous</h2>Intel started shipping Optane persistent memory DIMMs earlier this year, and the rubber is now hitting the road as far as figuring out what I/O problems it can solve at the extreme cutting edge of HPC.  At the other end of the spectrum, flash prices have now reached a point where meat-and-potatoes HPC can afford to buy it in quantities that can be aggregated into a useful tier.  These two factors resulted in a number of practical discussions about how tiering can be delivered to the masses in a way that balances performance with practicality.<br /><br />The <a href="http://www.sagestorage.eu/">SAGE2</a> project featured prominently at the high end of this discussion.  Sai Narasimhamurthy from Seagate presented the <a href="https://dl.acm.org/citation.cfm?doid=3203217.3205341">Mero software stack</a>, the Seagate object store being developed to leverage persistent memory along with other storage media.
At a distance, its goals are similar to those of the original DAOS in that it provides an integrated system that manages data down to a disk tier.  Unlike the DAOS of today though, it takes on the much more ambitious goal of providing a <a href="https://dl.acm.org/citation.cfm?id=3127024.3127034">PGAS-style memory access model into persistent storage</a>.<br /><br />On the other end of the spectrum, a number of new Lustre features are rapidly coalescing into the foundation for a capable, tiered storage system.  At the Lustre/EOFS BOF, <a href="http://wiki.lustre.org/File_Level_Redundancy_Solution_Architecture#Phase_4:_Erasure_Coded_Striped_Files">erasure coded files</a> were shown on the roadmap for the Lustre 2.14 release in 2Q2020.  While the performance of erasure coding probably makes it prohibitive as the default option for new files on a Lustre file system, erasure coding in conjunction with Lustre’s file-level replication will allow a Lustre file system to store, for example, hot data in an all-flash pool that uses striped mirrors to enable high IOPS and then tier down cooler data to a more cost-effective disk-based pool of erasure-coded files.<br /><br />In a similar vein, Andreas Dilger also discussed future prospects for Lustre at the <a href="https://hps.vi4io.org/events/2019/iodc">HPC I/O in the Data Center workshop</a> and showed <a href="https://hps.vi4io.org/_media/events/2019/hpc-iodc-lustre_next_20_years-dilger.pdf">a long-term vision for Lustre</a> that is able to interact with both tiers within a data center and tiers across data centers:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="https://1.bp.blogspot.com/-f8WoFmxIh7Y/XRQItNt8k_I/AAAAAAABFdc/LjfajPmyIkEBnfiZAUHTaHAiYBOcLlInQCLcBGAs/s1600/IMG_6400.jpg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="239" src="https://1.bp.blogspot.com/-f8WoFmxIh7Y/XRQItNt8k_I/AAAAAAABFdc/LjfajPmyIkEBnfiZAUHTaHAiYBOcLlInQCLcBGAs/s320/IMG_6400.jpg" width="320" /></a></div><br /><br />Many of these features already exist and serve as robust building blocks from which a powerful tiering engine could be crafted.
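<p>To see why the mirror-on-flash/erasure-code-on-disk split is attractive, consider the capacity math.  The sketch below (my own toy policy with invented pool names, code widths, and thresholds, not anything presented at the BOF) shows both the efficiency arithmetic and the flavor of decision such a tiering engine might make:</p>

<pre><code>import time

# Hypothetical pool names and demotion threshold.
HOT_POOL, COLD_POOL = "flash-mirrored", "disk-erasure-coded"
DEMOTE_AFTER = 30 * 24 * 3600  # untouched for 30 days: demote to cold pool

def usable_fraction(data_blocks, parity_blocks):
    """Usable fraction of raw capacity for an N+M layout."""
    return data_blocks / (data_blocks + parity_blocks)

# A 2-way mirror stores every byte twice, while a wide erasure-coded
# layout sacrifices far less raw capacity for its protection:
assert usable_fraction(1, 1) == 0.5              # mirrored flash: 50% usable
assert round(usable_fraction(16, 2), 2) == 0.89  # e.g., a 16+2 code: ~89% usable

def choose_pool(last_access_time, now=None):
    """Route hot files to mirrored flash and cold files to erasure-coded disk."""
    now = time.time() if now is None else now
    return COLD_POOL if now - last_access_time > DEMOTE_AFTER else HOT_POOL
</code></pre>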
<br /><br />Finally, tiering took center stage at the Virtual Institute for I/O and IO-500 BOF at ISC with the <a href="https://www.vi4io.org/io500/list/19-06/start">Data Accelerator at Cambridge beating out OLCF Summit as the new #1 system</a>.  A key aspect of Data Accelerator’s top score arose from the fact that it is an <a href="https://www.eofs.eu/_media/events/lad18/07_alasdair_king_cam-bb_data_accelerator-lad18.pdf">ephemeral burst buffer system</a>; like Cray DataWarp, it dynamically provisions parallel file systems for short-term use.  As a result of this ephemeral nature, it could be provisioned with no parity protection and deliver a staggering amount of IOPS.<br /><br /><h2>Impressions of the industry</h2>As I’ve described before, I often learn the most by speaking one-on-one with engineers on the expo floor.  I had a few substantive discussions and caught on to a few interesting trends.<br /><br /><h3>No winners in EDSFF vs. NF.1</h3>It’s been over a year since Samsung’s NF.1 (formerly M.3 and NGSFF) and Intel’s EDSFF (“ruler”) SSD form factors debuted, and most integrators and third-party SSD manufacturers remain completely uncommitted to building hardware around one or the other.  Both form factors have their pros and cons, but the stalemate persists by all accounts so far.  Whatever happens to break this tie, it is unlikely that it will involve the HPC market, and it seems like U.2 and M.2 remain the safest bets for the future.<br /><br /><h3>Memory Landscape and Competition</h3>The HBM standard has put HMC (hybrid memory cube) in the ground, and I learned that Micron is committed to manufacturing HBM starting at the 2E generation.  Given that SK Hynix is also now manufacturing HBM, Samsung may start to face competition in the HBM market as production ramps up.  Ideally this brings down the cost of HBM components in the coming years, but the ramp seems to be slow, and Samsung continues to dominate the market.<br /><br />Perhaps more interestingly, 3D XPoint may be diversifying soon.  Although the split between Intel and Micron has been well publicized, I failed to realize that Intel will also have to start manufacturing 3D XPoint in its own fabs rather than the shared facility in Utah.  Micron has also announced its commitment to the NVDIMM-P standard, which could feasibly blow open the doors on persistent memory and allow non-Intel processor vendors to support it.  However, Micron has not committed to an explicit combination of 3D XPoint and NVDIMM-P.<br /><br />Realistically, the proliferation of persistent memory based on 3D XPoint may be very slow.  I hadn’t realized it, but not all Cascade Lake Xeons can even support Optane DIMMs; there are separate SKUs with the requisite memory controller, suggesting that persistent memory won’t be ubiquitous, even across the Intel portfolio, until the next generation of Xeon at minimum.  Relatedly, none of the other promising persistent memory technology companies (Crossbar, Everspin, Nantero) had a presence at ISC.<br /><br /><h3>China</h3>The US tariffs on Chinese goods are on a lot of manufacturers’ minds.  Multiple vendors remarked that they are<br /><br /><ul><li>thinking about moving more manufacturing from China into Taiwan or North America,</li><li>already migrating manufacturing out of China into Taiwan or North America, or</li><li>under pressure to make shorter-term changes to their supply chains (such as stockpiling in the US) in anticipation of deteriorating conditions.</li></ul><br />I was not expecting to have this conversation with as many big companies as I did, but it was hard to avoid.<br /><br />Beyond worrying about the country of origin for their components, though, none of the vendors with whom I spoke were very concerned about competition from the burgeoning Chinese HPC industry.  Several commented that even though some of the major Chinese integrators have very solid packaging, they are not well positioned as solutions providers.  At the same time, customers are now requiring longer presales engagements due to the wide variety of new technologies on the market.  As a result, North American companies playing in the HPC vertical are finding themselves transitioning into higher-touch sales, complex custom engineering, and long-term customer partnerships.<br /><br /><h2>Concluding thoughts</h2><div>This year’s ISC was largely one of anticipation of things to come rather than demonstrations that the future has arrived.  Exascale (and the pre-exascale road leading to it) dominated most of the discussion during the week.
Much of the biggest hype surrounding exascale has settled down, and gone are the days of pundits claiming that the sky will fall when exascale arrives due to constant failures, impossible programming models, and impossible technologies.  Instead, exascale is beginning to look very achievable and not unduly burdensome: we know how to program GPUs and manycore CPUs already, and POSIX file-based access will remain available for everyone.  Rather, the challenges are similar to what they’ve always been–continuing to push the limits of scalability in every part of the HPC stack.</div>
<div><br /></div>
<div>I owe my sincerest thanks to the organizers of ISC, its sessions, and the HPC-IODC workshop for putting together the programs that spurred all of the interesting discourse over the week.  I also appreciate the technical staff at many of the vendor booths with whom I spoke.  I didn't name every person from whom I drew insights on the expo floor, but if you recognize a comment that you made to me in this post and want credit, please do let me know--I'd be more than happy to give it.  I also apologize to all the people with whom I spoke and sessions I attended but did not include here; not everything I learned last week fit here.</div>


 Beyond Simulation – Harnessing AI for Next-Generation HPC at ISC

 2019-06-05T03:21:37-06:00
 https://hpc.social/2019/beyond-simulation-harnessing-ai-for-next-generation-hpc-at-isc
 <p>Computer simulation has become a staple technique in many disciplines – so much so that it is often described as the “third pillar” of the scientific method. Alongside theory and experimentation, simulation is used in everything from automotive design to computational chemistry to forecasting weather and market movements.</p>

<p>Simulation helps us solve problems that are too difficult, time-consuming, or expensive to solve empirically – for example, what is the optimal design and material for an impeller in a centrifugal pump? Or what failure states might exist in a semiconductor design from which a device can’t recover?</p>

<p>By devising accurate mathematical models and approximating those numerically in software, we can predict the behavior of real-world systems based on various parameters and a set of initial conditions. The better the model and the input data, and the more computing power that can be brought to bear, the better the prediction.</p>

<p><strong>Simulation vs. analytics</strong></p>

<p>High-performance data analytics (HPDA) and computer simulation are increasingly joined at the hip. Analytic techniques are sometimes used to improve simulation – providing better quality datasets to feed a simulation model, for example. Other times, simulation helps improve analytics – back-testing the performance of a financial or weather model over past data, for example, to gain confidence in a model’s predictive quality.</p>

<p>While simulation has served us well, it has limits. The quality of a predictive model is only as good as our ability to identify features useful in making accurate predictions. For some problems, such as structural mechanics, the features required to build a predictive model are relatively well known.
For other problems, such as financial markets or weather models, the number of potential parameters is vast, and their effects are sometimes poorly understood, significantly affecting the quality of the result.</p>

<p><strong>A fourth pillar in the scientific method</strong></p>

<p>AI is rapidly emerging as a “fourth pillar” of the scientific method, complementing theory, experimentation, and simulation techniques. Inference allows computers to make educated guesses about future results without the need to go through a full-blown simulation.</p>

<p>In fact, the AI development process can be modeled as an automation of the scientific method where the steps are:</p>

<ol>
<li>Observe</li>
<li>Hypothesize</li>
<li>Test the hypothesis</li>
<li>(return to #1)</li>
</ol>
<p><strong>The power of “better guesses”</strong></p>

<p>Humans often infer things intuitively based on prior knowledge. For example, returning to our impeller design, if a centrifugal pump needs to handle a viscous or corrosive liquid, a human engineer might know intuitively that a strong, non-reactive material like stainless steel is a good choice. By making educated guesses on materials and other parameters, the problem space to be simulated is reduced dramatically.</p>

<p>When dealing with complex problems, however, our human ability to make such inferences breaks down. Even for subject matter experts, problems like modeling chemical reactions or predicting how a semiconductor will behave are beyond our experience. The systems we need to model are too complex and involve too many parameters.</p>

<p><strong>Intelligent Simulation</strong></p>

<p>Fortunately, computers are very good at sifting through vast amounts of data and detecting patterns not obvious to humans. The best way to boost simulation performance is often to avoid simulations that will be irrelevant and not useful. By applying machine learning and other AI techniques to make informed guesses about which parameters and simulations will be most useful in solving a problem, we can:</p>

<ul>
<li>Reduce the number of simulations required</li>
<li>Provide higher resolution simulations and more trustworthy models</li>
<li>Reduce costs and cycle times wherever computer simulation is used</li>
</ul>
<p>Intelligent simulation helps us more effectively explore a problem space by predicting what regions, data, and exploratory techniques are most likely to be useful and omitting the rest.</p>

<p><strong>Bayesian Optimization</strong></p>

<p>In probability theory, Bayes’ theorem describes the probability of an event based on prior knowledge of conditions that might be related to the event. It turns out that Bayesian analysis is a particularly effective way to capture common-sense information from data and make better predictions, reducing the amount of computer simulation required. IBM has developed a Bayesian optimization accelerator that can function as an HPC advisory engine.</p>

<p>Powered by Bayesian optimization libraries, the system helps scientists apply these state-of-the-art techniques to computer simulation in multiple industries without the need for deep AI expertise.</p>
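<p>For readers who want a feel for the general technique (illustrated here with the open-source scikit-optimize library, not IBM’s accelerator), the core loop is remarkably compact: a Gaussian-process surrogate model decides which expensive “simulation” to run next, given everything observed so far.  The objective function below is a toy stand-in:</p>

<pre><code>from skopt import gp_minimize

def run_simulation(params):
    """Stand-in for an expensive simulation; returns a figure of merit to minimize."""
    x, y = params
    return (x - 0.3) ** 2 + (y + 0.1) ** 2  # toy objective

# The surrogate model proposes each new point to evaluate, spending the
# fixed simulation budget where it is most likely to pay off.
result = gp_minimize(
    run_simulation,
    dimensions=[(-1.0, 1.0), (-1.0, 1.0)],  # the parameter search space
    n_calls=25,                             # total simulation budget
)
print("best parameters:", result.x, "best value:", result.fun)
</code></pre>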
<p>Bayesian optimization has demonstrated that it can reduce simulation requirements by half with no disruption to the existing HPC infrastructure, dramatically improving HPC productivity.</p>

<p><strong>Harnessing AI for Next-Generation HPC @ ISC 2019</strong></p>

<p>At this year’s ISC conference in Frankfurt, Germany, you can learn more about IBM solutions for AI and HPC –</p>

<ul>
<li>Learn how accelerating simulations with <a href="https://www.ibm.com/blogs/think/2018/11/fueling-the-hpc-transformation-with-ai/">Bayesian optimization</a> has the potential to help you perform simulations in half the time</li>
<li>Learn how IBM Q researchers are putting machine learning on the path to quantum advantage</li>
<li>Try out <a href="https://rxn.res.ibm.com/">IBM RXN for Chemistry</a> and learn how AI techniques are helping automate discovery in organic chemistry by predicting chemical reactions</li>
<li>Finally, learn how a CPPM PCIe40 data acquisition adapter in an IBM POWER9 based system can help advance state-of-the-art research in high-energy physics and other applications</li>
</ul>
<p>Stop by the IBM booth (D-1140 in the exhibit hall) to see demos ranging from Power Systems and Spectrum Storage to Spectrum Computing and Watson Machine Learning Accelerator.</p>


 Powering the Future of HPC & AI with OpenPOWER

 2019-05-30T03:21:37-06:00
 https://hpc.social/2019/powering-the-future-of-hpc-ai-with-openpower
 <p>It is coming up on one year since the Summit supercomputer, based on IBM POWER9 at Oak Ridge National Lab, claimed the number one spot on the Top500 ranking. This system represents the culmination of a significant collaboration between OpenPOWER Foundation members IBM, Nvidia, Mellanox and Red Hat, with the goal of producing a well-balanced computing platform for not only traditional HPC workloads such as modelling and simulation, but also AI workloads. With this milestone approaching, we took the opportunity to catch up with Hugh Blemings, Executive Director at the OpenPOWER Foundation, to chat about the foundation and what lies ahead.</p>

<p><strong>Q: Our readership may not have heard of the OpenPOWER Foundation, what’s your 30 second summary?</strong></p>

<p>A: About six years ago now, IBM realised that to widen the reach of their POWER technology it’d make sense to have other companies making use of it. Accordingly they worked with Google, Mellanox, Nvidia and Tyan as founding members to set up the OpenPOWER Foundation with that goal in mind.</p>

<p>The Foundation promotes OpenPOWER technologies, including software and hardware, while facilitating an open ecosystem around them that provides industry specifications, helps members collaborate with each other on their various endeavours, and provides training and promotion of the Power architecture and member solutions.</p>

<p>As it stands in mid 2019 we’ve got over a dozen companies making system hardware – servers, desktops, adapter cards etc. based around POWER8 and POWER9, something north of 50 ISVs with OpenPOWER optimised software, and well over 300 members overall – academic, individual and corporate.</p>

<p><strong>Q: You mentioned that you’ve been doing a bit of a tour of open source conferences and similar events exhibiting OpenPOWER these last 18 months, what have been some of the takeaways from those conversations?</strong></p>

<p>A: It’s been very interesting.
We try to have a mix of member solutions at events – we’ve had everything from systems for the traditional data center, Open Compute kit, and desktop solutions, as well as demos of world-class software, accelerators and I/O devices on the booth, to show the diversity of the OpenPOWER ecosystem as well as illustrate the common threads.</p>

<p>We almost always have a system or two from <a href="https://raptorcs.com/">Raptor Engineering</a> – they’re based in the US and specialise in very open and high performance systems. Both their Talos II and Blackbird systems are entirely open hardware designs – so you have information down to a schematic and PCB layout level – as well as an entirely open software stack – firmware, BMC, operating system and apps.</p>

<p>This openness is, I think, unique in the industry for a high performance system, and the Blackbird is at a very keen price point too. We put a sign on one the other week at the Red Hat Summit that said “POWER9 under two grand” that got people’s attention.</p>

<p>We usually have a Google Zaius or other hyperscale nodes in the booth. As Google announced at our 2018 Summit, the Zaius is “Google Strong” and has gone into production, and it is again an open hardware design – but very much the other end of the design point to the Raptor systems, being intended for hyperscale.</p>

<p>So my first takeaway has been that performance is almost a given – these are the same chips and basic designs that power the #1 and #2 systems in the Top500, so of course they’re fast and truly built for HPC and AI workloads. But when you walk people through the openness of the systems and what that can mean for building trusted systems, that’s where people get excited.</p>

<p>It’s probably worth noting that POWER9 doesn’t have a management engine on chip like most x86 architecture parts, so this helps a lot with that trust factor. It does mean that if you want to run Minix on it you have to do it yourself though.</p>

<p>The second takeaway has been that there are significant numbers of customers buying OpenPOWER systems from companies other than IBM and using them to develop and fine-tune their HPC codes before deploying them up to (say) Sierra or Summit. Sometimes this is a rack or two, other times a couple of boxes to go under a (lucky) developer’s desk.</p>

<p>In fact I think you can now get a system from Raptor that has a couple of POWER9s, an NVIDIA GPU and the IBM PowerVision software all ready to go – so you can put one under your desk for development – the ultimate developer workstation there!</p>

<p>The last takeaway has been simply that folks are pleasantly surprised to hear they can get OpenPOWER hardware from companies in addition to IBM, which shows the diversity of the ecosystem and solutions available.</p>

<p><strong>Q: So what are the other hardware offerings that come to your mind?</strong></p>

<p>A: The gear from Yadro is interesting – they have a POWER8 design that focuses on huge memory and storage capacity in a single box. Something like 384 threads with 8TB of RAM and 24 NVMe drives, all in a 2U 19” rack. Great for in-memory databases and very memory-intensive workloads – AI, ML, that sort of thing.</p>

<p>Wistron have their Mihawk, a couple of POWER9s, PCIe Gen3 and Gen4 slots aplenty, OpenCAPI for GPU/FPGA accelerators, and a ton of solid state storage in the front of the rack, which makes for a powerful inferencing system.
Inspur Power Systems have five new OpenPOWER models about to drop too, which they showed off at the NVIDIA GTC conference and our first OpenPOWER Summit in Tokyo last week.</p>

<p><strong>Q: What do you think attracts HPC and AI users to OpenPOWER?</strong></p>

<p>A: One of the senior technical folks from a US national lab once explained to me that it’s been about feeds for a while now – memory and I/O bandwidth. If you look at POWER9 you’ve a processor design that is architected with this in mind internally; then to get off chip you’ve got lots of NVLink for GPUs, CAPI and OpenCAPI connections, DDR4 channels, plus PCIe Gen4 – so you’ve doubled the bandwidth down at the physical layer.</p>

<p>NVLink and CAPI/OpenCAPI are, unsurprisingly, a big draw too. Having a cache coherent interconnect between your CPUs and off-chip accelerators simplifies programming models immensely, and there is a significant performance improvement too just from reducing if not eliminating copying data about, for example from memory to the CPU, on to the GPU and back.</p>

<p><strong>Q: What’s the state of the open source software ecosystem around OpenPOWER nowadays?</strong></p>

<p>A: As you might recall, OpenPOWER – or PowerPC as it was back then – was one of the earliest non-x86 ports for things like the Linux kernel and the GNU Compiler Collection, so the ecosystem is very mature. All the Linux distros just work – Red Hat, SuSE, Ubuntu, Gentoo, Red Flag, Debian etc. Given that the POWER ISA has evolved very cleanly over the years, it’s not unknown to see people running old 32-bit PowerPC code with minimal if any modification.</p>

<p><strong>Q: ISC19 is coming up in Frankfurt next month, will OpenPOWER have a presence?</strong></p>

<p>A: Yes, we’ll have a booth there with some hardware for folks to look at. We’ll have at least Blackbird and Zaius planars to check out. We’re waiting for confirmation on some other machines, too. Oh, and free OpenPOWER stickers of course!</p>

<p>In addition to our booth, over 20 OpenPOWER members will be at ISC this year with their offerings. IBM will have a node from the world’s fastest supercomputer on display; Inspur and Inspur Power Systems, E4, Mellanox, Red Hat, Xilinx, NEC, and numerous universities and software houses will have their OpenPOWER solutions on show. And don’t forget OpenCAPI will also be there. They are, I believe, the only open high speed cache coherent interconnect available across multiple architectures that is in production use.</p>

<p>For those interested in learning from subject matter experts on how OpenPOWER systems are used in HPC, the International Workshop on OpenPOWER for HPC (IWOPH’19) is featured as part of the ISC workshops on the Thursday of the event.</p>

<p><strong>Q: Can you share any insight about what the future holds for OpenPOWER?</strong></p>

<p>A: I’m a technical person at heart so I hate saying things that sound perilously close to marketing speak, however a couple of things come to mind.</p>

<p>The roadmap for OpenPOWER CPU-wise is looking really strong, with details of both an updated POWER9 and a POWER10 offering being discussed.</p>

<p>We’re seeing more adoption of low cost OpenPOWER hardware by individual developers, researchers and security conscious end users – which broadens the ecosystem and overall user base.</p>

<p>There is at least one other announcement in the works that I think will truly be industry changing, but we’re a couple of months out from being able to discuss it more widely.
I’d perhaps simply recommend our OpenPOWER North American Summit in San Diego to your readership – it’s connected with the Linux Foundation Open Source Summit and will be the place to be in August.</p>


 VAST Data's storage system architecture

 2019-02-27T05:23:00-07:00
 https://hpc.social/2019/vast-data-s-storage-system-architecture
 <p><a href="https://www.vastdata.com/">VAST Data, Inc</a>, an interesting new storage company, unveiled their new all-flash storage system today amidst a good amount of hype and fanfare.  There’s no shortage of marketing material and trade press coverage out there about their company and the juiciest features of their storage architecture, so to catch up on what all the talk has been about, I recommend taking a look at<br />
<div><ul><li>The <a href="https://www.vastdata.com/app/pdf/datasheet.pdf">VAST “Universal Storage” datasheet</a></li><li>The Next Platform’s article, “<a href="https://www.nextplatform.com/2019/02/26/vast-data-clustered-flash-storage-bans-the-disk-from-the-datacenter/">VAST Data Clustered Flash Storage Bans The Disk From The Datacenter</a>”</li><li>Chris Mellor’s piece, “<a href="https://blocksandfiles.com/2019/02/26/vast-datas-extinction-level-event-for-disk-drives-and-tiering/">VAST Data: The first thing we do, let’s kill all the hard drives</a>”</li></ul></div></p>
<div>The reviews so far are quite sensational in the literal sense, since VAST is one of very few storage systems being brought to market that have been designed from top to bottom to use modern storage technologies (containers, NVMe over Fabrics, and byte-addressable non-volatile memory) <i>and</i> tackle the harder challenge of file-based (not block-based) access.</div>
<div><br /></div>
<div>In the interests of grounding the hype in reality, I thought I would share various notes I've jotted down based on my understanding of the VAST architecture.  That said, I have to make a few disclaimers up front:</div>
<div><ol><li>I have no financial interests in VAST, I am not a VAST customer, I have never tested VAST, and everything I know about VAST has come from just a few conversations with a limited number of people in the company.  This essentially means I have no idea what I'm talking about.</li><li>I do not have any NDAs with VAST and none of this material is confidential.  Much of it is from public sources now.  I am happy to provide references where possible.  If you are one of my sources and want to be cited or credited, please let me know.</li><li>These views represent my own personal opinions and not those of my employer, sponsors, or anyone else.</li></ol></div>
<div>With that in mind, what follows is a semi-coherent overview of the VAST storage system as I understand it.  If you read anything that is wrong or misguided, rest assured that it is not intentional.  Just let me know and I will be more than happy to issue corrections (and provide attribution if you so desire).</div>
<div><br />(<b>Update on May 12, 2020</b>: There is now an <a href="https://vastdata.com/whitepaper">authoritative whitepaper on how VAST works under the hood</a> on the VAST website.
Read that, especially "How It Works," for a better informed description than this post.)<br /><br /></div>
<div><h2>Relevant Technologies</h2><div>A VAST storage system is comprised of two flavors of building blocks:</div>
<div><ol><li><b>JBOFs</b> (VAST calls them "d boxes" or "HA enclosures").  These are what contain the storage media itself.</li><li><b>I/O servers</b> (VAST calls them "cnodes," "servers," "gateways" or, confusingly, "compute nodes").  These are what HPC cluster compute nodes talk to in order to perform I/O via NFS or S3.</li></ol></div>
<div>Tying these two building blocks together is an RDMA fabric of some sort--either InfiniBand or RoCE.  Conceptually, it would look something like this:</div>
<div><br /></div>
<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://2.bp.blogspot.com/-fwRspws0SAk/XHYc4Oj6tjI/AAAAAAABD98/oxVEAYQMJgAUGgSlX7CfjLokiCHo-Yt0QCLcBGAs/s1600/The%2BVAST%2BData%2BArchitecture.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="338" src="https://2.bp.blogspot.com/-fwRspws0SAk/XHYc4Oj6tjI/AAAAAAABD98/oxVEAYQMJgAUGgSlX7CfjLokiCHo-Yt0QCLcBGAs/s400/The%2BVAST%2BData%2BArchitecture.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Conceptual diagram of how VAST Data's storage system (IOS, storage fabric, and JBOFs) might fit into a canonical HPC system.  Interestingly, it strongly resembles old-school block-based SAN architectures.</td></tr></tbody></table>
<div><br /></div>
<div>For the sake of clarity, we'll refer to the HPC compute nodes that run applications and perform I/O through an NFS client as "clients" hereafter.  We'll also assume that all I/O to and from VAST occurs using NFS, but remember that VAST also supports S3.</div>
<div><br /></div>
<h3>JBOFs</h3><div>JBOFs are dead simple, and their only job is to expose each NVMe device attached to them as an NVMe over Fabrics (NVMeoF) target.  They are not truly JBOFs because they do have (from <a href="https://www.vastdata.com/app/pdf/datasheet.pdf">the VAST spec sheet</a>):</div>
<div><ol><li>2x embedded active/active servers, each with two Intel CPUs and the necessary hardware to support failover</li><li>4x 100 gigabit NICs, operating using either RoCE or InfiniBand</li><li>38x 15.36 TB U.2 SSD carriers.  These are actually carriers that take multiple M.2 SSDs.</li><li>18x 960 GB U.2 Intel Optane SSDs</li></ol></div>
<div>However they are not intelligent.  They are not RAID controllers, nor do they do <i>any</i> data motion between the SSDs they host.  They literally serve each device out to the network and that's it.</div>
<div><br /></div>
<h3>I/O Servers</h3><div>I/O servers are where the magic happens, and they are physically discrete servers that</div>
<div><ol><li>share the same SAN fabric as the JBOFs and speak NVMeoF on one side, and</li><li>share a network with client nodes and talk NFS on the other side</li></ol></div>
<div>These I/O servers are completely stateless; all the data stored by VAST is stored in the JBOFs.  The I/O servers have no caches; their job is to turn NFS requests from compute nodes into NVMeoF transfers to JBOFs.
Specifically, they perform the following functions:</div>
<div><ol><li>Determine which NVMeoF device(s) to address to serve an incoming I/O request from an NFS client.  This is done using a hashing function.</li><li>Enforce file permissions, ACLs, and everything else that an NFS client would expect.</li><li>Transfer data to/from SSDs, and transfer data to/from 3D XPoint drives.</li><li>Transfer data between SSDs and 3D XPoint drives.  This happens as part of the regular write path, to be discussed later.</li><li>Perform "global compression" (discussed later), rebuilds from parity, and other maintenance tasks.</li></ol></div>
<div>It is also notable that I/O servers do not have an affinity to specific JBOFs as a result of the hash-based placement of data across NVMeoF targets.  They are all simply stateless worker bees that process I/O requests from clients and pass them along to the JBOFs.  As such, they do not need to communicate with each other or synchronize in any way.</div>
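<p>A toy sketch of the idea (my own illustration, not VAST's actual algorithm): because placement is a pure function of what is being accessed, every I/O server independently computes the same answer with no shared state or coordination:</p>

<pre><code>import hashlib

# Two JBOFs with 18 Optane devices each, per the spec sheet above.
NVME_TARGETS = [f"jbof{j}-xpoint{d}" for j in range(2) for d in range(18)]

def place(path):
    """Deterministically map a file path to an NVMeoF target."""
    digest = hashlib.sha256(path.encode()).digest()
    return NVME_TARGETS[int.from_bytes(digest[:8], "big") % len(NVME_TARGETS)]

# Any I/O server, at any time, computes the same placement:
print(place("/vast/project/data.h5"))
</code></pre>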
<div><br /></div>
<h3>System Composition</h3><div>Because I/O servers are stateless and operate independently, they can be dynamically added (and removed) from the system at any time to increase or decrease the I/O processing power available to clients.  VAST's position is that the peak I/O performance to the JBOFs is virtually always CPU limited, since the data path between the CPUs (in the I/O servers) and the storage devices (in JBOFs) uses NVMeoF.  This is a reasonable assertion, since NVMeoF is extremely efficient at moving data as a result of its use of RDMA and simple block-level access semantics.</div>
<div><br /></div>
<div>At the same time, this design requires that every I/O server be able to communicate with every SSD in the entire VAST system via NVMeoF.  This means that each I/O server mounts every SSD at the same time; in a relatively small two-JBOF system, this results in 112 NVMe targets (two JBOFs with 56 devices each: 38 NAND carriers plus 18 Optane SSDs) on every I/O server.  This poses two distinct challenges:</div>
<div><ol><li>From an implementation standpoint, this is <b>pushing the limits of how many NVMeoF targets a single Linux host can effectively manage</b> in practice.  For example, a 10 PB VAST system will have over 900 NVMeoF targets mounted on every single I/O server.  There is no fundamental limitation here, but this scale will exercise pieces of the Linux kernel in ways they were never designed to be used.</li><li>From a fundamental standpoint, this <b>puts tremendous pressure on the storage network</b>.  Every I/O server has to talk to every JBOF as a matter of course, resulting in a network dominated by all-to-all communication patterns.  This will make performance extremely sensitive to topology, and while I wouldn't expect any issues at smaller scales, high-diameter fat trees will likely see these sensitivities manifest.  The Lustre community turned to fine-grained routing to counter this exact issue on fat trees.  Fortunately, InfiniBand now has adaptive routing that I expect will bring much more forgiveness to this design.</li></ol></div>
<div>This said, VAST has tested their architecture at impressively large scale and has an aggressive scale-out validation strategy.</div>
<div><br /></div>
<h3>Shared-everything consistency</h3>
<div>Mounting every block device on every server may also sound like anathema to anyone familiar with block-based SANs, and generally speaking, it is.  NVMeoF (and every other block-level protocol) does not really have locking, so if a single device is mounted by two servers, it is up to those servers to communicate with each other to ensure they aren't attempting to modify the same blocks at the same time.  Typical shared-block configurations manage this by simply assigning exclusive ownership of each drive to a single server and relying on heartbeating or quorum (e.g., in HA enclosures or GPFS) to decide when to change a drive's owner.  StorNext (formerly CVFS) allows all clients to access all devices, but it uses a central metadata server to manage locks.</div>
<div><br /></div>
<div>VAST avoids a lot of these problems by simply not caching any I/Os on the I/O servers and instead passing NFS requests through as NVMeoF requests.  This is not unlike how parallel file systems like PVFS (now OrangeFS) avoided the lock contention problem; not using caches dramatically reduces the window of time during which two conflicting I/Os can collide.  VAST also claws back some of the latency penalties of doing this sort of direct I/O by issuing all writes to nonvolatile memory instead of flash; this will be discussed later.</div>
<div><br /></div>
<div>For the rare cases where two I/O servers are asked to change the same piece of data at the same time, though, there is a mechanism by which an extent of a file (which is on the order of 4 KiB) can be locked.  I/O servers will flip a lock bit for that extent in the JBOF's memory using an atomic RDMA operation before issuing an update, serializing overlapping I/Os to the same byte range.</div>
<div><br /></div>
<div>VAST also uses redirect-on-write to ensure that writes are always consistent.  If a JBOF fails before an I/O is complete, presumably any outstanding locks evaporate, since they are resident only in RAM.  Any changes that were in flight simply get lost, because the metadata structure that describes the affected file's layout only points to updated extents after they have been successfully written.  Again, this redirect-on-complete is achieved using an atomic RDMA operation, so data is always consistent.  VAST does not need to maintain a write journal as a result.</div>
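<p>A toy model of that lock-bit-plus-redirect-on-write dance might look like the following (my own sketch; the dictionary and helper functions here stand in for one-sided RDMA atomics and the real metadata machinery, and all names are hypothetical):</p>

<pre><code># Illustrative sketch only; all names and locations are hypothetical.
extent_locks = {}  # (file_id, extent_index) -> lock bit, lives in JBOF memory

def rdma_compare_and_swap(key, expected, new):
    """Stand-in for a one-sided atomic RDMA CAS against JBOF memory."""
    if extent_locks.get(key, 0) == expected:
        extent_locks[key] = new
        return True
    return False

def write_new_extent(data):
    """Stand-in for redirect-on-write: updated data always lands somewhere new."""
    return ("nand-ssd-12", 0xABC000)  # hypothetical new extent location

def update_layout_pointer(file_id, extent, location):
    """Stand-in for the atomic metadata update that commits the write."""
    pass

def write_extent(file_id, offset, data):
    extent = offset // 4096           # lock granularity is a ~4 KiB extent
    key = (file_id, extent)
    while not rdma_compare_and_swap(key, 0, 1):
        pass                          # another I/O server holds the bit; retry
    try:
        location = write_new_extent(data)                 # redirect-on-write
        update_layout_pointer(file_id, extent, location)  # atomic commit
    finally:
        rdma_compare_and_swap(key, 1, 0)  # release the lock bit
</code></pre>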
<div><br /></div>
<div>It is not clear to me what happens to locks in the event that an I/O server fails while it has outstanding I/Os.  Since I/O servers do not talk to each other, there is no means by which they can revoke locks or probe each other for timeouts.  Similarly, JBOFs are dumb, so they cannot expire locks.</div>
<div><br /></div>
<h2>The VAST write path</h2>
<div>I think the most meaningful way to demonstrate how VAST employs parity and compression while maintaining low latency is to walk through each step of the write path and show what happens between the time an application issues a write(2) call and the time that write call returns.</div>
<div><br /></div>
<div>First, an application on a compute node issues a write(2) call on an open file that happens to reside on an NFS mount that points to a VAST server.  That write flows through the standard Linux NFS client stack and eventually results in an NFS RPC being sent over the wire to a VAST server.  Because VAST clients use the standard Linux NFS client, there are a few standard limitations.  For example,<br /><ol><li>There is no parallel I/O from the client.  A single client cannot explicitly issue writes to multiple I/O servers.  Instead, some sort of <a href="https://www.emc.com/collateral/hardware/white-papers/h8316-wp-smartconnect.pdf">load balancing technique</a> must be inserted between the client and servers.</li><li>VAST violates POSIX because it only ensures NFS close-to-open consistency.  If two compute nodes try to modify the same 4 KiB range of the same file at the same time, the result will be corrupt data.  VAST's server-side locking cannot prevent this because it happens at the client side.  The best way around this is to force all I/O destined for a VAST file system to use direct I/O (e.g., open with O_DIRECT).</li></ol><div>Pictorially, it might look something like this:</div>
<div><br /></div>
<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://2.bp.blogspot.com/-u0n8JTcmsYk/XHd-DF6RrvI/AAAAAAABD-M/Iy8PRzE0Yootom8IkmLkNoA8-i5YlWpwgCLcBGAs/s1600/vast-write-1.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="323" src="https://2.bp.blogspot.com/-u0n8JTcmsYk/XHd-DF6RrvI/AAAAAAABD-M/Iy8PRzE0Yootom8IkmLkNoA8-i5YlWpwgCLcBGAs/s640/vast-write-1.png" width="100%" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Step 1 of VAST write path: client issues a standard NFS RPC to a VAST I/O server</td></tr></tbody></table><div>Then the VAST I/O server receives the write RPC and has to figure out to which NVMeoF device(s) the data should be written.  This is done by first determining on which NVMe device the appropriate file's metadata is located.  This metadata is stored in B-tree-like data structures with a very wide fan-out ratio, whose roots are mapped to physical devices algorithmically.  Once an I/O server knows which B-tree to traverse for a specific file's metadata, it walks that tree to find the file, and then finds the locations of that file's extents.  The majority of these metadata trees live in 3D XPoint, but very large file systems may have their outermost levels stored in NAND.</div>
<div><br /></div>
<div>A key aspect of VAST's architecture is that writes always land on 3D XPoint first; this narrows down the possible NVMeoF targets to those which are storage-class memory devices.<br /><br />Pictorially, this second step may look something like this:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://3.bp.blogspot.com/-2abTaRik0t0/XHd_M0XVBlI/AAAAAAABD-U/zwLZSNXiNfEUEVyGzDskuU_bMMjCJcUSQCLcBGAs/s1600/vast-write-2.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="323" src="https://3.bp.blogspot.com/-2abTaRik0t0/XHd_M0XVBlI/AAAAAAABD-U/zwLZSNXiNfEUEVyGzDskuU_bMMjCJcUSQCLcBGAs/s640/vast-write-2.png" width="100%" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Step 2 of VAST write path: I/O server forwards write to 3D XPoint devices.
Data is actually triplicated at this point for reasons that will be explained later.</td></tr></tbody></table>VAST uses 3D XPoint for two distinct roles:</div>
<div><ol><li>Temporarily store all incoming writes</li><li>Store the metadata structures used to describe files and where the data for files resides across all of the NVMe devices</li></ol></div>
<div>VAST divides the 3D XPoint used for role #1 into buckets.  Buckets are used to group data based on how long that data is expected to persist before being erased; incoming writes that will be written once and never erased go into one bucket, while incoming writes that may be overwritten (erased) in a very short time go into another.  VAST is able to make educated guesses about this because it knows many user-facing features of the file to which incoming writes are being written (its parent directory, extension, owner, group, etc.), and it tracks file volatility over time.</div>
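<p>A toy sketch of the flavor of heuristic this implies (my guess at its shape, not VAST's code): route each incoming write to a bucket keyed by the data's predicted lifetime, so that an erase block ends up holding similarly volatile data:</p>

<pre><code># Hypothetical illustration; the features and thresholds are invented.
def predicted_lifetime(path, owner, history):
    """Guess a volatility class from user-visible file features."""
    if path.endswith((".tmp", ".lock")) or "/scratch/" in path:
        return "short-lived"      # likely overwritten or deleted soon
    if history.get(owner, {}).get("overwrite_rate", 0.0) > 0.5:
        return "short-lived"      # this user's files churn frequently
    return "write-once"           # archival-ish data, rarely erased

buckets = {"short-lived": [], "write-once": []}  # staging areas in 3D XPoint

def stage_write(path, owner, payload, history=None):
    buckets[predicted_lifetime(path, owner, history or {})].append(payload)
</code></pre>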
&nbsp;Again, this diagram is probably inaccurate as a result of my own lack of understanding; the relationship between a bucket (which maps to a single SSD's erase block) and a stripe (which must touch N+M SSDs) is unclear to me.</td></tr></tbody></table>By writing out an entire erase block at once, VAST avoids the need for the SSD to garbage collect and amplify writes, since erase blocks are never only partially written. &nbsp;Erase blocks are also presumably rarely (or never?) partially erased; this is a result of</div>
+<div><ol><li>the combined volatility-based bucketing of data (similarly volatile data tends to reside in the same erase block), and</li><li>VAST's redirect-on-write nature (data is never overwritten; updated data is simply written elsewhere and the file's metadata is updated to point to the new data).</li></ol><div>Because VAST relies on cheap consumer NAND SSDs, the data is not safe in the event of a power loss even after the NAND SSD claims the data is persisted. &nbsp;As a result, VAST then forces each NAND SSD to flush its internal caches to physical NAND. &nbsp;Once this flush command returns, the SSDs have guaranteed that the data is power fail-safe. &nbsp;VAST then deletes the bucket contents from 3D XPoint:</div>
+</div>
+<div><br /></div>
+<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://1.bp.blogspot.com/-bsKdxL1chJs/XHeDA8F0cJI/AAAAAAABD-4/FK-cCC1S6DYZOOa73C_Iq2G1QDbdrmrDwCLcBGAs/s1600/vast-write-5.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="324" src="https://1.bp.blogspot.com/-bsKdxL1chJs/XHeDA8F0cJI/AAAAAAABD-4/FK-cCC1S6DYZOOa73C_Iq2G1QDbdrmrDwCLcBGAs/s640/vast-write-5.png" width="100%" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Step 5 of the VAST write path: Once data is truly persisted and safe in the event of power loss, VAST purges the original copy of that bucket that resides on the 3D XPoint.</td></tr></tbody></table><div>The metadata structures for all affected files are updated to point at the version of the data that now resides on NAND SSDs, and the bucket is free to be filled by the next generation of incoming writes.<br /><br /></div>
+<h3>Data Protection</h3><div>These large buckets also allow VAST to use extremely wide striping for data protection. &nbsp;As writes come in and fill buckets, large stripes are also being built with a minimum of 40+4 parity protection. &nbsp;Unlike in a traditional RAID system where stripes are built in memory, VAST's use of nonvolatile memory (3D XPoint) to store partially full buckets allows very wide stripes to be built over larger windows of time without exposing the data to loss in the event of a power failure. &nbsp;Partial stripe writes never happen because, by definition, a stripe is only written down to flash once it is full.</div>
+<div><br /></div>
+<div>Bucket sizes (and by extension, stripe sizes) are variable and dynamic. &nbsp;VAST will opportunistically write down a stripe as erase blocks become available. &nbsp;As the number of NVMe devices in the VAST system increases (e.g., more JBOFs are installed), stripes can become wider. &nbsp;This is advantageous when one considers the erasure coding scheme that VAST employs; rather than use a Reed-Solomon code, they have developed their own parity algorithm that allows blocks to be rebuilt from only a subset of the stripe.
&nbsp;An example stated by VAST is that a 150+4 stripe only requires 25% of the remaining data to be read to rebuild. &nbsp;As pointed out by <a href="https://glennklockwood.blogspot.com/2019/02/vast-datas-storage-system-architecture.html?showComment=1551464397653#c6795285534302272414">Shuki Bruck though</a>, this is likely a derivative of the <a href="https://doi.org/10.1109/TIT.2012.2227110">Zigzag coding scheme introduced by Tamo, Wang, and Bruck in 2013</a>, where data coded using N+M parity requires only (N+M)/M reads to rebuild; for 150+4, that works out to 154/4 = 38.5 device reads out of the 153 survivors, or roughly 25%.</div>
+<div><br /></div>
+<div>To summarize, parity-protected stripes are slowly built in storage-class memory over time from bits of data that are expected to be erased at roughly the same time. &nbsp;Once a stripe is fully built in 3D XPoint, it is written down to the NAND devices. &nbsp;As a reminder, I/O servers are responsible for moderating all of this data movement and parity generation; the JBOFs are dumb and simply offer up the 3D XPoint targets.</div>
+<div><br /></div>
+<div>To protect data as stripes are being built, the contents of the 3D XPoint layer are simply triplicated. &nbsp;This is to say that every partially built stripe's contents appear on three different 3D XPoint devices.<br /><br /></div>
+<h3>Performance Expectations</h3><div>This likely has a profound effect on the write performance of VAST; if a single 1 MB write is issued by an NFS client, the I/O server must write 3 MB of data to three different 3D XPoint devices. &nbsp;While this would not affect latency by virtue of the fact that the I/O server can issue NVMeoF writes to multiple JBOFs concurrently, it means that the NICs facing the backend InfiniBand fabric must be able to inject data three times as fast as data arriving from the front-end, client-facing network. &nbsp;<b>Alternatively, VAST is likely to carry an intrinsic 3x performance penalty to writes versus reads.</b></div>
+<div><br /></div>
+<div>There are several factors that will alter this in practice:</div>
+<div><ul><li>Both 3D XPoint SSDs and NAND SSDs have higher read bandwidth than write bandwidth as a result of the power consumption associated with writes. &nbsp;This will further increase the 3:1 read:write performance penalty.</li><li>VAST always writes to 3D XPoint but may often read from NAND. &nbsp;This closes the gap in theory, since 3D XPoint is significantly faster at both reads and writes than NAND is at reads in most cases. &nbsp;However, the current 3D XPoint products on the market are PCIe-attached and limited to PCIe Gen3 speeds, so there is not a significant bandwidth advantage to 3D XPoint writes vs. NAND reads.</li></ul><div>It is also important to point out that VAST has yet to publicly disclose any performance numbers. &nbsp;However, using replication to protect writes is perhaps the only viable strategy to deliver extremely high IOPS without sacrificing data protection. &nbsp;WekaIO, which also aims to deliver extremely high IOPS, showed a similar 3:1 read:write performance skew in <a href="https://www.vi4io.org/io500/list/19-01/10node">their IO-500 submission in November</a>. &nbsp;While WekaIO uses a very different approach to achieving low latency at scale, their benchmark numbers indicate that scalable file systems that optimize for IOPS are likely to sacrifice write throughput to achieve this. &nbsp;VAST's architecture and choice to replicate writes is in line with this expectation, but until VAST publishes performance numbers, this is purely speculative. &nbsp;I would like to be proven wrong.<br /><br />
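+<div>For the skeptically minded, the two claims above are easy to sanity-check with back-of-envelope arithmetic. &nbsp;The sketch below simply restates VAST's 25% rebuild claim and the triplication math; nothing in it is measured, and the 10 GB/s figure is a number I made up for illustration.</div>
+<div><br /></div>
+<pre>def rebuild_read_fraction(n, m):
+    """Fraction of surviving devices that must be read to rebuild one
+    failed device under an N+M Zigzag-style code: (N+M)/M reads out of
+    the N+M-1 survivors."""
+    return ((n + m) / m) / (n + m - 1)
+
+def backend_write_bandwidth(client_write_bw):
+    """Triplicating every incoming write means the back-end fabric must
+    absorb three times the front-end ingest rate."""
+    return 3 * client_write_bw
+
+print(rebuild_read_fraction(150, 4))   # 0.2516..., i.e., VAST's ~25% claim
+print(backend_write_bandwidth(10.0))   # 10 GB/s of NFS writes becomes 30 GB/s of NVMeoF writes</pre>
+<div><br /></div>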
<h2>Other Bells and Whistles</h2>The notes presented above are only a small part of the full VAST architecture, and since I am no expert on VAST, I'm sure there's even more that I don't realize I don't know or fully understand. &nbsp;That said, I'll highlight a few examples of which I am tenuously aware:<br /><br />Because every I/O server sees every NVMe device, it can perform <b>global compression</b>. &nbsp;Typical compression algorithms are designed only to compress adjacent data within a fixed block size, which means similar but physically disparate blocks cannot be reduced. &nbsp;VAST tracks a similarity value for extents in its internal metadata and will group these similar extents before compressing them. &nbsp;I envision this to work something like a Burrows-Wheeler transformation (it is definitely not one though); conceptually it combines the best features of compression and deduplication. &nbsp;I have to assume this compression happens somewhere in the write path (perhaps as stripes are written to NAND), but I don't understand this in any detail.<br /><br />The exact compression algorithm is one of VAST's own design, and it is not block-based because VAST does not have a fixed block size. &nbsp;This means that decompression is also quite different from block-based compression; according to VAST, their algorithm can decompress only a local subset of data such that reads do not require a similarly global decompression. &nbsp;The net result is that read performance of compressed data is not significantly compromised. &nbsp;VAST has a very compelling example where they compressed data that was already compressed and saw a significant additional capacity savings as a result of the global nature of their algorithm. &nbsp;While I normally discount claims of high compression ratios since they never hold up for scientific data, the conceptual underpinnings of VAST's approach to compression sound promising.<br /><br />
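+<div>Since the similarity-grouping idea is the part I find most interesting, here is a toy sketch of what "group similar extents, then compress them together" might look like. &nbsp;The prefix-based similarity value and the grouping policy below are entirely my own invention for illustration; VAST's actual algorithm is proprietary and surely far more sophisticated.</div>
+<div><br /></div>
+<pre>import zlib
+from collections import defaultdict
+
+def similarity_key(extent, nbytes=64):
+    """Toy similarity value: extents whose first bytes match are deemed
+    similar.  (A real system would use a fuzzy sketch, not an exact one.)"""
+    return bytes(extent[:nbytes])
+
+def compress_by_similarity(extents):
+    """Group similar extents and compress each group as a single stream so
+    that redundancy between physically disparate extents is also removed;
+    conceptually, this is compression plus deduplication."""
+    groups = defaultdict(list)
+    for extent in extents:
+        groups[similarity_key(extent)].append(extent)
+    return {key: zlib.compress(b"".join(group), 9)
+            for key, group in groups.items()}</pre>
+<div><br /></div>
+<div>The important architectural point is only that the grouping happens globally, across everything the I/O servers can see, rather than within a fixed-size block.<br /><br /></div>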
VAST is also very closely tied to byte-addressable nonvolatile storage from top to bottom, and much of this is a result of their <b>B-tree-based file system metadata structure</b>. &nbsp;They refer to their underlying storage substrate as an "element store" (which I imagine to be similar to a key-value store), and it sounds like it is designed to store a substantial amount of metadata per file. &nbsp;In addition to standard POSIX metadata and the pointers to data extents on various NVMe devices, VAST also stores user metadata (in support of their S3 interface) and internal metadata (such as heuristics about file volatility, versioning for continuous snapshots, etc.). &nbsp;This element store API is not exposed to customers, but it sounds like it is sufficiently extensible to support a variety of other access APIs beyond POSIX and S3.<br /><br /><h2>Take-away Messages</h2>VAST is an interesting new all-flash storage system that resulted from taking a green-field approach to storage architecture. &nbsp;It uses a number of new technologies (storage-class memory/3D XPoint, NAND, NVMe over fabrics) in intellectually satisfying ways, and builds on them using a host of byte-granular algorithms. &nbsp;It looks like it is optimized for both cost (through its intelligent management of flash endurance) and latency (landing I/Os on 3D XPoint and using triplication), which have traditionally been difficult to optimize together.<br /><br />Its design does rely on an extremely robust backend RDMA fabric, and the way in which every I/O server must mount every storage device sounds like a path to scalability problems--both in terms of software support in the Linux NVMeoF stack and fundamental sensitivities to topology inherent in large, high-diameter RDMA fabrics. &nbsp;The global all-to-all communication patterns and choice to triplicate writes make the back-end network critically important to the overall performance of this architecture.<br /><br />That said, the all-to-all ("shared everything") design of VAST brings a few distinct advantages as well. &nbsp;As the system is scaled to include more JBOFs, the global compression scales as well and can recover an increasing amount of capacity. &nbsp;Similarly, data durability increases as stripes can be made wider and be placed across different failure domains. &nbsp;In this sense, the efficiency of the system increases as it gets larger due to the global awareness of data. &nbsp;VAST's choice to make the I/O servers stateless and independent also adds the benefit of being able to scale the front-end capability of the system independently of the back-end capacity. &nbsp;Provided the practical and performance challenges of scaling out described in the previous paragraph do not manifest in reality, this bigger-is-better design is an interesting contrast to the mass storage systems of today which, at best, do not degrade as they scale out. &nbsp;Unfortunately, VAST has not disclosed any performance or scaling numbers, so the proof will be in the pudding.<br /><br />However, VAST has hinted that the costs are "one fifth to one eighth" of enterprise flash today; by their own estimates of today's cost of enterprise flash, this translates to a cost of between $0.075 and $0.12 per gigabyte of flash when deployed in a VAST system. &nbsp;This remains 3x-5x more expensive than spinning disk today, but the cost of flash is dropping far faster than the cost of hard drives, so the near-term future may truly make VAST cost-comparable to disk. &nbsp;As flash prices continue to plummet though, the VAST cost advantage over datacenter flash may become less dramatic, but their performance architecture will remain compelling when compared to a traditional disk-oriented networked file system.<br /><br />As alluded to above, VAST is not the first company to develop a file-based storage system designed specifically for flash, and they share many similar architectural design patterns with their competition. &nbsp;This is creating gravity around a few key concepts:<br /><ul><li>Both flash and RDMA fabrics handle kilobyte-sized transfers with grace, so the days of requiring megabyte-sized I/Os to achieve high bandwidth are nearing an end.</li><li>The desire to deliver high IOPS makes replication an essential part of the data path, which will skew I/O bandwidth towards reads. &nbsp;This maps well to read-intensive workloads such as those generated by AI, but it does not bode as well for the write-intensive workloads of traditional modeling and simulation.</li><li>Reserving CPU resources exclusively for driving I/O is emerging as a requirement to get low-latency and predictable I/O performance with kilobyte-sized transfers.
&nbsp;Although not discussed above, VAST uses containerized I/O servers to isolate performance-critical logic from other noise on the physical host. &nbsp;This pattern maps well to the notion that at exascale, there will be an abundance of computing power relative to the memory bandwidth required to feed computations.</li><li>File-based I/O is not entirely at odds with very low-latency access, but this file-based access is simply one of many interfaces exposed atop a more flexible key-value type of data structure. &nbsp;As such, as new I/O interfaces emerge to serve the needs of extremely latency-sensitive workloads, these flexible new all-flash storage systems can simply expose their underlying performance through other non-POSIX APIs.</li></ul><div>Finally, if you've gotten this far, it is important to underscore that I am in no way speaking authoritatively about anything above. &nbsp;If you are really interested in VAST or related technologies, don't take it from me; talk to the people and companies developing them directly.</div>
+</div>
+</div>
+</div>
+
+
+
+
+ What Should a National Research Computing Platform Be?
+
+ 2019-01-19T00:00:00-07:00
+ https://hpc.social/2019/what-should-a-national-research-computing-platform-be-
+ <h1 id="what-is-a-national-research-computing-platform-for-in-2019">What is a National Research Computing Platform <em>For</em> in 2019?</h1>
+
+<blockquote>
+ <p><em>Computers are everywhere now, but computing is still hard. Canada
+should build on its competitive advantage by
+strengthening existing efforts to provide
+expertise, skills and training to researchers and
+scholars across the country, and let others provide the increasingly
+commodity hardware. The result will be a generation of trainees
+with deep research and cloud experience, and a critical mass of
+talent at centres focussed on building enabling technologies.</em></p>
+
+</blockquote>
+
+<p>As R&amp;D becomes increasingly intertwined with computational techniques,
+the need for advanced R&amp;D computing support to power research and
+scholarship has grown enormously. What that support looks like,
+however, and the kind of services that researchers most need, have
+changed radically over the past decades.</p>
+
+<h2 id="the-history-of-providing-computers-for-research">The history of providing computers for research</h2>
+
+<p>In the 1990s and 2000s, the overwhelming need was simply access to
+computers. With no other providers for computing or storage, it
+fell to individual research groups to supply their own. But a
+natural economy of scale starts to play out with computational
+resources. Purchasing and operating hardware becomes more
+cost-effective in bulk; and what was even then the most scarce and
+valuable resource - the expertise to operate and make effective use
+of the hardware - actually <em>grows</em>, rather than diminishes, by
+being involved in different research problems. So quickly individual
+researcher “clusters in a closet” gave way to departmental, then
+institutional, and finally regional or national platforms for
+computational research and data science support. In Canada, the
+vast majority of such support is offered through <a href="https://www.computecanada.ca">Compute
+Canada</a>.</p>
+
+<p>As we enter 2019, this landscape looks quite different than it did
+in the 90s. Computing resources adequate for research are thick
+on the ground. 
Indeed, as the range of problems researchers tackle
+with computing and data broadens, many extremely active areas of
+compute- and data-powered research require nothing more than a
+powerful desktop.</p>
+
+<p>And for larger needs, the unavoidable logic of economies of scale
+for computers and storage has now entered the marketplace. A
+competitive range of commercial vendors provide access to
+computing resources that can meet the vast majority of other
+researchers’ needs. While it’s true that those commercial cloud
+providers charge a premium (50%-100%, slowly declining over time)
+over what it costs to provide the resources in academic research
+environments, that premium pays for enormous benefits in improved
+uptime, flexibility, and currency of the hardware, all of which
+have real value for researchers. Increasingly, even niche technologies
+like FPGAs, RDMA-enabled networking, and ARM processors are readily
+available on commercial cloud providers, leaving fewer and fewer
+use cases where in-house provision of computer resources remains a
+necessity. Those use cases are important — they include
+multi-rack HPC users, and the stewardship and analysis
+of data with the strictest regulatory on-premises requirements —
+but they represent a minority of computational science needs.</p>
+
+<h2 id="the-need-for-higher-level-support">The need for higher-level support</h2>
+
+<p><img alt="We advance research more powerfully by providing clarity than clusters" src="https://www.dursi.ca/assets/what_is_ardc_for/shutterstock_clarity.jpg" style="float: right; width: 50%;" />
+But even while <em>computers</em> for research become ever more accessible,
+research <em>computing</em> for cutting-edge research remains a barrier to too many. Scientists and scholars are trained to be experts in
+their field, not necessarily experts in computer science or the
+latest computer hardware. Even keeping track of the latest
+computational methods, which frequently come from neighbouring
+fields if not different disciplines entirely, can be a challenge.
+Researchers greatly need assistance from and collaborations with
+experts in research computation itself. It is the skills, not the
+infrastructure, that are scarcest.</p>
+
+<p>The good news is that the Compute Canada federation has a network
+of roughly 200 computational experts, many at the Ph.D. level,
+available to directly enable science projects. The bad news is that
+the priorities of the organization, and thus most of its effort and
+energies, are focussed on procuring and operating on-premises commodity
+computing and storage hardware - to the extent that many of those
+experts spend most of their time answering basic help-desk
+questions or performing routine operational duties for those systems.</p>
+
+<h2 id="what-should-todays-rd-computing-support-focus-on">What should today’s R&amp;D computing support focus on?</h2>
+
+<p>With academic institutions now being just one player amongst
+many for computing and storage resources, there are a few possible
+futures for Canada’s computing centres – centres that have
+grown up primarily
+focused on purchasing, operating, and providing access to hardware
+for researchers. They could downsize, shrinking to focus on those
+sorts of hardware not well covered by other providers. 
Alternatively,
+they could double down on the “discount provider” model,
+emphasizing low cost, ‘no frills’ access to compute and
+storage, competing on price.</p>
+
+<p>Either of these approaches represents a scandalous squandering of
+opportunity, wasting invaluable and nearly irreplaceable expertise
+and experience in applying computational techniques to open research
+problems. Instead, we should do something different. We should
+pursue our competitive advantage by taking the existing network of
+computational science advisors that we already have and making those
+higher-level expert services the primary offering, letting other
+providers focus on the lower-level procurement and operating of
+most computing and storage hardware.</p>
+
+<h3 id="skills-beat-hardware">Skills beat hardware</h3>
+
+<p><img src="https://www.dursi.ca/assets/what_is_ardc_for/pixabay_mechanics-3310067.png" style="float: right; width: 50%;" /></p>
+
+<p>The goal of a research computing support platform is to enable
+research, and to help develop the next generation of research talent.
+Knowledge transfer and skills development are by far the most
+valuable work that a computing team can do to meet those goals -
+because skills have the longest-lasting impact, because it addresses
+real needs in Canada’s R&amp;D ecosystem, and simply because no one
+else can do it at scale.</p>
+
+<p>First, deep training in research methods pays
+long-lasting dividends. Even in rapidly changing fields like data
+and computational science, skills and experience don’t depreciate the
+way computing hardware does. New methods come, but old methods don’t
+really go; and fluency in the previous generation of methods makes
+learning – or even creating – those newer methods easier.</p>
+
+<p>And it’s actually even better than that, because not only do the
+skills that come from that research experience and training remain
+useful in their field for long periods of time, they transfer
+to other disciplines extremely well. Methods for solving equations,
+or pulling information out of data, have strong relationships with
+each other and can often be applied with modest modifications to
+problems well outside the fields in which they were first developed.
+These broad areas of effort - Data Science, Informatics,
+Simulation Science, and the Data Engineering or cloud computing
+tools needed for them - are enabling research technologies which
+can empower research in many fields. And therein lies the second
+reason for the importance of skills development; these
+research-enabling technologies are areas in which Canada
+currently lags. A recent report on the
+<a href="http://new-report.scienceadvice.ca/assets/report/Competing_in_a_Global_Innovation_Economy_FullReport_EN.pdf">State of Science and Technology and Industrial R&amp;D</a> specifically calls out “enabling technologies”
+as a current area of weakness for Canada which is holding back high-impact
+research in other areas. Focussing on such highly
+transferable skills and talent development in our research computing
+platform would help build a critical mass of such expertise both
+in the research computing centres themselves and in the community as
+a whole.</p>
+
+<p>Finally, there just aren’t other options for providing high-level
+data and computational science collaboration and training to Canada’s
+scholars and researchers consistently and across disciplines. 
We in the
+research community know that the availability of a collaborator with
+complementary interests and skills can make the difference between
+a research project happening or not. Unlike access to commodity
+computing hardware, the skills involved in making sure researchers
+have access to the best methods for their research, and in training
+emerging research talent in the computational side of their discipline,
+are very much not commodity skills, and cannot be purchased or rented
+from somewhere else.</p>
+
+<h3 id="the-cloud-premium-is-a-price-worth-paying">The cloud premium is a price worth paying</h3>
+
+<p><img src="https://www.dursi.ca/assets/what_is_ardc_for/pixabay_cloud-computing-2001090.jpg" style="float: right; width: 50%;" /></p>
+
+<p>The benefits of further efforts in skills development and training
+are fairly clear, and this alone would justify redirecting some
+effort from hardware to research services, and using commercial
+cloud providers to fill the gap. But having substantial commercial
+cloud resources available for researchers is worthwhile on its own merits.</p>
+
+<p>Firstly, cloud provides more flexibility for rapidly changing research.
+The resource mix can be much broader and change much more rapidly
+than traditional procurement cycles would allow; what’s more, those
+changes can be in response to demonstrated researcher needs, rather
+than making predictions and assumptions about the next five years
+based on existing research users. As with owned systems, dynamically
+taking advantage of this flexibility requires top operational staff.
+And the uptime availability and hardware currency of these resources
+will generally be significantly better than what can be provided in
+house.</p>
+
+<p>Secondly, trainees and staff benefit from gaining extremely relevant
+commercial cloud expertise. This goes back to skills development
+a bit, but in this case it’s the system tools – the experience
+working with commercial cloud services and building data systems
+solutions using them – that are valuable in and of themselves,
+and will be attractive skills to have in whatever career they
+move on to.</p>
+
+<p>Finally, commercial engagement can proceed much more smoothly, and
+be more attractive from the point of view of the commercial partner,
+when the collaboration happens in the commercial cloud. The success
+of efforts like <a href="https://www.theubercloud.com">Uber Cloud</a> provides
+some validation of this. Most companies that would participate in such
+engagement either already have or are planning commercial cloud projects,
+and are likely more comfortable with such offerings than with using
+academic systems.</p>
+
+<h2 id="how-to-proceed">How to proceed</h2>
+
+<p>Making significant changes to priorities and indeed how we
+provision basic services can seem daunting. It may not seem clear
+how to get there from here, but there are some basic approaches
+and guidelines that can help.</p>
+
+<dl>
+ <dt><strong>No need to do it all at once</strong></dt>
+ <dd>This is a change that can and should be made incrementally. A
+team can be quite straightforwardly trained at a new,
+small, “national site” to provide access to a slowly
+growing range of cloud resources. 
This can start as a modestly
+scaled pilot, expanding in response to researcher needs.</dd>
+ <dt><strong>Make the hardware you own really count by advancing the mission</strong></dt>
+ <dd>Many hardware needs are readily outsourceable, whether to commercial
+entities or by “buying in” with other academic R&amp;D
+computing partners. However, some resources will likely stay in-house.
+The way to choose is to ensure that every decision to own
+rapidly-depreciating, expensive-to-operate equipment directly
+supports the mission of excellent research support and research skills
+development. In-house equipment should be significantly better
+at that mission than what can be procured from elsewhere. That
+may mean making cutting-edge infrastructure that is
+in itself publication-worthy, or buying still-prototype
+experimental systems to evaluate, and to build and share expertise
+on.</dd>
+ <dt><strong>Use the right tools for the job</strong></dt>
+ <dd>Helpdesk requests and fixing software bugs are both
+short-term tasks that benefit from a “ticket tracking”
+approach; an issue is identified, someone fixes it and
+“closes” the ticket, and the faster the ticket is closed,
+the better the service was. That isn’t the right way to think about
+higher-level services like collaborations and knowledge transfer,
+and using tools for one to manage interactions like the other distorts
+both the tool and the interactions. Consulting firms use case
+management software, not ticket trackers, to manage engagements,
+and use the effectiveness of the collaboration rather than the duration
+of the engagement to judge success. Since interactions with the
+researchers are vitally important to the success of the mission,
+the best available case management software (and helpdesk software where
+appropriate) should be used.</dd>
+ <dt><strong>Make the expertise really count by building a unified national team</strong></dt>
+ <dd>Once the right tools are in place, other lessons can be learned
+from successful consultancies. The most successful collaborations
+will combine staff from across the country with the appropriate
+expertise, and staff that are local to the researcher. To achieve
+that, the computational experts across the country must be able
+to find each other, self-assemble into teams as needed, and collaborate
+seamlessly. While the technical infrastructure for this exists,
+the organizational incentives are still for staff at a site to support
+primarily “their” researchers. Such siloing is completely
+counter to supporting national research.</dd>
+</dl>
+
+<h2 id="summary">Summary</h2>
+
+<p><img src="https://www.dursi.ca/assets/what_is_ardc_for/shutterstock_collaboration.jpg" style="float: right; width: 50%;" /></p>
+
+<p>The goal of a research computing support platform - any research
+support resource, really - is to enable research, and to help develop
+the next generation of research talent. 
With that primary mission
+in mind, the reasons for focussing the time and effort of computational
+science experts on collaboration and skills development rather than
+operating commodity hardware could not be clearer:</p>
+
+<ul>
+ <li>Collaboration across disciplines - domain science and computational/data expertise - enables better Canadian research;</li>
+ <li>Computational and data skills maintain their value, while hardware rapidly depreciates; and</li>
+ <li>Building a critical mass of expertise and talent focussed on emerging data science and computational methods will strengthen Canadian competitiveness not just in research but in innovation.</li>
+</ul>
+
+<p>There are costs to this approach; it will cost somewhat more to
+have someone else run much of that hardware. But even those costs
+have upsides:</p>
+
+<ul>
+ <li>Cloud provides more flexibility for rapidly changing research; capability mixes and system configurations can be changed much faster than hardware procurement cycles;</li>
+ <li>Commercial cloud infrastructure provides much better uptime and currency for researchers;</li>
+ <li>Both the computational experts and the research trainees benefit from gaining extremely relevant cloud expertise that will benefit them in any future career; and</li>
+ <li>Industrial engagement will be much more straightforward around
+commercial cloud providers than academic infrastructure.</li>
+</ul>
+
+<p>The prospect of moving to such a different service model may seem
+daunting, but it needn’t be:</p>
+
+<ul>
+ <li>Move one step at a time, with a new, small, “national site” being a collection of cloud resources;</li>
+ <li>Not all hardware can be outsourced; make the hardware you do retain an ownership stake in count by having it be best-in-class, enabling experimentation and development of new approaches, or otherwise ensuring that owning it rather than renting it <em>directly</em> advances the mission;</li>
+ <li>Choose the best possible tools for staff/researcher interactions; and</li>
+ <li>Build the best possible computational science team by having them collaborate internally, as well, and ensuring researchers and trainees get the most relevant help and collaboration possible.</li>
+</ul>
+
+<p>These changes will not be easy; they will require participation from
+funders, staff, researchers, and all stakeholders. But the research
+computing world of today is not that of the 1990s, and how we support
+computational research should take advantage of that.</p>
+
+<p>Images courtesy of <a href="https://www.shutterstock.com/home">shutterstock</a> and <a href="https://pixabay.com">pixabay</a>, used under license</p>
+
+
+
+
+ A week in the life of an SC attendee
+
+ 2018-11-24T01:44:00-07:00
+ https://hpc.social/2018/a-week-in-the-life-of-an-sc-attendee
+ <p>Last week was the annual <a href="https://sc18.supercomputing.org/">Supercomputing conference, held this year in Dallas</a>, and it was as busy as it always is.  Every year I take plenty of photos and post plenty of tweets throughout the week, but this year I thought it might be fun to share some of those photos (and the related things I learned) now that the dust has settled.  
Since some people might also be interested in how someone might approach the conference from a technical and philosophical perspective, I figured I’d write a more general piece documenting my entire SC experience this year.<br /><br />This post wound up being a massive, meandering, chronological documentary of a week in my life that includes both technical and non-technical commentary.  For anyone who is only interested in the technical insights I gained during SC, check out the items prefixed with (tech) in this table of contents:<br /><ul><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#before-conf">Before the Conference</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#saturday">Saturday</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#sunday">Sunday</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#monday">Monday</a></li><ul><li>(tech) <a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#pdsw">PDSW-DISCS 2018 Highlights</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#gala">SC Exhibition Gala</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#bash">The Beowulf Bash</a></li></ul><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#tuesday">Tuesday</a></li><ul><li>(tech) <a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#tuesdaytechprog">Technical Program, Data and Storage Paper Track Highlights</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#tuesdayinterlude">Interlude of Meetings</a></li><li>(tech) <a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#tuesdayexpo">Cray and Fujitsu’s Exascale System Hardware on the Expo Floor</a></li><li>(tech) <a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#paralleliobof">Analyzing Parallel I/O BOF Highlights</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#crayparty">The Cray Celebration</a></li></ul><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#wednesday">Wednesday</a></li><ul><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#wednesdaymorning">SC Student Career Fair and a Booth Talk</a></li><li>(tech) <a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#wednesdayexpo">Flash, Disk, and Tape Technologies on the Expo Floor</a></li><li>(tech) <a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#io500bof">Recap of the IO-500/VI4IO BOF</a></li></ul><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#thursday">Thursday</a></li><ul><li>(tech) <a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#thursdaytechprog">WekaIO and Micron at the Exhibitor Forum</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#nsfbof">NSF Future Directions BOF</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#mypaper">My SC Paper</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#perot">SC Technical Program Reception at the Perot Museum</a></li></ul><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#friday">Friday</a></li><li><a href="https://glennklockwood.blogspot.com/2018/11/a-week-in-life-of-sc-attendee.html#after-conf">After the Conference</a></li></ul><br />Everything that’s not labeled (tech) is part diary and part career-development perspective.  Hopefully someone will find something in here that’s of some value.<br /><br />Finally, disclosures:<br /><ul style="font-size: xx-small;"><li>I omitted some names in the interests of respecting the privacy of the folks who took the time to talk to me one-on-one.  If you’re part of this story and don’t mind having your name out there, I’d be happy to include it.</li><li>Everything I paraphrase here is public information or conjecture on my part.  Nothing in this post is either confidential or sensitive.  That said, check your references before citing anything here.  I don’t know what I’m talking about.</li><li>Everything here is my personal opinion and does not necessarily reflect the viewpoint of my employer or its funding agency.  I attended the conference as a part of the regular course of business in which I am employed.  However I took all photos for personal purposes, and the entirety of this post was written on my own personal time.</li></ul><br /><h2 id="before-conf"><span></span>Before the conference</h2>Everyone’s SC experience is different because it draws such a diverse range of professionals.  There are plenty of activities for everyone ranging from students and early-career staff to senior management and leadership, and people on different career tracks (e.g., facilities staff, computer science researchers, program managers, product sales) are likely to be drawn to very different parts of the conference agenda.  My priorities during the week of SC are definitely shaped by where I am in my career, so when filling out my calendar a few weeks ahead of the conference, I considered the following:<br /><br /><b>My job is half research and half facilities staff.</b>  50% of my time is funded by grant money to do applied research in characterizing parallel I/O systems.  The other half of my time is spent staying current on emerging technologies in computing and storage.  These two responsibilities mean that my SC is usually a mix of attending technical program sessions (to see what my peers in research are doing and see what research ideas might turn up in future technologies) and engaging with vendors.<br /><br /><b>I work in advanced technologies.</b>  This means I am generally not in the trenches directly feeling the pains of operating HPCs today; instead, my job is to identify technologies that will cause fewer problems tomorrow.  This also means that I don’t have purchasing authority, and I am less likely to be involved with anything that’s going to hit the floor in the next year.  
As such, I generally don’t do vendor sales meetings or briefings at SC because they are typically focused on nearer-term products and sales.<br /><br /><b>I did not get to where I am by myself.</b>  I first heard about SC in 2010 when I was a graduate student, and it sounded almost infinitely more exciting than the materials science conferences I was attending.  I had no experience in HPC at the time, but it made me realize what I really wanted to pursue as a career.  I relied heavily on the good will of the online HPC community to learn enough to get my first HPC job at SDSC, and after that, the faith of a great many more to get me to where I am now.  SC is often the only time I get to see people who have helped me out in my early career, and I always make time to connect with them.<br /><br />The net result of these goals was a pretty full schedule this year:<br /><br /><div class="separator" style="clear: both; text-align: center;"></div></p>
+<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://2.bp.blogspot.com/-TzDXu652VHs/W_c4dxJBnXI/AAAAAAABCn8/-deME8LsYgwntvDZo4V_p12MG1ji4L_hwCK4BGAYYCw/s1600/Screen%2BShot%2B2018-11-22%2Bat%2B10.12.27.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="400" src="https://2.bp.blogspot.com/-TzDXu652VHs/W_c4dxJBnXI/AAAAAAABCn8/-deME8LsYgwntvDZo4V_p12MG1ji4L_hwCK4BGAYYCw/s400/Screen%2BShot%2B2018-11-22%2Bat%2B10.12.27.png" width="292" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">My SC'18 schedule. &nbsp;Note that the time zone is PST, or two hours behind Dallas time.</td></tr></tbody></table>
+<p><br /><br />I mark everything that I <i>must</i> attend (usually because I’m a speaker) in red to know the immovable obligations. Blue items are things I <i>will</i> attend unless an emergency comes up, and grey things are events I <i>should</i> attend because they sound interesting.<br /><br />White space is very important to me too; between 10am and 6pm, white spaces are when I can walk the expo floor.  A lot of people write off the expo as a waste of time, but I actually feel that it’s one of the most valuable parts of SC.  Since my job is to understand emerging technology (and the market trends that drive them), accosting a pre-sales engineer or product manager in a strategically important technology provider can yield an invaluable peek into the markets they’re serving.  White space in the evenings is equally important for engagements of opportunity or working on slides that have to be presented the next day.<br /><div><br /></div></p>
+<h2 id="saturday">Saturday, November 10</h2>
+<p>I always fly to SC on the Saturday before the conference starts.  I have historically opted to do workshops on both Sunday and Monday, as I really enjoy attending both <a href="http://www.pmbsworkshop.org/">PMBS</a> and <a href="http://www.pdsw.org/">PDSW-DISCS</a>.  
I bring a suitcase that has extra room for conference swag, and doing so this year was critically important because I opted to <a href="https://twitter.com/glennklockwood/status/1061337582858956800">bring along a pair of cowboy boots</a> that I knew I would not want to wear on the flight home.<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://4.bp.blogspot.com/-ZUaEGhTWOJA/W_b_laf-LnI/AAAAAAABClE/BH6sxJc8GLI1YhAxfdw1WgfctK1mLbFiACK4BGAYYCw/s1600/IMG_4844.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="320" src="https://4.bp.blogspot.com/-ZUaEGhTWOJA/W_b_laf-LnI/AAAAAAABClE/BH6sxJc8GLI1YhAxfdw1WgfctK1mLbFiACK4BGAYYCw/s320/IMG_4844.jpeg" width="320" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">My brown kicks.  Also Harriet the cat.</td></tr></tbody></table><br />On just about every work flight I’m on, I’ve got PowerPoint slides to review; this trip was no different, and I spent the 3.5-hour flight reviewing the slides I had to present the next day. Once in Dallas and at my hotel, I carried out my usual work-travel night-of-arrival ritual: order the specialty pizza from a local pizza joint, text home saying I arrived safely, and iron my clothes while watching Forensic Files.<br /><br /><h2 id="sunday">Sunday, November 11</h2>This year I had the honor of presenting one part of <a href="https://sc18.supercomputing.org/presentation/?id=tut121&amp;sess=sess238">the famed Parallel I/O in Practice tutorial at SC</a> along with Rob Ross, Brent Welch, and Rob Latham.  This tutorial has been running for over fifteen years now, and at some point over those years, it picked up the curious ritual of being kicked off with some juggling:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://4.bp.blogspot.com/-aeDAd6dJWf8/W_cFOyOjQAI/AAAAAAABClc/1e6hwAvpkscqRo7E6SJY18Uremnqb4-pwCK4BGAYYCw/s1600/IMG_4857.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="320" src="https://4.bp.blogspot.com/-aeDAd6dJWf8/W_cFOyOjQAI/AAAAAAABClc/1e6hwAvpkscqRo7E6SJY18Uremnqb4-pwCK4BGAYYCw/s320/IMG_4857.jpeg" width="240" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Brent leading up to the tutorial start time with some juggling.  He brought the pins with him.</td></tr></tbody></table><br />The tutorial itself is really comprehensive and includes everything from device-level performance behavior to parallel file systems architecture and I/O middleware.  
Even though I can proudly say that I knew 95% of the material being presented throughout the day (as I probably should have, since I was a presenter!), I found <a href="https://twitter.com/glennklockwood/status/1061751272339070976">this particular slide that Rob Latham presented</a> particularly insightful:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://2.bp.blogspot.com/-cmNtrmcyeNE/W_cIF0rHUwI/AAAAAAABCl0/1NyV2lPD3FoagXus54zTCwbbPy4ckfgywCLcBGAs/s1600/IMG_4860.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://2.bp.blogspot.com/-cmNtrmcyeNE/W_cIF0rHUwI/AAAAAAABCl0/1NyV2lPD3FoagXus54zTCwbbPy4ckfgywCLcBGAs/s400/IMG_4860.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">The ease and portability of using I/O middleware comes without sacrificing performance!  Sorry for the odd angle; this is the screen as we presenters were able to view it.</td></tr></tbody></table><br />It makes the case that there is no significant performance penalty for using higher-level I/O libraries (like PnetCDF or parallel HDF5) despite how much easier they are to use than raw MPI-IO.  One of the biggest take-home messages of the entire tutorial is to use I/O middleware wherever possible; doing so means that understanding parallel file system architecture isn’t a prerequisite to getting good I/O performance.<br /><br /><h2 id="monday">Monday, November 12</h2><div>Monday was the official first day of SC.  Workshops and tutorials went on throughout the day, and the opening keynote and exhibition hall opening gala started in the evening.</div></p>
+<div><br /></div>
+<h3 id="pdsw">PDSW-DISCS 2018</h3>
+<p>The <a href="http://www.pdsw.org/">3rd Joint International Workshop on Parallel Data Storage &amp; Data Intensive Scalable Computing Systems (PDSW-DISCS)</a> was on Monday, and I had the honor of being asked to serve as its Publicity Chair this year.<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-DujP5Fbmxeg/W_cDr2Kw8TI/AAAAAAABClQ/QVtCDkG_JPQUlSxwJGtalamSkn9k7dacQCK4BGAYYCw/s1600/IMG_4863.jpeg" style="margin-left: auto; margin-right: auto; text-align: center;"><img border="0" height="320" src="https://1.bp.blogspot.com/-DujP5Fbmxeg/W_cDr2Kw8TI/AAAAAAABClQ/QVtCDkG_JPQUlSxwJGtalamSkn9k7dacQCK4BGAYYCw/s320/IMG_4863.jpeg" width="240" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">The PDSW-DISCS full-day workshop agenda</td></tr></tbody></table><br />It’s a really great workshop for people working in I/O, storage, and data and always draws a large crowd:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://2.bp.blogspot.com/-1BGZXlbQsxQ/W_cMDZ1LK_I/AAAAAAABCmQ/CC046rCDsP49GYPyCgpBPbERtiJId4kjgCK4BGAYYCw/s1600/IMG_4872.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="225" src="https://2.bp.blogspot.com/-1BGZXlbQsxQ/W_cMDZ1LK_I/AAAAAAABCmQ/CC046rCDsP49GYPyCgpBPbERtiJId4kjgCK4BGAYYCw/s400/IMG_4872.jpeg" width="400" /></a></div> 
+<br />For researchers, it’s a great venue for short papers that IEEE or ACM publishes, and it also has a really nice Work-in-Progress track where a page-long abstract gives you a seven-minute spot to pitch your work.  For attendees, it’s always chock full of good talks that range from pure research to applied development.<br /><br />This year’s keynote speaker was <a href="https://www.linkedin.com/in/rangan/">Rangan Sukumar</a>, Cray’s analytics guru.  His talk was interesting in that it approached the oft-mentioned convergence between HPC and AI (which has become an over-used trope by itself) from the perspective of a system architect (which is where the rubber meets the road):<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://4.bp.blogspot.com/-FUxOI01WZAQ/W_cNqcINlnI/AAAAAAABCmc/TxatMQ-ANK0yHmGv5RMzrbvBz3MBz_vCgCK4BGAYYCw/s1600/IMG_4866.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="225" src="https://4.bp.blogspot.com/-FUxOI01WZAQ/W_cNqcINlnI/AAAAAAABCmc/TxatMQ-ANK0yHmGv5RMzrbvBz3MBz_vCgCK4BGAYYCw/s400/IMG_4866.jpeg" width="400" /></a></div>
+<br />As many great keynote speakers do, Rangan used hyperbole at times to contrast HPC and “Big Data” workloads, and this <a href="https://twitter.com/glennklockwood/status/1062002965630910470">stimulated some discussion online</a>.  Although the slides alone tell only part of the story, you can download them from the <a href="http://www.pdsw.org/">PDSW-DISCS’18 website</a>.<br /><br />Later in the morning, Margaret Lawson (University of Illinois, Sandia Labs) presented a follow-on to the <a href="https://dx.doi.org/10.1145/3149393.3149403">EMPRESS metadata system she presented last year</a>:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://4.bp.blogspot.com/-CRKTR3ccrGU/W_cRcIo-pRI/AAAAAAABCmo/yNwqAJnhiDoKKNKyjdrkSQR8CK1XPcXjACK4BGAYYCw/s1600/IMG_4874.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="225" src="https://4.bp.blogspot.com/-CRKTR3ccrGU/W_cRcIo-pRI/AAAAAAABCmo/yNwqAJnhiDoKKNKyjdrkSQR8CK1XPcXjACK4BGAYYCw/s400/IMG_4874.jpeg" width="400" /></a></div>
+<br />Last year, EMPRESS seemed a little too researchy for me (as a facilities person) to sink my teeth into.  This year though, the picture seems a lot more complete and I quite like the architectural framework.  Although EMPRESS may not ever be a household name, the concept of separating data streams and metadata streams underneath some sort of I/O middleware is really solid.  
I think that storing data and metadata in different, architecturally distinct storage systems that map to the unique access patterns of data and metadata is ultimately the right way to approach large-scale data and metadata management in HPC, and I expect to see this design pattern proliferate as scientific data analysis becomes a bigger part of large-scale HPC workloads.<br /><br />In the afternoon, researchers from OSU offered a rare peek into Alibaba through a high-level analysis of SSD failure data provided by the Chinese hyperscaler:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://4.bp.blogspot.com/-dxNudSspkiM/W_cUTSdBV2I/AAAAAAABCm0/lnCQWJ4BdYccKgd9O9iO3NNy-MapHSZvACK4BGAYYCw/s1600/IMG_4879.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="225" src="https://4.bp.blogspot.com/-dxNudSspkiM/W_cUTSdBV2I/AAAAAAABCm0/lnCQWJ4BdYccKgd9O9iO3NNy-MapHSZvACK4BGAYYCw/s400/IMG_4879.jpeg" width="400" /></a></div>
+<br /><br />The most alarming finding to me was that 20% of SSD failures were caused by humans yanking the wrong SSD.  This immediately made me wonder who Alibaba is hiring to do routine operational support at their data centers; if people are causing a significant fraction of storage faults, either they aren’t hiring with the same standards as their US counterparts, or their data centers are a mess.  The speaker’s proposed remedy was to use a different SSD form factor for each logical use case for SSDs so that operators could visually identify an SSD reserved for metadata versus one reserved for data.  I personally think a label maker, a barcode scanner, and a decent salary is an easier, standards-based solution.<br /><br />Other highlights included<br /><ul><li><i>Characterizing Deep-Learning I/O Workloads in TensorFlow</i>, presented by Stefano Markidis of KTH.  The first time I’ve seen an I/O-centric evaluation of how deep learning workflows will affect storage requirements of future systems.  I learned a lot.</li><li><i>Toward Understanding I/O Behavior in HPC Workflows</i>, presented by Jakob Lüttgau of DKRZ/ANL.  Rather than analyze the I/O pattern of a single MPI job, this paper began examining the I/O patterns of related jobs that all work towards a single scientific objective.  Again, one of the first research papers I’ve seen that takes a critical look at end-to-end workflows from an I/O perspective.</li><li><i>Methodology for the Rapid Development of Scalable HPC Data Services</i>, presented by Matthieu Dorier of ANL.  I think this paper is intended to be the canonical reference for <a href="https://press3.mcs.anl.gov/mochi/">the Mochi project</a>, which I was glad to finally see.  The idea of enabling quickly composable, purpose-built I/O services that are optimized for next-generation media and interconnects is a brilliant one, and I am a huge believer that this approach will be what demonstrates the earliest scientific successes that rely on storage-class memory at scale.</li></ul><br />There were a number of really promising ideas presented at the WIP sessions as well, and recapping the entirety of the workshop is a blog post in and of itself.  
Fortunately, all the papers and slides are openly available on the <a href="http://www.pdsw.org/">PDSW-DISCS website</a>.<br /><br /><h3 id="gala">SC Opening Keynote and Gala</h3>I’ve actually stopped going to the SC keynotes over the last year since they’re increasingly focused on the societal impacts enabled by HPC rather than HPC itself.  While I’m definitely not knocking that theme–it’s a great way to inspire early-career individuals, big-picture program management types, and disenchanted technical folks in the trenches–it’s just not why I attend SC.  Instead, I make use of my exhibitor badge and head into the expo floor before it opens to the public; this is the only time during the conference where I seem to be able to reliably find the people I want to meet at their booths.<br /><br />This year I visited a few small businesses with whom I’ve fostered good will over the last few years to say hello, then dropped in on the SDSC booth to catch up with the latest news from my former coworkers.  They also happen to have free beer on the opening night.<br /><br />Once the expo floor opens to the public following the opening keynote, booth activity goes from zero to eleven really quickly.  Every booth has a big splash during the gala, which makes it hard to choose just one, but my decision this year was made easier by Cray choosing to unveil its new exascale HPC platform, Shasta, and celebrate its <a href="http://investors.cray.com/phoenix.zhtml?c=98390&amp;p=irol-newsarticle&amp;ID=2374181">first sale of a Shasta system to NERSC</a>.<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://4.bp.blogspot.com/-aNYQ3TPdHb8/W_ceLrO4wUI/AAAAAAABCnA/mSzGaeTYLEwQMvIC4k-Hsbo5-1YFM-kmACK4BGAYYCw/s1600/IMG_4890.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://4.bp.blogspot.com/-aNYQ3TPdHb8/W_ceLrO4wUI/AAAAAAABCnA/mSzGaeTYLEwQMvIC4k-Hsbo5-1YFM-kmACK4BGAYYCw/s400/IMG_4890.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Cray CEO Pete Ungaro at the Shasta unveiling ceremony</td></tr></tbody></table><br />This new system, named <a href="http://www.nersc.gov/systems/perlmutter/">Perlmutter</a>, will be delivered in 2020 and has a bunch of really slick new technologies incorporated into it.<br /><br />After Cray CEO Pete Ungaro unveiled the prototype Shasta blades, there was a celebratory toast and both NERSC and Cray staff donned their "ASK ME ABOUT SAUL" pins:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://2.bp.blogspot.com/-WZaeoBPQOD0/W_cfwQuPqeI/AAAAAAABCnM/s4WPrAzqC3og8NZtGbmArI0OkuujzKFgACK4BGAYYCw/s1600/IMG_1897.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="200" src="https://2.bp.blogspot.com/-WZaeoBPQOD0/W_cfwQuPqeI/AAAAAAABCnM/s4WPrAzqC3og8NZtGbmArI0OkuujzKFgACK4BGAYYCw/s200/IMG_1897.jpeg" width="200" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">NERSC and Cray staff got these VIP pins to promote NERSC’s next system, named after astrophysicist, Nobel laureate, and Berkeley Lab scientist Saul 
Perlmutter.</td></tr></tbody></table><br />I stuck around to shake hands with my colleagues at Cray (including the CEO himself!  I haven’t washed my hand since) and catch up with some of my counterparts in storage R&amp;D there.<br /><br /><h3 id="bash">The Beowulf Bash</h3>The gala shut down at 9 PM, at which time I headed over to the <a href="https://beowulfbash.com/">Beowulf Bash</a> to try to find some colleagues who said they would be there.  I generally don’t prioritize parties at SC for a couple of reasons:<br /><ol><li>Shouting over music all night is a great way to burn out one’s voice.  This is not good when I have to present something the next day.</li><li>The crowds and lines often undercut my enjoyment of catching up with old colleagues (and meeting new ones).</li><li>I almost always have slides that need to be finished by the end of the night.</li></ol><div>I make an exception for the Bash because I personally value many of the people behind organizing and sponsoring it, and it captures the scrappier side of the HPC community which helped me get my foot in the door of the industry.  This year I specifically went to catch up with my colleagues at <a href="https://www.nextplatform.com/">The Next Platform</a>; Nicole and Tim are uncommonly insightful and talented writers and editors, and they always have wacky anecdotes to share about some of the more public figures in our industry.</div><br />More generally and self-servingly though, maintaining a good relationship with members of the HPC trade press at large has tremendous value over time regardless of your affiliation or job title.  Behind every interesting HPC news article is an editor with incomparable access to a broad network of people in the industry.  Despite this, they are still subject to the same haters as anyone else who puts something out in the spotlight, so I have to imagine that putting in a kind word in person is always worth it.<br /><br />At around midnight, only the die-hards were still around.<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-ZdI6xLEacbM/W_cnS_b4VhI/AAAAAAABCnY/J1EGks-vYbI8bw6o6OKlBZl6eHcmSr5YwCK4BGAYYCw/s1600/IMG_4891.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="225" src="https://1.bp.blogspot.com/-ZdI6xLEacbM/W_cnS_b4VhI/AAAAAAABCnY/J1EGks-vYbI8bw6o6OKlBZl6eHcmSr5YwCK4BGAYYCw/s400/IMG_4891.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Late night Beowulf Bash at Eddie Deen's Ranch.</td></tr></tbody></table><br />Regrettably, I barely had any time to catch up with my colleagues from the FreeNode HPC community at the Bash (or at all).  Maybe at ISC.<br /><br />After getting back to the hotel, I realized I hadn't eaten anything since lunch.  I also learned that absolutely nothing that delivers food in the downtown Dallas area is open after midnight. 
 After waiting an hour for a food delivery that, it turned out, had been routed to a restaurant that wasn't even open, I had to settle for a hearty dinner of Hot Pockets from the hotel lobby.<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://3.bp.blogspot.com/-Mo_QP_NnEow/W_coivLrvoI/AAAAAAABCnk/rStboNb1iAQ2GLWPIAElzC3IdCoRqvmcQCK4BGAYYCw/s1600/56378490119__E6748A65-8655-4DDC-8502-639F0A830956.jpg" style="margin-left: auto; margin-right: auto;"><img border="0" height="320" src="https://3.bp.blogspot.com/-Mo_QP_NnEow/W_coivLrvoI/AAAAAAABCnk/rStboNb1iAQ2GLWPIAElzC3IdCoRqvmcQCK4BGAYYCw/s320/56378490119__E6748A65-8655-4DDC-8502-639F0A830956.jpg" width="240" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">I hadn't eaten a Hot Pocket since graduate school.  They still taste the same.</td></tr></tbody></table><br />Fortunately my Thursday--er, Tuesday--was relatively light on hard obligations.<br /><br /><h2 id="tuesday">Tuesday, November 13</h2>Tuesday was the first day in which the SC technical program and expo were both in full swing.  I split the day between paper talks, meetings, and the expo floor.<br /><br /><h3 id="tuesdaytechprog">Technical Program, Part 1 - Data and Storage</h3>My Tuesday morning began at 10:30 AM with the <a href="https://sc18.supercomputing.org/session/?sess=sess179">Data and Storage paper presentation session</a> in the technical program.  Of note, the <a href="https://twitter.com/glennklockwood/status/1062385999026814976">first two papers presented were about cloud-centric storage</a> paradigms, and only the third one was clearly focused on scientific HPC workloads.<br /><br /><ul><li><a href="https://sc18.supercomputing.org/presentation/?id=pap165&amp;sess=sess179">SP-Cache: Load-Balanced, Redundancy-Free Cluster Caching with Selective Partition</a> by Yu et al was a paper squarely aimed at reducing the tail latency of reads.  Very important if you want to load an old Gmail message without waiting more than a few seconds for it to load, but less useful for most scientific HPC workloads.</li><li><a href="https://sc18.supercomputing.org/presentation/?id=pap585&amp;sess=sess179">BESPOKV: Application Tailored Scale-Out Key-Value Stores</a> by Anwar et al presented a framework that is uncannily similar to the Mochi paper presented at PDSW on the day before.  The premise was to allow people to compose their own Cassandra-like KV store with a specific balance of consistency and durability without having to reinvent the basic building blocks.</li><li><a href="https://sc18.supercomputing.org/presentation/?id=pap450&amp;sess=sess179">Scaling Embedded In Situ Indexing with DeltaFS</a> by Zheng et al was the talk I really wanted to hear but had to miss on account of a conflicting meeting.  
The DeltaFS work being done by CMU and LANL is a really innovative way to deal with the scalability challenges of parallel file system metadata, and I think it’s ultimately going to be where many of the nascent software-defined storage technologies aimed at HPC converge.</li></ul><div>Unfortunately I had to cut out of the session early to meet with a vendor partner at a nearby hotel.</div><br /><h3 id="tuesdayinterlude">Interlude of Meetings</h3>The first of my two vendor meetings at this year’s SC was less a sales call and more a continuation of a long-running discussion about technology futures in the five-to-ten-year timeframe.  No sane vendor will commit to any roadmap that far out, especially given the uncertainty surrounding post-Moore’s Law technologies, but they are receptive to input from customers who are formulating their own strategic directions for the same time period.  Maintaining these sorts of ongoing conversations is a major part of what falls under my job title in “advanced technologies.”<br /><br />Unfortunately that vendor meeting overlapped with the Lustre BOF, but other staff from my institution were able to attend and ensure that our interests were represented.  I was able to attend the Lustre Lunch that followed the BOF, though, which was very fruitful; in addition to simply being present to remind the Lustre community that I (and the institution I represent) am a part of it, I happened to connect in person with <a href="https://twitter.com/rajgautam">someone I’ve known for a few years via Twitter</a> and made a valuable connection.  Unfortunately I had to leave the Lustre Lunch early for another meeting, unrelated to SC, that allowed a geographically distributed committee to meet face-to-face.<br /><br />After that committee meeting, I seized the free hour I had to visit the showroom floor.<br /><br /><h3 id="tuesdayexpo">Expo Floor, Part 1</h3>The first photo-worthy tech I saw was the Shasta blade at the <b>Cray booth</b>.  Because the booth was mobbed with people during the previous night’s gala, this was actually my first time seeing Shasta hardware up close.  Here’s the compute blade:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-RC-DE7ZI8CY/W_dAcEULkyI/AAAAAAABCoM/yv5pEDrWxrAzyWFn3IlvIfM6zODvsfwgwCK4BGAYYCw/s1600/IMG_4899.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-RC-DE7ZI8CY/W_dAcEULkyI/AAAAAAABCoM/yv5pEDrWxrAzyWFn3IlvIfM6zODvsfwgwCK4BGAYYCw/s400/IMG_4899.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Part of a Cray Shasta compute blade up-close</td></tr></tbody></table><br />Unlike the Cray XC blade of today’s systems, which uses a combination of forced-air convection and heat exchangers to enable liquid cooling, these Shasta blades have direct liquid cooling, which is rapidly becoming a de facto minimum requirement for an exascale-capable rack and node design.  
I had some questions, so I struck up a conversation with a Cray employee at the booth and learned some neat things about the Shasta packaging.<br /><br />For the sake of clarity, here is a hand-drawn, annotated version of the same photo:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-1x8QsBpe6ok/W_dAql81IwI/AAAAAAABCoU/pRyu5minI_0XK8_k_iFW3kSh6vc_nadJQCK4BGAYYCw/s1600/IMG_4899%2B2.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-1x8QsBpe6ok/W_dAql81IwI/AAAAAAABCoU/pRyu5minI_0XK8_k_iFW3kSh6vc_nadJQCK4BGAYYCw/s400/IMG_4899%2B2.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Part of a Cray Shasta compute blade up-close with my annotations</td></tr></tbody></table><br />What stood out to me immediately was the interesting way in which the DIMMs are direct-liquid cooled.  Unlike IBM’s attempt at this with the POWER 775 system (the PERCS system of Blue Waters infamy), where cold plates were attached to every DIMM, Cray has opted to use what looks like a heat-conductive foam that wraps copper cooling lines.  To service the DIMMs, the entire copper cooling complex that runs between the two rows of two DIMMs unfastens and lifts up.  There’s enough slack in the liquid cooling lines (highlighted in purple) that the DIMMs (and presumably every other field-replaceable part in the blade) can be serviced without draining the coolant from the blade.<br /><br />The NIC is also pretty interesting; it is a commercial high-end data center Ethernet NIC that’s manufactured in a custom form factor to fit this blade.  It looks like a second CPU is housed underneath the NIC, so it may be that the NIC and one of the CPUs share a common cooling block.  The NIC is also positioned perpendicular to the long edge of the blade, meaning that there are probably some fairly long cable runs going from the front-most NIC all the way to the rear of the blade.  Finally, because the NIC is on a discrete mezzanine card, the networking technology is no longer soldered to the compute as it is with Aries on today’s XC.<br /><br />The network switch (which <a href="https://twitter.com/ernstdj/status/1062425074425315328">I did not photograph, but others did</a>) is another blade that slots into the rear of the Shasta cabinet and mates perpendicularly with a row of compute blades such that a single switch blade can service a fully populated compute chassis.  The engineer with whom I spoke said that these Shasta cabinets have no actual midplane; the compute blades connect directly to the switch blades through a bunch of holes cut out of the sheet metal that separates the front of the cabinet from the rear.  
Without a midplane there is presumably one less single point of failure; at the same time, though, it wasn’t clear to me how out-of-band management works without a centralized controller somewhere in the chassis.<br /><br />At this point I should point out that all of the above information is what I learned by talking to a Cray booth employee at SC without any special privilege; although I’m sure more details are available under non-disclosure, I frankly don’t remember any of them because I don’t work on the compute side of the system.<br /><br />My next big stop on the showroom floor was the <b>Fujitsu booth</b>, where they had their post-K prototype hardware on display.  Of particular note was their A64FX engineering sample:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://1.bp.blogspot.com/-SxfvUEu4-a8/W_dHNdImJcI/AAAAAAABCok/6AVXnJCCjMgZ2bd1z1Xyg6xBttqofVXSACK4BGAYYCw/s1600/IMG_4903.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="225" src="https://1.bp.blogspot.com/-SxfvUEu4-a8/W_dHNdImJcI/AAAAAAABCok/6AVXnJCCjMgZ2bd1z1Xyg6xBttqofVXSACK4BGAYYCw/s400/IMG_4903.jpeg" width="400" /></a></div><br /><br />If you look very carefully, you can see the four stacks of high-bandwidth memory (HBM) on the package along with the ARM processor, which is fantastically historic in that it’s the first general-purpose CPU (of which I am aware) to integrate HBM2.  What’s not present is any indication of how the on-chip Tofu NIC is broken out; I guess I was expecting something like Intel’s -F series KNLs with on-package Omni-Path.<br /><br />A sample node of the post-K system was also on display:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://1.bp.blogspot.com/-E-nmVR8mYuk/W_dIVsPZPqI/AAAAAAABCow/wiOV3yLXH60Q3EoqKilPsgD4xkbMnGssACK4BGAYYCw/s1600/IMG_4902.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="300" src="https://1.bp.blogspot.com/-E-nmVR8mYuk/W_dIVsPZPqI/AAAAAAABCow/wiOV3yLXH60Q3EoqKilPsgD4xkbMnGssACK4BGAYYCw/s400/IMG_4902.jpeg" width="400" /></a></div><br />Seeing as both this post-K system and Cray Shasta are exascale-capable system architectures, it’s interesting to compare and contrast them.  Both have direct liquid cooling, but the post-K compute blade does not appear to have any field-replaceable units.  Instead, the entire board seems to be a single FRU, so CPUs must be serviced in pairs.  I think the A64FX lacks any cache coherence bus, meaning that two CPUs correspond to two nodes per FRU.<br /><br />That all said, the post-K design does not appear to have any DDR DRAM, and the NIC is integrated directly into the CPU.  With those two components out of the picture, the rate of single-component failure is probably a lot lower in post-K than it would be in Shasta.  
Hopefully the post-K HBM has ECC, though!<br /><br />In chatting with a Fujitsu engineer about the post-K node architecture at their booth, I also met <a href="https://twitter.com/hei_nyan">a Fujitsu engineer</a> who just happened to be developing LLIO, the post-K system’s burst buffer service:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://2.bp.blogspot.com/-IsqISZrmqBw/W_dNKiGmH2I/AAAAAAABCo8/FFgBybUIUD4JfINVq6BRK7Q6Yait4nbRACK4BGAYYCw/s1600/IMG_4904.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="262" src="https://2.bp.blogspot.com/-IsqISZrmqBw/W_dNKiGmH2I/AAAAAAABCo8/FFgBybUIUD4JfINVq6BRK7Q6Yait4nbRACK4BGAYYCw/s400/IMG_4904.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">LLIO burst buffer slide shown at the Fujitsu booth</td></tr></tbody></table><br />It sounds a lot like DataWarp in terms of features, and given that Fujitsu is also developing a new Lustre-based file system (FEFS 2.0?) for post-K, we might see tighter integration between the LLIO burst buffer layer and the FEFS back-end disk storage.  This is a technology that wasn’t on my radar before SC but is definitely worth keeping an eye on as 2021 approaches.<br /><br />As I was racing between a few other booths, I also happened upon my boss (and NERSC-9 chief architect) presenting the Perlmutter system architecture at the <b>NVIDIA booth</b>:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-lsEkqobGmZI/W_dht1RUO-I/AAAAAAABCpI/FRUPAFyIpJYHrRlODSyeFVp8Gma8w4JqACK4BGAYYCw/s1600/IMG_4905.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="225" src="https://1.bp.blogspot.com/-lsEkqobGmZI/W_dht1RUO-I/AAAAAAABCpI/FRUPAFyIpJYHrRlODSyeFVp8Gma8w4JqACK4BGAYYCw/s400/IMG_4905.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;"><a href="http://www.nersc.gov/about/nersc-staff/advanced-technologies-group/nicholas-wright/">NERSC’s Nick Wright</a>, chief architect of the Perlmutter system, describing its architecture at the NVIDIA booth</td></tr></tbody></table><br /><br />The talk drew a crowd–I’m glad to see people as jazzed about the new system as I am.<br /><br /><h3 id="paralleliobof">Analyzing Parallel I/O BOF</h3>The <a href="https://sc18.supercomputing.org/presentation/?id=bof123&amp;sess=sess382">Analyzing Parallel I/O BOF</a> is a must-attend event for anyone in the parallel I/O business, and this year’s BOF was especially good.  
Andreas Dilger (of Lustre fame; now CTO of Whamcloud) gave a brief but insightful retrospective on understanding I/O performance:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://3.bp.blogspot.com/-MgBl_XxRySg/W_djpH7FX7I/AAAAAAABCpU/gyigkciDcgEHKl314Li6Qa-9xsD9L637gCK4BGAYYCw/s1600/IMG_4908.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="225" src="https://3.bp.blogspot.com/-MgBl_XxRySg/W_djpH7FX7I/AAAAAAABCpU/gyigkciDcgEHKl314Li6Qa-9xsD9L637gCK4BGAYYCw/s400/IMG_4908.jpeg" width="400" /></a></div><br />Unfortunately I did not take a picture of Andreas’ second slide (available on <a href="https://hps.vi4io.org/events/2018/bof-analyzing">the Analyzing Parallel I/O BOF’s website</a>), a “what is needed?” slide that largely revolves around better integration between storage system software (like Lustre) and user applications.  I/O middleware seems to be at the center of most of the bullets that called for increased development, which bodes well for the scientific application developers who attended the Parallel I/O in Practice tutorial on Sunday–recall that this was my key takeaway.  It’s good to know that the lead of Lustre development agrees with this vision of the future, and I hope Whamcloud moves Lustre in this direction so users and middleware developers can meet the storage system software somewhere in the middle.<br /><br />The BOF took a darker turn after this, starting with a presentation from Si Liu of TACC about the Optimal Overloaded IO Protection System, or OOOPS.  It’s a library that wraps the standard POSIX I/O calls:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://2.bp.blogspot.com/-QqCtiINUB4o/W_dnlx7FK_I/AAAAAAABCpg/Acch-MI1AKoWQLt2G53_fOJ2UUqI2o8XQCK4BGAYYCw/s1600/IMG_4909.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="240" src="https://2.bp.blogspot.com/-QqCtiINUB4o/W_dnlx7FK_I/AAAAAAABCpg/Acch-MI1AKoWQLt2G53_fOJ2UUqI2o8XQCK4BGAYYCw/s320/IMG_4909.jpeg" width="320" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">OOOPS operates by hijacking standard I/O calls and lagging them.</td></tr></tbody></table><br /><br />But in addition to passively monitoring how an application performs I/O, it purposely injects latency to throttle the rate at which I/O operations get issued by an application.  That is, it purposely slows down I/O from clients to reduce server-side load and, by extension, the effects of a single bad actor on the I/O performance of all the other users.
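<br /><br />To make the mechanism concrete, below is a minimal sketch of my own showing how an LD_PRELOAD-based interposer can inject latency into an application's I/O calls.  To be clear, this is a toy illustration of the general technique, not TACC's actual code; the library name, environment variable, and fixed-delay policy are all made up:<br /><br /><pre>/* throttle_sketch.c: a toy LD_PRELOAD interposer that injects latency
 * into write(2), in the spirit of OOOPS (but not TACC's implementation).
 * Build and use:
 *   gcc -shared -fPIC -o throttle_sketch.so throttle_sketch.c -ldl
 *   LD_PRELOAD=./throttle_sketch.so ./my_io_heavy_app
 */
#define _GNU_SOURCE
#include &lt;dlfcn.h&gt;
#include &lt;stdlib.h&gt;
#include &lt;time.h&gt;
#include &lt;unistd.h&gt;

static long delay_us = 1000;  /* hypothetical knob: extra usec per call */

ssize_t write(int fd, const void *buf, size_t count)
{
    static ssize_t (*real_write)(int, const void *, size_t) = NULL;
    if (!real_write) {
        /* look up the next "write" in the link chain (normally libc's) */
        real_write = (ssize_t (*)(int, const void *, size_t))
                     dlsym(RTLD_NEXT, "write");
        const char *env = getenv("THROTTLE_DELAY_US");  /* made-up name */
        if (env)
            delay_us = atol(env);
    }
    /* sleep before issuing the real I/O, capping this client's op rate */
    struct timespec ts = { .tv_sec  = delay_us / 1000000,
                           .tv_nsec = (delay_us % 1000000) * 1000 };
    nanosleep(&amp;ts, NULL);
    return real_write(fd, buf, count);
}</pre><br />A real implementation would wrap the whole family of calls (open, read, stat, and friends) and adapt the delay to the observed request rate rather than sleeping a fixed interval, but the basic interposition trick is the same.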
<br /><br />Ideologically, I have a lot of problems with an HPC facility inserting itself into the user’s workflow and reducing the efficiency with which he or she can accomplish their science relative to the peak capability of the HPC resource.  If a storage system allows a single user to accidentally deny service to other users in pursuit of peak performance, that is a problem with the storage system, and it should be addressed at the system level.  And as Andreas pointed out in the BOF, tools exist to allow storage systems to accomplish fair sharing, which is distinctly different from explicitly penalizing users.  Granted, TACC is also the facility where one of its staff went on record as saying that the R language should not be used by anyone since it is a waste of energy.  Perhaps they have an institutionally different relationship with their user community.<br /><br />Fortunately, anything that relies on LD_PRELOAD can be circumvented by users, so OOOPS is unlikely to be used to enforce any kind of resource usage policy as it was pitched during the BOF.  I do see a lot of value in using it to fence data analysis workflows that may hit a pathological condition as a result of their inputs, and being able to trigger changes in application behavior by tracking I/O rates is a technique that could be useful in auto-tuning I/O middleware.<br /><br />Rosemary Francis, CEO of Ellexus, also spoke at the BOF, making the case for I/O performance analysis that is more accessible to end users.  I was quite delighted by the visualizations she presented (presumably from her company’s Breeze product), which used both color and human-readable “bad” I/O patterns to create a pie chart that quickly shows how much time an application spent doing I/O in various good, bad, and neutral ways.  Darshan, the tried-and-true open source I/O profiling library, operates at a slightly lower level and assumes a slightly higher level of user sophistication by comparison.<br /><br />The discussion half of the BOF was packed with engagement from the audience–so much so that I didn’t find any moments of silence in which to stump for my own view of the world.  The combination of OOOPS and Rosemary’s I/O war stories did steer the discussion towards ways to punish bad users, though.  I can appreciate HPC operators’ frustration with novice users causing system-wide problems, but I don’t think shaming users who do bad I/O is a great solution.  Rather, something between OOOPS’ automatic identification of bad I/O at runtime and Ellexus’ user-centric reporting and feedback, combined with storage systems capable of enforcing QoS, is where we need to go.<br /><br /><h3 id="crayparty">The Cray Celebration</h3>I wrote earlier that I normally don’t do the SC vendor party circuit, but the Cray party this year was another exception for two reasons: (1) we had just announced Perlmutter along with Cray’s Shasta unveiling, which was worth celebrating, and (2) there were specific Cray staff with whom I wanted to confer sometime during the week.  So after the Parallel I/O BOF, I headed over to the event venue:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://4.bp.blogspot.com/-RVFQ8qG9FLg/W_d4w-ZzXpI/AAAAAAABCpw/rj2edkzFiMc5PQYOM6tAWh-475M7BdlQgCK4BGAYYCw/s1600/IMG_4910.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="400" src="https://4.bp.blogspot.com/-RVFQ8qG9FLg/W_d4w-ZzXpI/AAAAAAABCpw/rj2edkzFiMc5PQYOM6tAWh-475M7BdlQgCK4BGAYYCw/s400/IMG_4910.jpeg" width="300" /></a></div><br />The event was quite nice in that it was not held at a loud bar (which made conversation much easier), it had plenty of food (no need for 2 AM Hot Pockets), and the format was conducive to moving around and meeting a lot of different people.  The event was awash with representatives from all the major Cray customers including the DOE labs, the big oil &amp; gas companies, and the regional leadership computing centers in EMEA including CSCS and KAUST, as well as alumni of all those employers and Cray itself.  
I’ve only worked at a Cray customer site for three years now, but I couldn’t walk ten feet without running into someone I knew; in that sense, it felt a little like an event at the annual Cray User Group meeting but with a broader range of attendees.<br /><br />I don’t know what this event would’ve been like if I were a student or otherwise didn’t already know many of the regular faces within the Cray user community and instead had to start conversations cold.  That said, I was busy the entire evening getting to know the people behind all the conference calls I’m on; I find that getting to know my industry counterparts as people rather than just vendor reps really pays dividends when surprises happen and conflicts need to be resolved.  Events like this at SC are invaluable for building and maintaining these sorts of relationships.<br /><br /><h2 id="wednesday">Wednesday, November 14</h2>My Wednesday began bright and early with a quick run-around of the expo floor to figure out who I needed to visit before the end of the week.<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://4.bp.blogspot.com/-SSHpcrqDu3o/W_eXyaZ5sxI/AAAAAAABCp8/ptdxmFa9eh4_-QgJvcmQqZtCQBYbP5d2wCK4BGAYYCw/s1600/IMG_4913.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="155" src="https://4.bp.blogspot.com/-SSHpcrqDu3o/W_eXyaZ5sxI/AAAAAAABCp8/ptdxmFa9eh4_-QgJvcmQqZtCQBYbP5d2wCK4BGAYYCw/s400/IMG_4913.jpeg" width="400" /></a></div><br />The expo floor was awkwardly laid out this year, so I really needed to do this to make sure I didn’t spin my tires trying to find certain booths once the crowd showed up.  Incidentally, I did witness a salesperson violate the unwritten rule of keeping everything friendly until the expo floor opened to the public–a sales rep selling “the world’s fastest storage system” tried to stir up cold sales leads at my employer’s booth at 8 AM while we were all still drinking our coffee and catching up on e-mail.  If you do this, shame on you!  Respect the exhibitor access and don’t put your game face on until the public is allowed in.<br /><br /><h3 id="wednesdaymorning">SC Student Career Fair and Booth Talk</h3>My first meeting was a chat over coffee with <a href="https://www.vastdata.com/">VAST Data</a>, a storage technology company that has some really innovative and exciting ideas in the pipeline, to keep up to date with the latest news as they approach public launch.<br /><br />My second obligation was volunteering at my employer’s booth at the SC Career Fair.  I generally enjoy booth duty and talking to students, and this year I was doubly motivated by my desire to fill some career and student job openings related to my responsibilities.  A diverse cross section of students dropped by our booth looking for both summer internships and full-time jobs; many seemed very well rehearsed in their cold pitch, while others were a little more casual or cautious.  Although I’m not particularly qualified to give career advice, I will say that knowing how to sell yourself cold can be a valuable skill in your early career.  If you are seeking employment, be prepared to respond to a request to “tell me about yourself” in a way that makes you stand out.<br /><br />After the Career Fair, I wound up hunkering down at the SDSC booth to have lunch with my former coworkers and review the slides I had volunteered to present at the adjacent DDN booth.<br /><br />At 2 PM I took the stage (booth?) 
and one of my colleagues was not only kind enough to sit in on this booth talk, but also <a href="https://twitter.com/suhaibkhan/status/1062797409963724800">shared this photo he took</a> right before I started:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://3.bp.blogspot.com/-psgfnpOmiNs/W_eg1Ao9N2I/AAAAAAABCqI/BX_TzjeqBXsPg73VMO7OzW5p2K-wP1lfACK4BGAYYCw/s1600/IMG_1881.jpg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://3.bp.blogspot.com/-psgfnpOmiNs/W_eg1Ao9N2I/AAAAAAABCqI/BX_TzjeqBXsPg73VMO7OzW5p2K-wP1lfACK4BGAYYCw/s400/IMG_1881.jpg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Beginning of my talk at the DDN booth.  Photo credit goes to Suhaib Khan via Twitter.</td></tr></tbody></table><br />I continue to be humbled that anyone would go out of their way to come hear what I have to say, especially when my talk is as unvetted as booth talks tend to be.  Talking at booths rarely goes well for me; the audio is always a wildcard, the audience is often unwitting, and auditory and visual distractions are everywhere.  The DDN booth was my sole booth talk of this year, and it went about as well as I would have expected.  On the up side, quite a few attendees seemed genuinely interested to hear what I had to say about the variety of ways one can deploy flash in an HPC system.  Unfortunately, I ran a few minutes long and got derailed by external distractions several times during the presentation.  Flubbing presentations happens, and none of the audience members seemed to mind.<br /><br />Shortly after the booth talk, I had to find a quiet spot to jump on a telecon.  This was no easy task; since cell phones killed the public phone booth, there are very few places to take a call on the expo floor.<br /><br /><h3 id="wednesdayexpo">Expo Floor, Part 2</h3>The afternoon afforded me two more hours to race around the expo floor.  Despite my planning earlier in the morning, I wound up spinning my tires looking for a few key vendors who simply didn’t show up to SC this year, including<br /><br /><ul><li>Samsung and SK Hynix, two of the top three DRAM vendors and the sole manufacturers of HBM2</li><li>Seagate, one of two hard disk drive manufacturers</li><li>Broadcom/Avago, the company manufacturing most of the SerDes used in the upcoming 200G and 400G network devices</li><li>Juniper, one of the major players in the 400 GbE space</li><li>AdvancedHPC, one of the few US integrators selling BeeGFS</li></ul><br />I’m not really sure why so many vendors skipped this year, but it made getting a holistic view of the storage and networking technology markets impossible.  That said, I still saw a few noteworthy things.<br /><br />One of the big open questions in high-performance storage revolves around the battle between the NF1 (formerly NGSFF, promoted by Samsung) and EDSFF (promoted by Intel) form factors for NVMe.  It’s clear that these long-and-skinny NVMe designs are going to have to replace the thermally inefficient 2.5” U.2 and unserviceable HHHL PCIe form factors, but the dust is far from settled.  
On the one hand, Samsung leads flash storage sales worldwide, but their NF1 form factor caps the power consumption (and therefore performance) of its devices to levels that are squarely aimed at cheaper data center flash.  On the other, the EDSFF form factor being pushed by Intel has a short version (competing directly with NF1) and a longer version that allows higher power.<br /><br />The <b>Supermicro booth</b> had actual EDSFF drives on display, and this was the first time I could see one up close:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-7jvyY9w2JfI/W_evRjopdII/AAAAAAABCqY/9HnWCAv9ovAImo6oeokaY_fcnbvZrXaDQCK4BGAYYCw/s1600/IMG_4915.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-7jvyY9w2JfI/W_evRjopdII/AAAAAAABCqY/9HnWCAv9ovAImo6oeokaY_fcnbvZrXaDQCK4BGAYYCw/s400/IMG_4915.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">A long-type EDSFF NVMe drive at the Supermicro booth.  The aluminum casing is actually required to meet the thermals.</td></tr></tbody></table><br /><br />What I didn’t realize is that the higher thermal specification enabled by the long-version EDSFF drives requires that the entire SSD circuit board be enclosed in the aluminum casing shown to enable better heat dissipation.  This has the nasty side effect of reducing density; while a standard 19” 1U chassis can fit up to 36 NF1 SSDs, the aluminum casing on long EDSFF drives reduces the equivalent density to 32 SSDs.  Although long EDSFF drives can compensate by packing more NAND dies on the physically longer EDSFF board, supporting these longer SSDs requires more engineering on the chassis design to fit the same amount of compute into a smaller area.<br /><br />In a similar vein, the <b>Lenovo booth</b> was showcasing their D3284 JBOD, which packs 84x 3.5” HDDs into a double-decker 5U chassis.  I had naively assumed that all of these super-dense 84-drive enclosures were top-loading, such that each drive mates to a backplane mounted to the floor of the chassis, but it turns out that’s not the case:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-hBOA2lL4nlw/W_e1CJZPqoI/AAAAAAABCqk/KC1o-eaOaNYbbvmibNYiD7vfoExdk6jlQCK4BGAYYCw/s1600/IMG_4918.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-hBOA2lL4nlw/W_e1CJZPqoI/AAAAAAABCqk/KC1o-eaOaNYbbvmibNYiD7vfoExdk6jlQCK4BGAYYCw/s400/IMG_4918.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Lenovo’s 5U84 JBOD</td></tr></tbody></table><br />Instead, each 3.5” drive goes into its 2.5U shelf on its side, and each drive attaches to a carrier that has to be slid slightly toward the front of the JBOD to release the drive, and then slid toward the back of the JBOD to secure it.  
This seems a little harder to service than a simple top-load JBOD, but I assume there are thermal efficiencies to be gained by this layout.<br /><br />The <b>Western Digital booth</b> had a pretty broad portfolio of data center products on display.  Their newest gadget seems to be a planar NAND-based U.2 device that can present itself as DRAM through a custom hypervisor.  This sounds like a direct competitor to Intel’s Memory Drive offering, which uses ScaleMP’s hypervisor to expose flash as DRAM to a guest VM.  The combination of exposing flash as very slow memory and relying on software virtualization makes this a technology not really meant for HPC, and the engineer with whom I spoke confirmed as much.  Virtualized big-and-slow memory is much more appealing to in-memory databases such as SAP HANA.<br /><br />Perhaps more interesting was the lack of any mention of Western Digital’s investment in storage-class memory and microwave-assisted magnetic recording (MAMR) disk drives.  When I prodded about the state of MAMR, I was assured that the technology will work because there is no future for hard drives without some form of energy-assisted magnetic recording.  However, product announcements are still 18-24 months away, and these drives will enter the market at the rather underwhelming capacity of ~20 TB.  Conveniently, this matches Seagate’s recent cry of wolf that they will <a href="https://www.theregister.co.uk/2018/03/21/seagate_to_drop_multiactuator_hamr_in_2020/">launch HAMR drives in 2020 at a 20 TB capacity point</a>.  Western Digital also made no mention of multi-actuator drives, and asking about it only got me a sly grin; this suggests that Western Digital is either playing slow and steady so as not to over-promise, or Seagate has a slight technological lead.<br /><br />My last substantive stop of the afternoon was at the <b>IBM booth</b>, where they had one of their new TS4500 tape libraries operating in demo mode.  The window was too reflective to take a video of the robotics, but I will say that there was a perceptible difference between the robotics in IBM’s enterprise tape library and the robotics in another vendor’s LTO tape library.  The IBM enterprise robotics are downright savage in how forcefully they slam tapes around, and I now fully believe IBM’s claims that their enterprise cartridges are constructed to be more physically durable than standard LTO.  I’m sure there’s some latency benefit to being able to ram tapes into drives and library slots at full speed, but it’s unnerving to watch.<br /><br />IBM also had this cheeky infographic on display that was worth a photo:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://4.bp.blogspot.com/-3ADWFs9y1M8/W_e-W-67hfI/AAAAAAABCqw/6j4stvBWBici6SvDRTx17gIhMCHQNGjOACK4BGAYYCw/s1600/IMG_4919.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="400" src="https://4.bp.blogspot.com/-3ADWFs9y1M8/W_e-W-67hfI/AAAAAAABCqw/6j4stvBWBici6SvDRTx17gIhMCHQNGjOACK4BGAYYCw/s400/IMG_4919.jpeg" width="300" /></a></div><br />If I built a tape drive that was still operating after forty years in outer space, I’d want to brag about it too.  
But there are a couple of factual issues with this marketing material that probably made every physical scientist who saw it roll their eyes.<br /><br />Over at the compute side of the IBM booth, I learned that the Summit and Sierra systems sitting at the #1 and #2 positions on Top500 are built using node architectures that IBM is selling commercially.  There are 2 CPU + 6 GPU nodes (which is what Summit at OLCF has) that require liquid cooling, and 2 CPU + 4 GPU nodes (which is what Sierra at LLNL has) that can be air- or liquid-cooled.  I asked an IBM technologist which configuration is more commercially popular, and the Sierra configuration is currently leading sales due to the relative lack of infrastructure to support direct liquid cooling in commercial data centers.<br /><br />This has interesting implications for the exascale technologies I looked at on Tuesday; given that the exascale-capable system designs presented by both Fujitsu and Cray rely on direct liquid cooling, the gap between achieving exascale-level performance and delivering a commercially viable product is pretty wide from a facilities perspective.  Fortunately, the <a href="https://twitter.com/ProfMatsuoka/status/1062771762721644544">Fujitsu A64FX chip usually runs below 200 W</a> and can feasibly be air-cooled with lower-density packaging, and <a href="https://www.nextplatform.com/2018/10/30/cray-slingshots-back-into-hpc-interconnects-with-shasta-systems/">Cray’s Shasta will support standard air-cooled 19” racks</a> via lower-density nodes.<br /><br /><h3 id="io500bof">The IO-500/VI4IO BOF</h3>The second must-attend BOF for people working in I/O is the IO-500 and Virtual Institute for I/O BOF.  It’s a very pragmatic BOF where people discuss system architecture, benchmarking, and various related community efforts, and since 2017 it has also included the semiannual unveiling of the IO-500 list.<br /><br />This year was exciting in that the top system, a DDN IME installation at JCAHPC, was unseated by the monstrous storage system attached to the Summit system at Oak Ridge, which sustained an astounding 2 TiB/sec and 3 million opens/sec.  
In fact, the previous #1 system dropped to #4, and each of the new top three systems was of a different architecture (Spectrum Scale at Oak Ridge, IME at KISTI, and Lustre at Cambridge).<br /><br />Perhaps the most interesting of these new submissions was the #3 system, the Data Accelerator at Cambridge, a home-grown whitebox system that was designed to be functionally equivalent to DataWarp’s scratch mode:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://3.bp.blogspot.com/-PmYCsLJDJkg/W_g0D2xheFI/AAAAAAABCrE/4vq_SFQavBMN3jN66nfFXhRaSC7rW4WWQCK4BGAYYCw/s1600/IMG_4927.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://3.bp.blogspot.com/-PmYCsLJDJkg/W_g0D2xheFI/AAAAAAABCrE/4vq_SFQavBMN3jN66nfFXhRaSC7rW4WWQCK4BGAYYCw/s400/IMG_4927.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Alasdair King presenting the Data Accelerator design at the IO-500 BOF</td></tr></tbody></table><br /><br />The hardware is just Dell boxes with six NVMe drives and one OPA NIC per socket, and the magic is actually handled by a cleanroom reimplementation of the interface that Slurm uses to instantiate DataWarp partitions on Cray XC systems.  Rather than use a sophisticated orchestration system as DataWarp does, though, the Data Accelerator translates Slurm #DW pragmas into Ansible plays that spin up and tear down ephemeral Lustre file systems.
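<br /><br />To give a flavor of the shape of that translation, here is a toy sketch of my own.  The "#DW jobdw" directive syntax mirrors Cray DataWarp's documented convention, but everything else (the playbook name, the variables, the whole prolog structure) is hypothetical and is not Cambridge's actual implementation:<br /><br /><pre>/* dw_to_ansible.c: a toy sketch of translating #DW directives in a Slurm
 * batch script into an Ansible invocation that would provision an
 * ephemeral Lustre file system.  Illustrative only; not the Data
 * Accelerator's actual code.
 */
#include &lt;stdio.h&gt;
#include &lt;string.h&gt;

int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s &lt;batch script&gt;\n", argv[0]);
        return 1;
    }
    FILE *fp = fopen(argv[1], "r");
    if (!fp) { perror("fopen"); return 1; }

    char line[1024];
    while (fgets(line, sizeof(line), fp)) {
        if (strncmp(line, "#DW ", 4) != 0)
            continue;                      /* not a burst buffer directive */
        line[strcspn(line, "\n")] = '\0';

        /* hand everything after "#DW " to a (hypothetical) play as extra
         * variables, e.g. "#DW jobdw capacity=10TiB access_mode=striped" */
        char cmd[2048];
        snprintf(cmd, sizeof(cmd),
                 "ansible-playbook create_ephemeral_lustre.yml "
                 "--extra-vars '%s'", line + 4);
        printf("would run: %s\n", cmd);
        /* a real job prolog would call system(cmd) and check the result,
         * and a matching epilog would tear the file system back down */
    }
    fclose(fp);
    return 0;
}</pre>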
<br /><br />The fact that the #3 fastest storage system in the world is a whitebox NVMe system is really remarkable, and my hat is off to the team at Cambridge that did this work.  As all-flash parallel file systems move out of the realm of high-end boutique solutions and into the affordable mainstream, relatively scrappy but innovative engineering efforts like the Cambridge system are surely going to cause a rapid proliferation of flash adoption in HPC centers.<br /><br />DDN also presented their software-defined IO-500 submission, this time run in Google Cloud and landing in the #8 position:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://2.bp.blogspot.com/-k8mAG6N4jfM/W_g2VNcP7eI/AAAAAAABCrQ/KVIbo_dGmq0fYeCtLQUiYVRGVxldRJGAACK4BGAYYCw/s1600/IMG_4929%2B2.jpeg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="300" src="https://2.bp.blogspot.com/-k8mAG6N4jfM/W_g2VNcP7eI/AAAAAAABCrQ/KVIbo_dGmq0fYeCtLQUiYVRGVxldRJGAACK4BGAYYCw/s400/IMG_4929%2B2.jpeg" width="400" /></a></div><br />Since DDN’s embedded SFA product line already runs virtual machines on their controller hardware, it doesn’t seem like a big stretch to run the same SFA VMs in the cloud.  While this sounds a little counterproductive to DDN’s biggest differentiator of providing a fully integrated hardware platform, this idea of running SFA in Google Cloud arose from the growing need for parallel file systems in the cloud.  I can only assume that this need is being largely driven by AI workloads, which require a combination of high I/O bandwidth, high IOPS, and POSIX file interfaces.<br /><br /><h2 id="thursday">Thursday, November 15</h2>The conference was showing signs of winding down by Thursday, as many attendees brought their luggage with them to the convention center so they could head back home that night.  The expo floor also closes in the mid-afternoon on Thursday.<br /><br /><h3 id="thursdaytechprog">Technical Program, Part 2 - Exhibitor Forum</h3>My Thursday began at 10:30 AM with the <a href="https://sc18.supercomputing.org/session/?sess=sess270">HPC Storage and Memory Architectures session</a> of the Exhibitor Forum.  Liran Zvibel, former CTO and now CEO of WekaIO, was the first presenter and gave a surprisingly technical description of the <b>WekaIO Matrix parallel file system</b> architecture:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://4.bp.blogspot.com/-OspyBccJT8w/W_hL_EChGsI/AAAAAAABCrc/9qCH5J61FuopiqwpxaMILtA98g_uP8hagCK4BGAYYCw/s1600/IMG_4933.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://4.bp.blogspot.com/-OspyBccJT8w/W_hL_EChGsI/AAAAAAABCrc/9qCH5J61FuopiqwpxaMILtA98g_uP8hagCK4BGAYYCw/s400/IMG_4933.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">WekaIO’s Matrix file system architecture block diagram.  A surprising amount of detail can be gleaned by examining this carefully.</td></tr></tbody></table><br />In terms of building a modern parallel file system from the ground up for all-flash, WekaIO checks off almost all of the right boxes.  It runs almost entirely in user space to keep latency down, it runs in its own reserved pool of CPU cores on each client, and it capitalizes on the approximate parity between NVMe latency and modern high-speed network latency.  WekaIO also makes use of a lot of the smart ideas implemented in the enterprise and hyperscale storage space, and they are one of the few genuinely future-looking storage companies out there thinking about the new possibilities of the all-flash world while still courting the HPC market.<br /><br />There is a fair amount of magic involved that was not broken down in the talk, although I’ve found that the WekaIO folks are happy to explain some of the more complex details if asked specific questions about how their file system works.  I’m not sure what is and isn’t public, though, so I’ll save an architectural deep dive of their technology for a later date.<br /><br /><b>Andreas Schlapka of Micron Technology</b> was the next speaker, and his talk was quite a bit more high-level.  Aside from the grand statements about how AI will transform technology, he did have a couple of nice slides that filled some knowledge gaps in my mind.  
For example:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-LgZtubR8iyw/W_hUGRxlP2I/AAAAAAABCrs/nwHlQOb8ESwX5xLHUJ0KDFvfhBkpCfnqQCK4BGAYYCw/s1600/IMG_4934.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-LgZtubR8iyw/W_hUGRxlP2I/AAAAAAABCrs/nwHlQOb8ESwX5xLHUJ0KDFvfhBkpCfnqQCK4BGAYYCw/s400/IMG_4934.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Broad strokes highlighting the different computational (and architectural) demands of training and inference workloads</td></tr></tbody></table><br />Training is what the vast majority of casual AI+HPC pundits are really talking about when extolling the huge compute requirements of deep learning.  Part of that is because GPUs are almost the ideal hardware solution to tackle the mathematics of training (dense matrix-matrix multiplication) and post impressive numbers; the other part is that inference can’t happen without a well-trained model, and models are continually being refined and re-trained.  What I hadn’t fully appreciated is that inference is a much more interesting computational problem in that it more closely resembles the non-uniform and latency-bound workloads of scientific computing.<br /><br />This has interesting implications for memory technology; while HBM2 definitely delivers more bandwidth than DDR, it does so by increasing the channel width to 128 bits and hard-wiring 8 channels into each stack.  The extra bandwidth helps feed GPUs for training, but it’s not doing much for the inference side of AI which, presumably, will become a much more significant fraction of the cycles required overall.  In my mind, increasing the size of SRAM-based caches, scratchpads, and register files is the more obvious way to reduce latency for inference, but we haven’t really seen a lot of fundamentally new ideas on how to do that effectively yet.<br /><br />The speaker went on to show the following apples-to-apples system-level reference:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://4.bp.blogspot.com/-n5FVQeMoPsk/W_hYIrYjrfI/AAAAAAABCr4/YLsuO00mIlYqJYEFoz2wi0vIP29Ag3e_gCK4BGAYYCw/s1600/IMG_4935.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://4.bp.blogspot.com/-n5FVQeMoPsk/W_hYIrYjrfI/AAAAAAABCr4/YLsuO00mIlYqJYEFoz2wi0vIP29Ag3e_gCK4BGAYYCw/s400/IMG_4935.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">System-level speeds and feeds of the memory products available now or in the near future as presented by Micron</td></tr></tbody></table><br />It’s not terribly insightful, but it lets you back out the bus width of each memory technology (bandwidth ÷ data rate ÷ device count) and figure out where its bandwidth is coming from (a quick back-of-envelope check follows the list):<br /><ul><li>DDR4 and DDR5 use 64-bit channels and rely on increasing channel-level parallelism to improve bandwidth.  This is now putting them in a place where you wind up having to buy far more capacity than you may want just to get sufficient bandwidth; this is analogous to where HDDs are in the HPC storage hierarchy today, and it’s rapidly becoming uneconomical to rely on DDR for bandwidth.</li><li>GDDR uses narrower channels (32 bits) but more of them to get better bandwidth.  It also relies on phenomenally high data rates per pin; I don’t really understand how this is possible given its inefficient single-ended signaling.</li><li>HBM uses both wide (128-bit) and plentiful channels to get its performance; the table is misleading in this regard since <a href="https://twitter.com/ernstdj/status/1066178570748420096">each “device” (HBM stack) contains eight channels</a>.  <strike>This is fine for feeding highly parallel arithmetic units like vector ALUs, but this offers no benefit to latency-bound workloads that, for example, chase pointers to traverse a graph.</strike> <span style="font-size: xx-small;">(it turns out HBM is just fine for pointer chasing–thanks to <a href="https://twitter.com/ernstdj">one of HPC’s memory-wizards-at-large</a> for pointing this out to me!)</span></li></ul>
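As promised, here is the back-of-envelope arithmetic written out.  The per-pin data rates, bus widths, and device counts below are illustrative round numbers of my own choosing, not the exact figures on Micron's slide:<br /><br /><pre>/* membw_sketch.c: sanity-checking the "bandwidth = data rate x bus width
 * x device count" relationship with illustrative (not Micron's) numbers.
 */
#include &lt;stdio.h&gt;

struct mem { const char *tech; double gbps_per_pin; int bus_bits; int devices; };

int main(void)
{
    struct mem m[] = {
        { "DDR4-3200, 1 ch",  3.2,   64, 1 },  /* one 64-bit channel      */
        { "GDDR6, 8 devices", 14.0,  32, 8 },  /* narrow 32-bit channels  */
        { "HBM2, 4 stacks",   2.0, 1024, 4 },  /* 8 x 128-bit ch per stack */
    };
    for (int i = 0; i &lt; (int)(sizeof(m) / sizeof(m[0])); i++) {
        /* GB/s = Gbit/s-per-pin x pins / 8 bits-per-byte x device count */
        double gbs = m[i].gbps_per_pin * m[i].bus_bits / 8.0 * m[i].devices;
        printf("%-18s %8.1f GB/s\n", m[i].tech, gbs);
    }
    return 0;
}</pre><br />Running this prints roughly 25.6 GB/s for the DDR4 channel, 448 GB/s for the GDDR6 array, and 1024 GB/s for four HBM2 stacks, which makes the point of the bullets above: DDR gets its bandwidth from adding channels (and therefore capacity), GDDR from heroic per-pin data rates, and HBM from sheer channel width and count.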
Micron also made the strange assertion that they are the only company that offers the entire range of memory products.  I guess since Samsung and SK Hynix both opted to skip SC, Micron can say whatever it likes; however, Samsung is currently the only company shipping commercial quantities of HBM, and Hynix’s HBM capability just came online.  As far as I know, Micron has never manufactured a stack of HBM since they spent years promoting the competing-but-now-defunct Hybrid Memory Cube technology.<br /><br /><h3 id="nsfbof">The NSF Future Directions BOF</h3>I opted to see what was new with the National Science Foundation’s Office of Advanced Cyberinfrastructure (OAC) at their noon BOF.  Despite having left the NSF world when I left San Diego, I still care deeply about NSF computing because they pay for many of the most accessible HPC resources in the US.  I certainly got my start in HPC on the NSF’s dime at SDSC, and I got to see firsthand the huge breadth of impact that SDSC’s XSEDE resources had in enabling smaller research groups at smaller institutions to perform world-class research.  As such, it’s also no surprise that the NSF leads the pack in developing and deploying many of the peripheral technologies that make HPC accessible, such as federated identity, science gateways, and wide-area file systems.<br /><br />That all said, actually listening to the NSF HPC strategic vision makes me rather grumpy, since the directions of such an important federal office sometimes appear so scattershot.  And judging by the audience questions at the end of the BOF, I am not the only one–Very Important People(tm) in two different national-level HPC consortia asked very pointed questions of Manish Parashar, the NSF OAC director, that highlighted the dichotomy between OAC’s strategic vision and where it was actually putting money.  
I really believe in the critical importance of NSF investment in maintaining national cyberinfrastructure, which is probably why I keep showing up to these BOFs and do my best to support my colleagues at SDSC and the other XSEDE SPs.<br /><br />After sitting through this Future Directions BOF, I could write <a href="https://glennklockwood.blogspot.com/2015/01/thoughts-on-nsf-future-directions.html">another updated rant about how I feel about the NSF’s direction in HPC</a> and get myself in trouble.  Instead, I’ll share just a few slides I photographed from afar along with some objective statements and leave it at that.<br /><br /><b>The future directions summary slide:</b><br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://4.bp.blogspot.com/-dN7IrgBNqMM/W_hh9cs4GoI/AAAAAAABCsE/z1APs1eMzEEpMU00PdclYjYMqUv8jK8swCK4BGAYYCw/s1600/IMG_4938.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="225" src="https://4.bp.blogspot.com/-dN7IrgBNqMM/W_hh9cs4GoI/AAAAAAABCsE/z1APs1eMzEEpMU00PdclYjYMqUv8jK8swCK4BGAYYCw/s400/IMG_4938.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">NSF OAC’s future directions</td></tr></tbody></table><ul><li>Performance, capability computing, and global leadership are not mentioned in the above slide.  Terms like “agility, responsiveness, accessibility” are often used to describe the cloud.</li><li>“Reduce barriers to CI adoption” indicates that NSF wants to serve more users.  NSF is not increasing investment in capital acquisition (i.e., more or larger HPC systems beyond the status quo of technology refreshes).</li><li>“Prioritize investments to maximize impact” does not define what impacts are to be maximized.</li></ul><br /><b>The Frontera slide:</b><br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://4.bp.blogspot.com/-Z6fVo1VzObU/W_hjwTwsYsI/AAAAAAABCsQ/V7shT5OZQsMkmuQiXKL-E0NgFeD5H6UcwCK4BGAYYCw/s1600/IMG_4939.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="225" src="https://4.bp.blogspot.com/-Z6fVo1VzObU/W_hjwTwsYsI/AAAAAAABCsQ/V7shT5OZQsMkmuQiXKL-E0NgFeD5H6UcwCK4BGAYYCw/s400/IMG_4939.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">NSF’s next leadership-class HPC, Frontera, to be deployed by TACC</td></tr></tbody></table><ul><li>The award amount was $60M.  The previous Track-1 solicitation that funded Blue Waters was $200M.  Stampede was $30M, and Stampede 2 was another $30M.</li><li>“Leadership-class … for all [science and engineering] applications” suggests either that all science and engineering applications are leadership-capable, or that this leadership-class system is not primarily designed to support a leadership computing workload.</li><li>It is unclear what the significance of the “CPU” qualifier in “largest CPU system” is in the larger context of leadership computing.</li><li>There is mention of “leadership-class” computing.  
There is no mention of exascale computing.  There is nothing that acknowledges leveraging the multi-billion-dollar investment the US has made into the Exascale Computing Project.  An audience member politely asked about this omission.&lt;/li&gt;&lt;/ul&gt;&lt;div&gt;<b><br /></b><b>The Midscale Research Infrastructure slide:</b>&lt;/div&gt;</p> +<div><br /></div> +<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://2.bp.blogspot.com/-hn8mLFrBDC4/W_hmRyzMrKI/AAAAAAABCsc/FiAtDwgi1zMvGYb9JnfcuZJRlmUFGQ--QCK4BGAYYCw/s1600/IMG_4940.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="225" src="https://2.bp.blogspot.com/-hn8mLFrBDC4/W_hmRyzMrKI/AAAAAAABCsc/FiAtDwgi1zMvGYb9JnfcuZJRlmUFGQ--QCK4BGAYYCw/s400/IMG_4940.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Upcoming solicitations for research cyberinfrastructure</td></tr></tbody></table> +<ul><li>NSF OAC expects to issue one $6M-$20M solicitation and another $20M-$70M solicitation "soon" to fund HPC systems and the associated infrastructure.</li><li>$6M-$20M is on the same order of magnitude as the Track-2 solicitations that funded SDSC's Gordon ($10M) and Comet ($12M).</li><li>$20M-$70M is on the same order of magnitude as the Track-2 solicitations that funded TACC's Stampede 1 and 2 ($30M). &nbsp;NSF's next leadership-class investment (Frontera) is $60M.</li></ul> +<p><br />&lt;h3 id="mypaper"&gt;My SC Paper&lt;/h3&gt;The next major item on my agenda was presenting my paper, <a href="https://sc18.supercomputing.org/presentation/?id=pap206&amp;sess=sess186">A Year in the Life of a Parallel File System</a>, as the final talk in the final session of the paper track.<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="http://4.bp.blogspot.com/-CRKvFoWeftI/W_hpfC0RUyI/AAAAAAABCso/FqDL12Rd1l4eL0Rutvcs6xYO7PQqIP9yACK4BGAYYCw/s1600/IMG_4941.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="400" src="https://4.bp.blogspot.com/-CRKvFoWeftI/W_hpfC0RUyI/AAAAAAABCso/FqDL12Rd1l4eL0Rutvcs6xYO7PQqIP9yACK4BGAYYCw/s400/IMG_4941.jpeg" width="300" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;My name in lights–or something like that.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;<br />I was admittedly bummed out when I found out that I was going to be the conference closer since a significant number of SC attendees tend to fly out on Thursday night and, presumably, would not stick around for my presentation.  As a result, I didn’t take preparation for it as seriously in the weeks leading up to SC as I normally would have.  I knew the presentation was a 30-35 minute talk that had to be fit into a 25-minute slot, but I figured I would figure out how to manage that on the night before the talk and mostly wing it.<br /><br />What I realized after arriving at SC was that a bunch of people–most of whom weren’t the expected audience of storage researchers–were looking forward to hearing the talk.  
This left me scrambling to seriously step up the effort I was going to put into making sure the presentation was well composed despite needing to drop ten minutes of material and fit it into the 25 minutes I was given.  I documented my general approach to crafting presentations in my <a href="https://glennklockwood.blogspot.com/2014/04/being-successful-researcher.html">patented Glenn K. Lockwood Five Keys to Being a Successful Researcher (FKBSR) method</a>, but I’ll mention some of my considerations for the benefit of anyone who is interested in how others approach public speaking.<br />&lt;ol&gt;&lt;li&gt;I absolutely could not overshoot the timing because some attendees had to leave at 5 PM to catch 7 PM flights.  This meant that it would be better for me to undershoot the time and either draw out the conclusions and acknowledgments slides to finish on time or finish early and leave extra time for questions.&lt;/li&gt;&lt;li&gt;The people I met at SC who indicated interest in my talk were storage systems people, not statisticians.  This meant I could probably tone down the statistical rigor in the presentation without offending people’s scientific sensibilities.&lt;/li&gt;&lt;li&gt;Similarly, because attendees were already familiar with typical HPC I/O systems and the relevant technologies, I could gloss over the experimental setup and description of the different compute and storage systems.&lt;/li&gt;&lt;li&gt;Given the above considerations, a reasonable approach would be to punt as many non-essential details into the Q&amp;A after the talk and let people try to poke holes in my methods only if they really cared.&lt;/li&gt;&lt;/ol&gt;&lt;div&gt;I also know two things about myself and the way I present:&lt;/div&gt;</p> +<div><ol><li>I can present either at a casual pace where I average ~70 seconds per slide or in turbo mode where I average ~50 seconds per slide. &nbsp;Orating at turbo speed requires a lot more preparation because it requires speaking through slide transitions rather than pausing to reorient after each slide transition.</li><li>I get distracted easily, so I would rather have people begin to leave after my monologue ended and Q&amp;A began than have the commotion of people getting up derail the tail end of my presentation.</li></ol></div> +<p><br />As a result of all these factors, I opted to both cut a lot of details to get the talk down to ~25-30 minutes when presented at a casual pace, then prepare to present in turbo mode just in case the previous speakers went long (I was last of three speakers), there were A/V issues (they were prolific at this SC, especially for Mac users), or there were any audience interruptions.<br /><br />I also opted to present from my iPad rather than a full laptop since it did a fine job earlier at both PDSW-DISCS and the IO-500/VI4IO BOF.  In sticking with this decision though, I learned two valuable things during the actual presentation:<br />&lt;ol&gt;&lt;li&gt;<b>The iOS “do not disturb” mode does not suppress Twitter notifications</b>.  A couple of people were kind enough to tweet about my presentation as I was giving it, but this meant that my presenter view was blowing up with Twitter noise as I was trying to present!  Fortunately I only needed to look down at my iPad when transitioning between slides so it didn’t derail me.&lt;/li&gt;&lt;li&gt;<b>There’s no usefully sized timer or clock in PowerPoint for iOS’s presenter view</b>, and as a result, I had no idea how I was doing on time as I entered the final third of my slides.  
This became a distraction because I was fully expecting a five-minute warning from the session moderator at some point and got worried that I wasn’t going to get one.  As such, I didn’t want to slow down the tail of the presentation without knowing how close I was getting to the target.  It turned out that I didn’t get a five-minute warning because I was already concluding at that point.&lt;/li&gt;&lt;/ol&gt;&lt;div&gt;Fortunately the audience was sufficiently engaged to pad out the Q&amp;A period with many of the questions that would’ve been answered by the slides I had dropped.  Afterwards I got feedback that indicated the presentation was noticeably short to the audience (not great) but that the narrative remained understandable to most attendees throughout the entire presentation (good).&lt;/div&gt;</p> +<div><br /></div> +<div>As far as the technical content of the presentation though, I won't recap that here--until I write up the high-level presentation as another blog post, you may have to read the paper (or invite me to present it at your institution!).</div> +<div><br /></div> +<h3 id="perot">SC Technical Program Reception</h3> +<div>I've never attended the reception that wraps up the last full day of SC for a variety of reasons, and I was going to skip it again this year to fit some me-time into the otherwise frantic week. &nbsp;However the venue (the Perot Museum) and its close proximity to my hotel lured me out.</div> +<div><br /></div> +<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://1.bp.blogspot.com/-tI-MQMV8TfY/W_iNlk_tK9I/AAAAAAABCs0/7AgSWy_4EkMETBoz4y2vYAmKtPhInqbPQCLcBGAs/s1600/IMG_4944.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://1.bp.blogspot.com/-tI-MQMV8TfY/W_iNlk_tK9I/AAAAAAABCs0/7AgSWy_4EkMETBoz4y2vYAmKtPhInqbPQCLcBGAs/s400/IMG_4944.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">The entryway to the Perot Museum</td></tr></tbody></table> +<div><br /></div> +<div>I am not a "never eat alone" kind of person because I find that my ability to be at the top of my game diminishes without at least some intermittent time to sit back and digest. &nbsp;As such, I approached the reception with very selfish intent: I wanted to see the museum, learn about something that had nothing to do with supercomputing, have a drink and a meal, and then go back to my hotel. 
&nbsp;So I did just that.</div> +<div><br /></div> +<div>The dinosaurs seemed like a major feature of the museum:</div> +<div><br /></div> +<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://3.bp.blogspot.com/-0SzrBWWPg0U/W_iQbFgdV0I/AAAAAAABCtA/pfbaJXLCVKwcY4SBUKh-uO7mCdH8BFkQgCLcBGAs/s1600/IMG_4947.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://3.bp.blogspot.com/-0SzrBWWPg0U/W_iQbFgdV0I/AAAAAAABCtA/pfbaJXLCVKwcY4SBUKh-uO7mCdH8BFkQgCLcBGAs/s400/IMG_4947.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Rapetosaurus skeleton on display at the Perot Museum</td></tr></tbody></table> +<div><br /></div> +<div>The paleontological diversity of the dinosaur room reminded me of <a href="https://www.royalsaskmuseum.ca/trex">the dinosaur museum near my wife's hometown</a> in the Canadian prairies, but the exhibit seemed to be largely reproduction fossils that blended science with entertainment.</div> +<div><br /></div> +<div>More impressive to me was the extensive mineral collection:</div> +<div><br /></div> +<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://4.bp.blogspot.com/-gXjf9Sexq68/W_iRxM_ABNI/AAAAAAABCtM/EA0wfJOoOvgqTwtOwWxt5Y42vFFM-1hvQCLcBGAs/s1600/IMG_4949.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="400" src="https://4.bp.blogspot.com/-gXjf9Sexq68/W_iRxM_ABNI/AAAAAAABCtM/EA0wfJOoOvgqTwtOwWxt5Y42vFFM-1hvQCLcBGAs/s400/IMG_4949.jpeg" width="300" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">I'm a sucker for quartz. &nbsp;I did my PhD research on silicates.</td></tr></tbody></table> +<div><br /></div> +<div>Not only were the minerals on display of remarkable quality, but many of them were found in Texas. &nbsp;In fact, the museum overall had a remarkably Texas-focused set of exhibits which really impressed me. &nbsp;The exhibit that most caught my attention was a mini-documentary on the geologic history of Texas that explained how plate tectonics and hundreds of millions of years resulted in the world-famous oil and gas reserves throughout the state.</div> +<div><br /></div> +<div>Having learned something and enjoyed some delightful food at the museum, I then called it quits and cashed out.</div> +<p><br />&lt;h2 id="friday"&gt;Friday, November 16&lt;/h2&gt;&lt;div&gt;The last day of SC is always a bit odd because the expo has already wrapped up, most of the vendors and casual attendees have gone home, and the conference is much more quiet and focused.  
My day started with a surreal shuttle ride to the conference center in what appeared to be a 90’s-era party bus:&lt;/div&gt;</p> +<div><br /></div> +<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://3.bp.blogspot.com/-gVuBN4kPkJk/W_iTzpl1fgI/AAAAAAABCtY/EYRjvFdgORAW8buCAMOEPaOgajKt4yTOQCLcBGAs/s1600/IMG_4956.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://3.bp.blogspot.com/-gVuBN4kPkJk/W_iTzpl1fgI/AAAAAAABCtY/EYRjvFdgORAW8buCAMOEPaOgajKt4yTOQCLcBGAs/s400/IMG_4956.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Conference shuttle, complete with taped-together audio system, faux leather sofa, and a door that had to be poked with a broom stick to open.</td></tr></tbody></table> +<div><br /></div> +<div><br /></div> +<div>Only six concurrent half-day workshops and a panel were on the agenda:</div> +<div><br /></div> +<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://2.bp.blogspot.com/-CcpDEs4gg0g/W_iUdw0L9oI/AAAAAAABCtg/aifOyN4nBx8Lc4eEa5Gs7v19EB6hsvqTgCLcBGAs/s1600/IMG_4957.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="320" src="https://2.bp.blogspot.com/-CcpDEs4gg0g/W_iUdw0L9oI/AAAAAAABCtg/aifOyN4nBx8Lc4eEa5Gs7v19EB6hsvqTgCLcBGAs/s320/IMG_4957.jpeg" width="239" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">The entire Friday agenda fit on a single screen</td></tr></tbody></table> +<div><br /></div> +<div>I stuck my head into the&nbsp;<a href="https://sc18.supercomputing.org/session/?sess=sess145">P3HPC workshop</a>'s first panel discussion to catch the age-old but ever-lively argument over someone's proposed definition of performance portability and productivity either being too broad or too narrow. &nbsp;I/O performance portability generally does not have a place in these sorts of conversations (which I don't fault--algorithmic complexity in I/O is usually hidden from user applications) so I attended only as an interested observer and wasn't as fastidious about taking notes as I was earlier in the week.</div> +<div><br /></div> +<div>At 10:30 AM I headed over to the <a href="https://sc18.supercomputing.org/presentation/?id=pan105&amp;sess=sess306">Convergence between HPC and Big Data: The Day After Tomorrow</a> panel discussion which had a star-studded speaker lineup. 
&nbsp;<a href="http://www.nersc.gov/about/nersc-staff/center-leadership/katie-antypas/">NERSC's Katie Antypas</a> gave a great overview of the NERSC-9/Perlmutter architecture which fit the panel topic uncannily well since it is a system design from the ground up to meet the needs of both traditional HPC and large-scale data analysis.</div> +<div><br /></div> +<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://1.bp.blogspot.com/-D7BDhWU4Ww8/W_iXlgPQFUI/AAAAAAABCts/ZUG5vfMr7TIVeDqPmijx8iZBE_YjrPJWwCLcBGAs/s1600/IMG_4959.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="225" src="https://1.bp.blogspot.com/-D7BDhWU4Ww8/W_iXlgPQFUI/AAAAAAABCts/ZUG5vfMr7TIVeDqPmijx8iZBE_YjrPJWwCLcBGAs/s400/IMG_4959.jpeg" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">The NERSC-9 Project Director describing how the Perlmutter system embodies the convergence of HPC and Big Data in front of a remarkably big crowd in the final session of SC.</td></tr></tbody></table> +<div><br /></div> +<div>Unfortunately I had to duck out shortly after she spoke to get to my last meeting of the week with an old colleague for whom I always make time at SC. &nbsp;Incidentally, some of the most valuable time you can spend at SC is talking to <a href="https://www.nag.com/">industry</a> <a href="https://bioteam.net/">consultants</a>. &nbsp;Not unlike getting to know members of the trade press, good consultants have exposure to a tremendous breadth of problem and solution spaces. &nbsp;They can give you all manner of interesting insights into different vendors, industry verticals, and market trends in an otherwise brief conversation.</div> +<div><br /></div> +<div>After my final meeting was cut short by my colleague's need to run to the airport, I had a quick bite with another Friday holdout then made my own way to the airport to catch up on a week's worth of e-mails. &nbsp;The flight back to Oakland was one of the rare occasions where I was just too worn out to try to catch up on some delinquent report writing and just watched three hours of Dark Tourist on Netflix.</div> +<div><br /></div> +<h2 id="after-conf">After the Conference</h2> +<div>It was technically Saturday by the time I finally got home, but the family was happy to see me (and the swag I had in tow):</div> +<div><br /></div> +<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://1.bp.blogspot.com/-u5u1MUSi0_I/W_iln_CwlEI/AAAAAAABCt8/W-ssev6VpyIDSMG8XQsHcNM84xTGqDJCgCEwYBhgL/s1600/IMG_4967.jpeg" style="margin-left: auto; margin-right: auto;"><img border="0" height="240" src="https://1.bp.blogspot.com/-u5u1MUSi0_I/W_iln_CwlEI/AAAAAAABCt8/W-ssev6VpyIDSMG8XQsHcNM84xTGqDJCgCEwYBhgL/s320/IMG_4967.jpeg" width="320" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">George fully appreciating the giant pile of conference swag with which I came home</td></tr></tbody></table> +<div><br /></div> +<div>This was definitely the busiest SC of my career, but in many ways it was also the most productive. 
&nbsp;I owe sincere thanks to everyone in the HPC community who made it such a worthwhile conference to attend--vendors, presenters, old colleagues, and even the new colleagues who occasionally just wanted to introduce themselves and express that they enjoy reading the nonsense I post on Twitter. &nbsp;I always leave SC more amazed and humbled by all the bright minds with whom I connect, and I hope that I am doing my part to pay that experience forward for others now and in the SC conferences to come.</div> +<p><span><!--more--></span><span><!--more--></span><span><!--more--></span></p> + + + + + GPU usage information for jobs in IBM Spectrum LSF + + 2018-10-02T03:21:37-06:00 + https://hpc.social/2018/gpu-usage-information-for-jobs-in-ibm-spectrum-lsf + <p>In my last blog, we ran through an example showing how IBM Spectrum LSF now automatically detects the presence of NVIDIA GPUs on hosts in the cluster and performs the necessary configuration of the scheduler automatically.</p> + +<p>In this blog, we take a closer look at the integration between Spectrum LSF and +NVIDIA DCGM which provides GPU usage information for jobs submitted to the +system.</p> + +<blockquote class="twitter-tweet"><p dir="ltr" lang="en"><a href="https://twitter.com/hashtag/IBMSpectrum?src=hash&amp;ref_src=twsrc%5Etfw">#IBMSpectrum</a> <a href="https://twitter.com/hashtag/LSF?src=hash&amp;ref_src=twsrc%5Etfw">#LSF</a> supports <a href="https://twitter.com/hashtag/NVIDIA?src=hash&amp;ref_src=twsrc%5Etfw">#NVIDIA</a> DCGM, allowing you to get the most our of your <a href="https://twitter.com/hashtag/GPUs?src=hash&amp;ref_src=twsrc%5Etfw">#GPUs</a> <a href="https://t.co/aCo9cFHNkq">https://t.co/aCo9cFHNkq</a> <a href="https://twitter.com/hashtag/HPCmatters?src=hash&amp;ref_src=twsrc%5Etfw">#HPCmatters</a></p> +&mdash; Gábor SAMU (@gabor_samu) <a href="https://twitter.com/gabor_samu/status/806540074888396800?ref_src=twsrc%5Etfw">December 7, 2016</a></blockquote> + +<p>To enable the integration between Spectrum LSF and NVIDIA DCGM, we +need to specify the <em>LSF_DCGM_PORT=&lt;port number&gt;</em> parameter in +<em>LSF_ENVDIR/lsf.conf</em></p> + +<div class="highlight"><pre><code class="language-plaintext">root@kilenc:/etc/profile.d# cd $LSF_ENVDIR +root@kilenc:/opt/ibm/lsfsuite/lsf/conf# cat lsf.conf |grep -i DCGM +LSF_DCGM_PORT=5555</code></pre></div> + +<p>You can find more details about the variable <em>LSF_DCGM_PORT</em> and what it +enables <a href="https://www.ibm.com/support/knowledgecenter/en/SSWRJV_10.1.0/lsf_config_ref/lsf.conf.lsf_dcgm_port.5.html">here</a>.</p> + +<p>Before continuing, please ensure that the DCGM daemon is up and running. Below +we start DCGM on the default port and run a query command to confirm that it&rsquo;s +up and running.</p> + +<div class="highlight"><pre><code class="language-plaintext">root@kilenc:/opt/ibm/lsfsuite/lsf/conf# nv-hostengine +Started host engine version 1.4.6 using port number: 5555 + +root@kilenc:/opt/ibm/lsfsuite/lsf/conf# dcgmi discovery -l +1 GPU found. 
++--------+-------------------------------------------------------------------+ +| GPU ID | Device Information | ++========+===================================================================+ +| 0 | Name: Tesla V100-PCIE-32GB | +| | PCI Bus ID: 00000033:01:00.0 | +| | Device UUID: GPU-3622f703-248a-df97-297e-df1f4bcd325c | ++--------+-------------------------------------------------------------------+ </code></pre></div> + +<p>Next, let&rsquo;s submit a GPU job to IBM Spectrum LSF to demonstrate the collection +of GPU accounting. Note that the GPU job must be submitted to Spectrum LSF +with the exclusive mode specified in order for the resource usage to be +collected. As was the case in my previous blog, we submit the <em>gpu-burn</em> test +job (formally known as Multi-GPU CUDA stress test).</p> + +<div class="highlight"><pre><code class="language-plaintext">test@kilenc:~/gpu-burn$ bsub -gpu "num=1:mode=exclusive_process" ./gpu_burn 120 +Job &lt;54086&gt; is submitted to default queue &lt;normal&gt;</code></pre></div> + +<p>Job <em>54086</em> runs to successful completion and we use the Spectrum LSF <em>bjobs</em> command with the <em>-gpu</em> option to display the GPU usage +information in the output below.</p> + +<div class="highlight"><pre><code class="language-plaintext">test@kilenc:~/gpu-burn$ bjobs -l -gpu 54086 + +Job &lt;54086&gt;, User &lt;test&gt;, Project &lt;default&gt;, Status &lt;DONE&gt;, Queue &lt;normal&gt;, Com + mand &lt;./gpu_burn 120&gt;, Share group charged &lt;/test&gt; +Mon Oct 1 11:14:04: Submitted from host &lt;kilenc&gt;, CWD &lt;$HOME/gpu-burn&gt;, Reques + ted GPU &lt;num=1:mode=exclusive_process&gt;; +Mon Oct 1 11:14:05: Started 1 Task(s) on Host(s) &lt;kilenc&gt;, Allocated 1 Slot(s) + on Host(s) &lt;kilenc&gt;, Execution Home &lt;/home/test&gt;, Executi + on CWD &lt;/home/test/gpu-burn&gt;; +Mon Oct 1 11:16:08: Done successfully. The CPU time used is 153.0 seconds. 
+ HOST: kilenc; CPU_TIME: 153 seconds + GPU ID: 0 + Total Execution Time: 122 seconds + Energy Consumed: 25733 Joules + SM Utilization (%): Avg 99, Max 100, Min 64 + Memory Utilization (%): Avg 28, Max 39, Min 9 + Max GPU Memory Used: 30714888192 bytes + + +GPU Energy Consumed: 25733.000000 Joules + + + MEMORY USAGE: + MAX MEM: 219 Mbytes; AVG MEM: 208 Mbytes + + SCHEDULING PARAMETERS: + r15s r1m r15m ut pg io ls it tmp swp mem + loadSched - - - - - - - - - - - + loadStop - - - - - - - - - - - + + EXTERNAL MESSAGES: + MSG_ID FROM POST_TIME MESSAGE ATTACHMENT + 0 test Oct 1 11:14 kilenc:gpus=0; N + + RESOURCE REQUIREMENT DETAILS: + Combined: select[(ngpus&gt;0) &amp;&amp; (type == local)] order[gpu_maxfactor] rusage[ngp + us_physical=1.00] + Effective: select[((ngpus&gt;0)) &amp;&amp; (type == local)] order[gpu_maxfactor] rusage[ + ngpus_physical=1.00] + + GPU REQUIREMENT DETAILS: + Combined: num=1:mode=exclusive_process:mps=no:j_exclusive=yes + Effective: num=1:mode=exclusive_process:mps=no:j_exclusive=yes + + GPU_ALLOCATION: + HOST TASK ID MODEL MTOTAL FACTOR MRSV SOCKET NVLINK + kilenc 0 0 TeslaV100_PC 31.7G 7.0 0M 8 - </code></pre></div> + +<p>And to close, yours truly spoke at the HPC User Forum in April 2018 (Tucson, AZ) giving a +short update in the vendor panel about Spectrum LSF, focusing on GPU support.</p> + +<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;"> + +</div> + + + + + StashCache By The Numbers + + 2018-09-26T06:00:00-06:00 + https://hpc.social/2018/stashcache-by-the-numbers + <p>The StashCache federation is comprised of 3 components: Origins, Caches, and Clients. There are additional components that increase the usability of StashCache which I will also mention in this post.</p> + +<figure class=""> + <img alt="Diagram of StashCache Infrastructure" src="https://derekweitzel.com/images/posts/StashCache-By-Numbers/StashCache-Diagram.png" /><figcaption> + Diagram of the StashCache Federation + + </figcaption></figure> + +<figure class=""> + <img alt="Cumulative Usage of StashCache" src="https://derekweitzel.com/images/posts/StashCache-By-Numbers/StashCache-Cumulative.png" /><figcaption> + Cumulative Usage of StashCache over the last 90 days + + </figcaption></figure> + +<h2 id="origins">Origins</h2> + +<p>A StashCache Origin is the authoritative source of data. The origin receives data location requests from the central redirectors. These requests take the form of “Do you have the file X”, to which the origin will respond “Yes” or “No”. The redirector then returns a list of origins that claim to have the requested file to the client.</p> + +<p>An Origin is a simple XRootD server, exporting a directory or set of directories for access.</p> + +<table> + <thead> + <tr> + <th>Origin</th> + <th>Base Directory</th> + <th>Data Read</th> + </tr> + </thead> + <tbody> + <tr> + <td>LIGO Open Data</td> + <td>/gwdata</td> + <td>926TB</td> + </tr> + <tr> + <td>OSG Connect</td> + <td>/user</td> + <td>246TB</td> + </tr> + <tr> + <td>FNAL</td> + <td>/pnfs</td> + <td>166TB</td> + </tr> + <tr> + <td>OSG Connect</td> + <td>/project</td> + <td>63TB</td> + </tr> + </tbody> +</table> + +<p>A list of Origins and their base directories.</p> + +<h2 id="clients">Clients</h2> + +<p>The clients interact with the StashCache federation on the user’s behalf. They are responsible for choosing the “best” cache. 
The available clients are <a href="https://cernvm.cern.ch/portal/filesystem">CVMFS</a> and <a href="https://github.com/opensciencegrid/StashCache">StashCP</a>.</p> + +<figure class="half "> + + + <a href="https://derekweitzel.com/posts/StashCache-By-Numbers/StashCache-CVMFS.png" title="Client Usage By Tool"> + <img alt="Client Usage By Tool" src="https://derekweitzel.com/posts/StashCache-By-Numbers/StashCache-CVMFS.png" /> + </a> + + + + <a href="https://derekweitzel.com/posts/StashCache-By-Numbers/StashCP-Usage.png" title="StashCP Usage"> + <img alt="StashCP Usage" src="https://derekweitzel.com/posts/StashCache-By-Numbers/StashCP-Usage.png" /> + </a> + + + + <figcaption>StashCache Client Usage +</figcaption> + +</figure> + +<p>In the pictures above, you can see that most users of StashCache use CVMFS to access the federation. GeoIP is used by all clients in determining the “best” cache. GeoIP location services are provided by the CVMFS infrastructure in the U.S. The geographically nearest cache is used.</p> + +<p>The GeoIP service runs on multiple CVMFS Stratum 1s and other servers. The request to the GeoIP service includes all of the cache hostnames. The GeoIP service takes the requesting IP address and attempts to locate the requester. After determining the location of all of the caches, the service returns an ordered list of nearest caches.</p> + +<p>The GeoIP service uses the <a href="https://www.maxmind.com/">MaxMind database</a> to determine locations by IP address.</p> + +<h3 id="cvmfs">CVMFS</h3> + +<p>Most (if not all) origins are indexed in an <code class="language-plaintext highlighter-rouge">*.osgstorage.org</code> repo. For example, the OSG Connect origin is indexed in the <code class="language-plaintext highlighter-rouge">stash.osgstorage.org</code> repo. It uses a special feature of CVMFS where the namespace and data are separated. The file metadata such as file permissions, directory structure, and checksums are stored within CVMFS. The file contents are not within CVMFS.</p> + +<p>When accessing a file, CVMFS will use the directory structure to form an HTTP request to an external data server. CVMFS uses GeoIP to determine the nearest cache.</p> + +<p>The indexer may also configure a repo to be “authenticated”. A whitelist of certificate DN’s is stored within the repo metadata and distributed to each client. The CVMFS client will pull the certificate from the user’s environment. If the certificate DN matches a DN in the whitelist, it uses the certificate to authenticate with an authenticated cache.</p> + +<h3 id="stashcp">StashCP</h3> + +<p>StashCP works in the following order:</p> + +<ol> + <li>Check if the requested file is available from CVMFS. If it is, copy the file from CVMFS.</li> + <li>Determine the nearest cache by sending cache hostnames to the GeoIP service.</li> + <li>After determining the nearest cache, run the <code class="language-plaintext highlighter-rouge">xrdcp</code> command to copy the data from the nearest cache, as sketched below.</li> +</ol>
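 + +<p>To make that last step concrete, here is a hedged sketch of the kind of transfer StashCP effectively performs once the GeoIP service has picked a nearest cache. The cache hostname and file name are invented for illustration (the <code class="language-plaintext highlighter-rouge">/user</code> namespace matches the OSG Connect origin listed above), and the ports are the cache ports described below:</p> + +<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Hypothetical nearest cache returned by the GeoIP service +CACHE=its-stashcache.example.edu + +# StashCP then effectively runs an xrdcp against the cache XRootD port (1094) +xrdcp root://${CACHE}:1094//user/dweitzel/public/input.dat ./input.dat + +# The same object is also reachable over the cache plain-HTTP port (8000) +curl -O http://${CACHE}:8000/user/dweitzel/public/input.dat +</code></pre></div> +</div> + +<h2 id="caches">Caches</h2> + +<figure class=""> + <img alt="Cache Locations" src="https://derekweitzel.com/images/posts/StashCache-By-Numbers/CacheLocations.png" /><figcaption> + Cache Locations in the U.S. + + </figcaption></figure> + +<p>The cache is half XRootD cache and half XRootD client. When a cache receives a data request from a client, it searches its own cache directory for the files. If the file is not in the cache, it uses the built-in client to retrieve the file from one of the origins. 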
The cache will request the data location from the central redirector which, in turn, asks the origins for the file location.</p> + +<p>The cache listens on port 1094 for the regular XRootD protocol, and port 8000 for HTTP.</p> + +<h3 id="authenticated-caches">Authenticated Caches</h3> + +<p>Authenticated caches use GSI certificates to authenticate access to files within the cache. The client will authenticate with the cache using the client’s certificate. If the file is not in the cache, the cache will use its own certificate to authenticate with the origin to download the file.</p> + +<p>Authenticated caches use port 8443 for HTTPS.</p>
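 + +<p>As a hedged illustration of that authenticated path (the cache hostname is again invented, and the proxy file location is simply the conventional per-user default, so treat this as a sketch rather than a recipe):</p> + +<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code># Hypothetical authenticated read over the cache HTTPS port (8443), +# presenting a GSI proxy certificate as the client credential +curl --cert /tmp/x509up_u1000 --key /tmp/x509up_u1000 \ +     -O https://its-stashcache.example.edu:8443/user/dweitzel/protected/data.dat +</code></pre></div> +</div> + + + + + A hands-on look at GPU "autoconfig" in IBM Spectrum LSF + + 2018-09-14T16:38:29-06:00 + https://hpc.social/2018/a-hands-on-look-at-gpu-autoconfig-in-ibm-spectrum-lsf + <p>It&rsquo;s been a long time since I&rsquo;ve posted to my goulash blog. I&rsquo;ve not disappeared, rather I&rsquo;ve been writing articles for the +IBM Accelerated Insights solution channel on <a href="https://www.hpcwire.com/solution_channel/ibm/">HPCWire</a>. Since then, I&rsquo;ve been +fortunate enough to have access to a POWER9 based developer system equipped with an NVIDIA Tesla V100 PCIe card to put through its +paces. This is very timely for me as there is some exciting new functionality in IBM Spectrum LSF known as GPU auto detect, which +I recently wrote about in the article <a href="https://www.hpcwire.com/solution_content/ibm/cross-industry/the-taming-of-the-gpu/">The Taming of the GPU</a> that I&rsquo;ve been meaning to try out hands on.</p> + +<blockquote class="twitter-tweet"><p dir="ltr" lang="en">Much like the number of CPUs and cores, <a href="https://twitter.com/hashtag/IBM?src=hash&amp;ref_src=twsrc%5Etfw">#IBM</a> Spectrum LSF automatically detects the presence of <a href="https://twitter.com/hashtag/NVIDIA?src=hash&amp;ref_src=twsrc%5Etfw">#NVIDIA</a> GPUs on each node in the cluster - so LSF can immediately schedule GPU workloads, correctly. Read more <a href="https://t.co/dGVbXBo1Ly">https://t.co/dGVbXBo1Ly</a> <a href="https://twitter.com/hashtag/HPC?src=hash&amp;ref_src=twsrc%5Etfw">#HPC</a></p> +&mdash; Gábor SAMU (@gabor_samu) <a href="https://twitter.com/gabor_samu/status/1121569605707862016?ref_src=twsrc%5Etfw">April 26, 2019</a></blockquote> + +<p>Back in the Dark Ages (no, not literally), administrators of HPC clusters had to specify in the configuration of the workload scheduler +which nodes were equipped with GPUs, the model of the GPUs and so on. This was relatively straightforward when nodes were equipped +with single GPUs and clusters were smaller. With the proliferation of GPUs, nodes are frequently equipped with multiple GPUs and +often times we can end up with a mix of GPU models in a single cluster where rolling upgrades of hardware have occurred. Factor in +hybrid cloud environments where nodes can come and go as needed, and what is seemingly an easy update to configuration files of a +workload scheduler can become complex, quickly. 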
Take into account that if a user is requesting a GPU for a job they&rsquo;ve submitted +and the scheduler is not fully aware of which nodes are equipped with GPUs, you can end up with under-utilization of these assets.</p> + +<p>Enter Spectrum LSF with a new capability known as GPU auto detect, which helps simplify the administration of heterogeneous computing +environments by detecting the presence of NVIDIA GPUs in nodes and automatically performing the necessary scheduler configuration.<br /> +For a detailed list of GPU support enhancements in the latest update to IBM Spectrum LSF please refer to the following <a href="https://www.ibm.com/support/knowledgecenter/en/SSWRJV_10.1.0/lsf_release_notes/lsf_relnotes_gpu10.1.0.6.html">page</a>.</p> + +<p>My testing environment is configured as follows:</p> + +<ul> +<li>dual-socket POWER9 development system</li> +<li>1 x NVIDIA Tesla V100 (PCIe)</li> +<li>Ubuntu 18.04.1 LTS (Bionic Beaver)</li> +<li>IBM Spectrum LSF Suite for Enterprise</li> +<li>NVIDIA CUDA 9.2</li> +</ul> +<p>Note that the following assumes that NVIDIA CUDA and IBM Spectrum LSF Suite for Enterprise are installed and functioning nominally.</p> + +<p>By default, the latest version of IBM Spectrum LSF Suite v10.2.0.6 has the following parameters enabled by default in +<em>$LSF_ENVDIR/lsf.conf</em>:</p> + +<div class="highlight"><pre><code class="language-bash">LSF_GPU_AUTOCONFIG<span style="color: #f92672;">=</span>Y +LSB_GPU_NEW_SYNTAX<span style="color: #f92672;">=</span>extend</code></pre></div> + +<p>The above parameters enable the new GPU support wizardry in the product.</p> + +<ol> +<li>So let&rsquo;s get right into it. We start by checking if the Spectrum LSF cluster is up and running.</li> +</ol> +<div class="highlight"><pre><code class="language-bash">test@kilenc:~$ lsid +IBM Spectrum LSF 10.1.0.6, May <span style="color: #ae81ff;">25</span> <span style="color: #ae81ff;">2018</span> +Suite Edition: IBM Spectrum LSF Suite <span style="color: #66d9ef;">for</span> Enterprise 10.2.0 +Copyright International Business Machines Corp. 1992, 2016. +US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp. + +My cluster name is Klaszter +My master name is kilenc + + + +test@kilenc:~$ lsload +HOST_NAME status r15s r1m r15m ut pg ls it tmp swp mem +kilenc ok 0.6 0.3 0.3 0% 0.0 <span style="color: #ae81ff;">1</span> <span style="color: #ae81ff;">18</span> 791G 853M 7.3G + + + +test@kilenc:~$ bhosts +HOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV +kilenc ok - <span style="color: #ae81ff;">32</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span></code></pre></div> + +<p>We confirm above that the status of the cluster is OK. Meaning it&rsquo;s up and ready to accept jobs. Note that I have not done any +supplementary configuration in Spectrum LSF for GPUs apart from enabling the two above variables noted above.</p> + +<ol start="2"> +<li>Eureka! Spectrum LSF has automatically detected the presence of GPUs on the system. The single GPU in this case is now +configured as a resource for Spectrum LSF and can be scheduled to. 
We have used the new -<em>gpu</em> and -<em>gpuload</em> options for the +Spectrum LSF user commands to check this.</li> +</ol> +<div class="highlight"><pre><code class="language-bash">test@kilenc:~$ lshosts -gpu +HOST_NAME gpu_id gpu_model gpu_driver gpu_factor numa_id +kilenc <span style="color: #ae81ff;">0</span> TeslaV100_PCIE_ 396.37 7.0 <span style="color: #ae81ff;">8</span> + + + +test@kilenc:~$ lsload -gpu +HOST_NAME status ngpus gpu_shared_avg_mut gpu_shared_avg_ut ngpus_physical +kilenc ok <span style="color: #ae81ff;">1</span> 0% 0% <span style="color: #ae81ff;">1</span> + + +test@kilenc:~$ lsload -gpuload +HOST_NAME gpuid gpu_model gpu_mode gpu_temp gpu_ecc gpu_ut gpu_mut gpu_mtotal gpu_mused gpu_pstate gpu_status gpu_error +kilenc <span style="color: #ae81ff;">0</span> TeslaV100_P 0.0 46C 0.0 0% 0% 31.7G 0M <span style="color: #ae81ff;">0</span> ok - </code></pre></div> + +<p>As we can see above, Spectrum LSF has correctly detected the presence of the single Tesla V100 in the node. It&rsquo;s +also displaying a number of metrics about the GPU including mode, temperature, and memory.</p> + +<ol start="3"> +<li>Next, let&rsquo;s submit some GPU workloads to the environment. I found the samples included with NVIDIA CUDA to be fairly short running +on the Tesla V100, so I turned to the trusty Multi-GPU CUDA stress test aka <em>gpu-burn</em>. You can read more about that utility here. +To submit a GPU workload to Spectrum LSF, we use the -<em>gpu</em> option. This can be used to specify the detailed requirements for +your GPU job including the number of GPUs, GPU mode, GPU model, etc. For the purpose of this test, we&rsquo;ll use the default value &ldquo;-&rdquo; +which specifies the following options: &ldquo;<em>num=1:mode=shared:mps=no:j_exclusive=no:nvlink=no</em>&rdquo;.</li> +</ol> +<div class="highlight"><pre><code class="language-bash">test@kilenc:~/gpu-burn$ bsub -gpu - ./gpu_burn <span style="color: #ae81ff;">300</span> +Job &lt;51662&gt; is submitted to default queue &lt;normal&gt;.</code></pre></div> + +<ol start="4"> +<li>Next we confirm that the job has started successfully.</li> +</ol> +<div class="highlight"><pre><code class="language-bash">test@kilenc:~/gpu-burn$ bjobs -l <span style="color: #ae81ff;">51662</span> + +Job &lt;51662&gt;, User &lt;test&gt;, Project &lt;default&gt;, Status &lt;RUN&gt;, Queue &lt;normal&gt;, Comm + and &lt;./gpu_burn 300&gt;, Share group charged &lt;/test&gt; +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:09: Submitted from host &lt;kilenc&gt;, CWD &lt;$HOME/gpu-burn&gt;, Reques + ted GPU; +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:09: Started <span style="color: #ae81ff;">1</span> Task<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> on Host<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> &lt;kilenc&gt;, Allocated <span style="color: #ae81ff;">1</span> Slot<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> + on Host<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> &lt;kilenc&gt;, Execution Home &lt;/home/test&gt;, Executi + on CWD &lt;/home/test/gpu-burn&gt;; +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:10: Resource usage collected. 
+ MEM: <span style="color: #ae81ff;">4</span> Mbytes; SWAP: <span style="color: #ae81ff;">0</span> Mbytes; NTHREAD: <span style="color: #ae81ff;">3</span> + PGID: 95095; PIDs: <span style="color: #ae81ff;">95095</span> <span style="color: #ae81ff;">95096</span> <span style="color: #ae81ff;">95097</span> + + + MEMORY USAGE: + MAX MEM: <span style="color: #ae81ff;">4</span> Mbytes; AVG MEM: <span style="color: #ae81ff;">4</span> Mbytes + + SCHEDULING PARAMETERS: + r15s r1m r15m ut pg io ls it tmp swp mem + loadSched - - - - - - - - - - - + loadStop - - - - - - - - - - - + + EXTERNAL MESSAGES: + MSG_ID FROM POST_TIME MESSAGE ATTACHMENT + <span style="color: #ae81ff;">0</span> test Sep <span style="color: #ae81ff;">14</span> 12:52 kilenc:gpus<span style="color: #f92672;">=</span>0; N </code></pre></div> + +<ol start="5"> +<li>We cross confirm with the NVIDIA nvidia-smi command that the gpu-burn process is running on the GPU.</li> +</ol> +<div class="highlight"><pre><code class="language-bash">test@kilenc:~/gpu-burn$ nvidia-smi +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:18 <span style="color: #ae81ff;">2018</span> ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 396.37 Driver Version: 396.37 | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +|<span style="color: #f92672;">===============================</span>+<span style="color: #f92672;">======================</span>+<span style="color: #f92672;">======================</span>| +| <span style="color: #ae81ff;">0</span> Tesla V100-PCIE... Off | 00000033:01:00.0 Off | <span style="color: #ae81ff;">0</span> | +| N/A 68C P0 247W / 250W | 29303MiB / 32510MiB | 100% Default | ++-------------------------------+----------------------+----------------------+ + ++-----------------------------------------------------------------------------+ +| Processes: GPU Memory | +| GPU PID Type Process name Usage | +|<span style="color: #f92672;">=============================================================================</span>| +| <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">95107</span> C ./gpu_burn 29292MiB | ++-----------------------------------------------------------------------------+</code></pre></div> + +<ol start="6"> +<li>Next we use the Spectrum LSF <em>lsload</em> command with the -<em>gpuload</em> option to check the GPU utilization. This should closely match +what we see above.</li> +</ol> +<div class="highlight"><pre><code class="language-bash">test@kilenc:~/gpu-burn$ lsload -gpuload +HOST_NAME gpuid gpu_model gpu_mode gpu_temp gpu_ecc gpu_ut gpu_mut gpu_mtotal gpu_mused gpu_pstate gpu_status gpu_error +kilenc <span style="color: #ae81ff;">0</span> TeslaV100_P 0.0 70C 0.0 100% 29% 31.7G 28.6G <span style="color: #ae81ff;">0</span> ok -</code></pre></div> + +<ol start="7"> +<li>After 300 seconds (5 minutes), the job completes and exits without error. 
We inspect the history of the job using the +Spectrum LSF <em>bhist</em> command which shows the changes in state of the job from start to finish.</li> +</ol> +<div class="highlight"><pre><code class="language-bash">test@kilenc:~/gpu-burn$ bhist -l <span style="color: #ae81ff;">51662</span> + +Job &lt;51662&gt;, User &lt;test&gt;, Project &lt;default&gt;, Command &lt;./gpu_burn 300&gt; +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:09: Submitted from host &lt;kilenc&gt;, to Queue &lt;normal&gt;, CWD &lt;$HOM + E/gpu-burn&gt;, Requested GPU; +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:09: Dispatched <span style="color: #ae81ff;">1</span> Task<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> on Host<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> &lt;kilenc&gt;, Allocated <span style="color: #ae81ff;">1</span> Slot + <span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> on Host<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> &lt;kilenc&gt;, Effective RES_REQ &lt;<span style="color: #66d9ef;">select</span><span style="color: #f92672;">[((</span>ngpus + &gt;0<span style="color: #f92672;">))</span> <span style="color: #f92672;">&amp;&amp;</span> <span style="color: #f92672;">(</span>type <span style="color: #f92672;">==</span> local<span style="color: #f92672;">)]</span> order<span style="color: #f92672;">[</span>gpu_maxfactor<span style="color: #f92672;">]</span> rusage<span style="color: #f92672;">[</span>ngpus + _physical<span style="color: #f92672;">=</span>1.00<span style="color: #f92672;">]</span> &gt;; +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:09: External Message <span style="color: #e6db74;">"GPU_ALLOC="</span>kilenc<span style="color: #f92672;">{</span>0<span style="color: #f92672;">[</span>0:0<span style="color: #f92672;">]}</span><span style="color: #e6db74;">"GPU_MODELS="</span>Te + slaV100_PCIE_32GB-32510<span style="color: #f92672;">{</span>0<span style="color: #f92672;">[</span>0<span style="color: #f92672;">]}</span><span style="color: #e6db74;">"GPU_FACTORS="</span>7.0<span style="color: #f92672;">{</span>0<span style="color: #f92672;">[</span>0<span style="color: #f92672;">]}</span><span style="color: #e6db74;">"GPU_S +</span><span style="color: #e6db74;"> OCKETS="</span>8<span style="color: #f92672;">{</span>0<span style="color: #f92672;">[</span>0<span style="color: #f92672;">]}</span><span style="color: #e6db74;">"GPU_NVLINK="</span>0<span style="color: #f92672;">[</span>0#0<span style="color: #f92672;">]</span><span style="color: #e6db74;">""</span> was posted from <span style="color: #e6db74;">"_sys +</span><span style="color: #e6db74;"> tem_"</span> to message box 131; +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:10: Starting <span style="color: #f92672;">(</span>Pid 95095<span style="color: #f92672;">)</span>; +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:10: Running with execution home &lt;/home/test&gt;, Execution CWD &lt;/ + home/test/gpu-burn&gt;, Execution Pid &lt;95095&gt;; +Fri Sep <span style="color: #ae81ff;">14</span> 12:52:10: External Message <span style="color: #e6db74;">"kilenc:gpus=0;EFFECTIVE GPU REQ: num=1:m +</span><span style="color: #e6db74;"> ode=shared:mps=no:j_exclusive=no;"</span> was posted from <span style="color: #e6db74;">"test"</span> + to message box 0; +Fri Sep <span style="color: #ae81ff;">14</span> 12:57:12: Done successfully. 
The CPU time used is 302.0 seconds; +Fri Sep <span style="color: #ae81ff;">14</span> 12:57:12: Post job process <span style="color: #66d9ef;">done</span> successfully; + + +MEMORY USAGE: +MAX MEM: <span style="color: #ae81ff;">220</span> Mbytes; AVG MEM: <span style="color: #ae81ff;">214</span> Mbytes + +Summary of time in seconds spent in various states by Fri Sep <span style="color: #ae81ff;">14</span> 12:57:12 + PEND PSUSP RUN USUSP SSUSP UNKWN TOTAL + <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">303</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">303</span> </code></pre></div> + +<p>This has only been a teaser of the GPU support capabilities in Spectrum LSF. Spectrum LSF also includes support for NVIDIA DCGM which +is used to collect GPU resource utilization per job. But that&rsquo;s a topic for another blog :). À la prochaine fois!</p> + + + + + HTCondor Pull Mode + + 2018-08-31T18:28:42-06:00 + https://hpc.social/2018/htcondor-pull-mode + <p>For a recent project to utilize HPC clusters for HTC workflows, I had to add the ability to transfer the input and output sandboxes to and from HTCondor. HTCondor already has the ability to spool input files to a SchedD, and pull the output sandbox. These functions are intended to stage jobs to an HTCondor pool. But HTCondor did not have the ability to pull jobs from an HTCondor pool.</p> + +<p>The anticipated steps for a job pulled from an HTCondor pool:</p> + +<ol> + <li>Download the <strong>input</strong> sandbox</li> + <li>Submit the job to the local scheduler</li> + <li>Watch the status of the job</li> + <li>Once completed, transfer the <strong>output</strong> sandbox to the origin SchedD</li> +</ol> + +<p>The sandboxes are:</p> + +<ul> + <li><strong>Input</strong>: + <ul> + <li>Input files</li> + <li>Executable</li> + <li>Credentials</li> + </ul> + </li> + <li><strong>Output</strong>: + <ul> + <li>Stdout / Stderr from job</li> + <li>Output files or any files that may have changed while the job ran</li> + </ul> + </li> +</ul> + +<h2 id="api-additions">API Additions</h2> + +<p>In order to transfer the input sandbox and output sandbox, two new commands were added to the SchedD, as well as a new client function and python bindings to use them.</p> + +<p>The function for transferring input files is:</p> + +<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>transferInputSandbox(constraint, destination) +</code></pre></div> +</div> + +<p><code class="language-plaintext highlighter-rouge">constraint</code> is an HTCondor constraint selecting the jobs whose input files should be transferred. <code class="language-plaintext highlighter-rouge">destination</code> is a directory to put the sandboxes. The sandboxes will be placed in directories named <code class="language-plaintext highlighter-rouge">destination/&lt;ClusterId&gt;/&lt;ProcId&gt;/</code>.</p> + +<p>For transferring output files, the function is:</p> + +<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>transferOutputSandbox( jobs ) +</code></pre></div> +</div> + +<p>Where <code class="language-plaintext highlighter-rouge">jobs</code> is a list of tuples. The structure of the tuple is <code class="language-plaintext highlighter-rouge">( classad, sandboxdir )</code>. 
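</p> + +<p>As a rough sketch of how these two calls might be exercised end to end (the constraint, paths, and <code class="language-plaintext highlighter-rouge">job_ad</code> variable here are invented for illustration; only the function names and signatures above come from the actual additions):</p> + +<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code># pull the input sandboxes for cluster 42 from the origin SchedD; +# they land in ./sandboxes/42/&lt;ProcId&gt;/ +transferInputSandbox('ClusterId == 42', './sandboxes') + +# ... submit to the local scheduler and watch until the job completes ... + +# hand the output sandbox for job 42.0 back to the origin SchedD +transferOutputSandbox([ (job_ad, './sandboxes/42/0') ]) +</code></pre></div> +</div> + +<p>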
<code class="language-plaintext highlighter-rouge">classad</code> is the full classad of the original job, and <code class="language-plaintext highlighter-rouge">sandboxdir</code> is the location of the output sandbox to send.</p> + +<h2 id="current-status">Current Status</h2> + +<p>I have created a <a href="https://github.com/djw8605/htcondor-pull">repo</a> for an example that uses these functions in order to pull a job from a remote SchedD.</p> + +<p>Also, my changes to <a href="https://github.com/djw8605/htcondor/tree/add_sandbox_transfers">HTCondor</a> are in my repo, and I have begun the discussion about merging in my changes.</p> + + + + + A Killer Feature for Scientific Development Frameworks- An Incremental Path To Maturity + + 2018-07-16T01:00:00-06:00 + https://hpc.social/2018/a-killer-feature-for-scientific-development-frameworks-an-incremental-path-to-maturity + <p>( <strong>Note</strong>: This is a bit of a work in progress; even more so than usual, comments/criticisms/additions welcome )</p> + +<h3 id="the-stages-of-research-software-development">The Stages of Research Software Development</h3> + +<p>Research software development covers a lot of ground — it’s the development of software for research, +and research is a broad endeavour that covers a lot of use cases.</p> + +<p>The part of research software development that I find the most interesting is the part that +<em>is a research effort itself</em>; the creation of new simulation methods, new data analysis techniques, +new ways to combining different sorts of approaches. Like any new tools, this work +can enable people to ask entirely new questions, or answer old questions in new ways, pushing +scholarship forward along previously unexplored paths.</p> + +<p>But for new methods to live up to their potential and have that impact, they have to be developed +and disseminated. As a community, we’re still developing the training and tool chains that +make this routine; without them, there are still too many bottlenecks in the method development +pipeline that mean good ideas for new tools get delayed, sometimes indefinitely, before adoption.</p> + +<p>Computational tools for science and scholarship go through stages of development like any experimental technique:</p> + +<ol> + <li><strong>Will this even work?</strong> Testing the core ideas out, usually interactively</li> + <li><strong>Will this answer my question?</strong> Developing a very early prototype on your own data set/conditions</li> + <li><strong>Is this an interesting question to others?</strong> Sharing a more robust prototype with friendly collaborators who think it might be useful</li> + <li><strong>Becoming Research Infrastructure</strong> The robust, usable, automatable tool becomes something strangers start to use routinely in their own research</li> +</ol> + +<p>These steps can be thought of as a sort of an internal-to-the-research-endeavour version of +the <a href="https://en.wikipedia.org/wiki/Technology_readiness_level">Technology Readiness Levels</a> +that are used to describe the maturity of technologies and tools, now often used when talking +about commercialization.</p> + +<p>Not every idea has to go through all four stages to be successful; sometimes a tool will be a ‘one-off’ +or nearly so, used for one or two projects and that’s it. 
This isn’t at all a bad thing, +if it served its one purpose well.</p> + +<p>But each transition between stages represents a potential barrier for ideas becoming new tools, +a jump in level of development skills and effort required. Every tool that stalls between +stages solely because there isn’t training or tooling to allow incremental progress along +the pipeline is a tool that is unnecessarily lost to researchers who might have made use of it.</p> + +<h3 id="training-research-software-developers-to-tackle-all-stages">Training Research Software Developers To Tackle all Stages</h3> + +<p>The set of techniques that we mean when we talk about “Software +Engineering” is most useful at step 4 — these techniques +largely assume that there already exists a well-posed problem and +an understood, implementable solution. I’ve argued in the past +that it’s not only unnecessary but actually irresponsible to build +“well-engineered” software for tools at stage 1 or 2, +where the answers will often turn out to be “No”.</p> + +<p>It was understood fairly early that the lifecycle for scientific +projects differed a great deal from scientific software development. +Realizing that correspondingly different training was needed, in the late 90s +<a href="https://software-carpentry.org">Software Carpentry</a>, and later <a href="https://carpentries.org">The Carpentries</a>, +started teaching more research trainees enough modern programming skills to ask their own +questions — to navigate the biggest transition from nothing to stage 1, when existing tools +won’t work for their questions; and to get started on the journey of the next transition, to +stage 2, building an entire early prototype. That training may or may not get students +all the way to the end of stage 2, with issues like speed or advanced functionality remaining, +but those issues will vary from research project to research project, and the goal is to +get the students to the point where they can learn additional material themselves.</p> + +<p>There still isn’t a lot of training for researchers to make the next big jump, from +prototype-for-self to tool-some-others-can-use. However, authors are beginning to write +resources for students wanting to learn how to proceed<sup id="fnref:1"><a class="footnote" href="https://www.dursi.ca/feed.xml#fn:1" rel="footnote">1</a></sup><sup>,</sup><sup id="fnref:2"><a class="footnote" href="https://www.dursi.ca/feed.xml#fn:2" rel="footnote">2</a></sup><sup>,</sup><sup id="fnref:3"><a class="footnote" href="https://www.dursi.ca/feed.xml#fn:3" rel="footnote">3</a></sup><sup>,</sup><sup id="fnref:4"><a class="footnote" href="https://www.dursi.ca/feed.xml#fn:4" rel="footnote">4</a></sup>.</p> + +<p>The second-biggest transition in that list, that from 3 to 4, is the one I worry the least +about. It’s at that stage that existing software engineering teaching, tooling, +and resources become the most helpful. And while the effort to learn those techniques +and apply them can be significant, at this point the ideas and the tool have proven themselves +useful enough that it is much easier to find the time, people, and resources to complete a +“research infrastructure”-grade implementation.</p> + +<p>Of course, once the set of ideas is implemented as research infrastructure, it’s +much harder for most practicing researchers to get under the hood and start +tinkering with it by making changes or incorporating additional ideas. 
And so the cycle starts again.</p> + +<h3 id="the-best-scientific-development-frameworks-will-allow-an-incremental-path-towards-maturity">The Best Scientific Development Frameworks will Allow an Incremental Path Towards Maturity</h3> + +<p>While the research computing community has made great progress in creating development training +specific to their needs, there’s been much less success with programming languages, tools, or +frameworks which reflect the path of research programs.</p> + +<p>Arguably the best programming language for science, and certainly one of the most successful, +has been a general purpose programming language, Python. I think the reasons for this include +the relatively smooth path scientific software development can take towards maturity in the +Python ecosystem:</p> + +<ul> + <li>One can easily and rapidly test out ideas at the REPL and in a notebook. (Stage 1)</li> + <li>The large standard library and even larger ecosystem lets you quickly implement a lot of functionality (Stages 1/2)</li> + <li>Great tooling exists, including <a href="https://code.visualstudio.com">VSCode</a> which makes much IDE functionality available for free (Stages 2/3)</li> + <li>Compared to languages more commonly used earlier like C and FORTRAN, the exception system lets +you implement a number of things and still understand what’s happening before you have to start +implementing boilerplate error handling, making it something that can be added incrementally at later stages. (Stages 2/3/4)</li> + <li>Tools like <a href="http://numba.pydata.org">Numba</a>, <a href="https://www.pypy.org">PyPy</a>, or <a href="http://cython.org">Cython</a> allow +substantial but incremental performance improvement for many kinds of computation (Stages 2/3/4)</li> + <li>Tools like <a href="https://dask.org">Dask</a> offer an incremental path to scale (Stages 3/4)</li> +</ul> + +<p>It’s useful to consider incrementalism-as-a-feature in the context +of existing programming environments, each of which has some ideas useful to +scientific computing. <a href="http://www.ada2012.org">Ada</a>, a highish-level programming +language with an emphasis on correctness, has a reputation of being +a somewhat authoritarian programming environment; however, many of its correctness +features are things you can incrementally add on (things like pre- and post-conditions). +On the other hand, <a href="https://www.rust-lang.org/en-US/">Rust</a>, a lower level +language aimed at systems programming where reliability and security matter in an environment +where memory bugs continue to cause problems, enables very low-level concurrency +features but one very quickly has to wrestle with Rust’s powerful +<a href="https://doc.rust-lang.org/1.8.0/book/references-and-borrowing.html">borrow checker</a>; +adding non-trivial sharing semantics to code in Rust results in a +dramatically non-incremental development effort, which is arguably +the right choice for a low-level systems programming language.</p> + +<p>While Python and other general programming languages have flourished, +other frameworks, aimed more directly at solving needs particular
<p>It’s useful to consider incrementalism-as-a-feature 
+in the context of existing programming environments, each of which has some ideas useful to 
+scientific computing. <a href="http://www.ada2012.org">Ada</a>, a highish-level programming 
+language with an emphasis on correctness, has a reputation for being 
+a somewhat authoritarian programming environment; however, many of its correctness 
+features are things you can incrementally add on (things like pre- and post-conditions). 
+On the other hand, <a href="https://www.rust-lang.org/en-US/">Rust</a>, a lower-level 
+language aimed at systems programming, where reliability and security matter in an environment 
+where memory bugs continue to cause problems, enables very low-level concurrency 
+features, but one very quickly has to wrestle with Rust’s powerful 
+<a href="https://doc.rust-lang.org/1.8.0/book/references-and-borrowing.html">borrow checker</a>; 
+adding non-trivial sharing semantics to code in Rust results in a 
+dramatically non-incremental development effort, which is arguably 
+the right choice for a low-level systems programming language.</p> 
+ 
+<p>While Python and other general programming languages have flourished, 
+other frameworks, aimed more directly at solving needs particular 
+to research or branches of research, have struggled. Much of this, 
+of course, has to do with the simple math of adoption; but most 
+have also not made much effort to build tools which ease the development 
+of increasingly mature research software.</p> 
+ 
+<p>To their credit, the <a href="https://julialang.org">Julia</a> community has 
+come closest, but they are focussed on a narrow piece of the issue: 
+the need for a framework supporting incremental adoption becomes “one 
+language for everything”, with tools like Numba or PyPy treated as, 
+essentially, cheating; and the only maturity metric in focus is 
+performance. It’s better to have fast code than not, of course, but speed is by no means 
+the primary development problem of most researchers.</p> 
+ 
+<p>Having said that, most other programming languages aimed at 
+scientific communities have not made nearly as much progress on key 
+usability issues for researchers. I’ll certainly be watching the 
+progress of their 1.x releases with some interest.</p> 
+ 
+<h3 id="the-developing-field-of-research-software-engineering">The Developing Field of Research Software Engineering</h3> 
+ 
+<p>It’s been fascinating to watch from the sidelines over the past two decades 
+as research software engineering and RSE as a profession have gone from 
+basically nothing to <a href="https://rse.ac.uk/conf2018/">conferences</a>, 
+<a href="https://carpentries.org">organizations</a>, and research. I’m enormously 
+heartened by the fact that training now exists to tackle the specific 
+challenges of developing software that is itself research into methods 
+development.</p> 
+ 
+<p>I’m still somewhat pessimistic, however, about the state of development frameworks 
+for research computing. My current work with web services development 
+just drives home the point of how scarce the tooling is for building 
+research software.</p> 
+ 
+<p>The history of research computing since Fortran’s dominance has 
+been that research software engineering has grafted itself onto 
+a set of existing general purpose programming languages like C++ 
+or Python, each of which has advantages but also gaps for research 
+computing. There are exciting experiments here and there with new 
+languages, but none are yet particularly compelling.</p> 
+ 
+<p>As data science and data engineering become more and more common in 
+commercial enterprises as a computing use case, we may yet end 
+up finding frameworks which, if not actually designed for science, 
+are made for similar purposes. The good news is that people problems 
+are hard, while technology problems are (comparatively) tractable. 
+ 
+If one or more promising development frameworks appear in the coming 
+years, ones that allow a path from “basic methods science” 
+to “methods commercialization”, other people’s hard 
+work has led to a generation of research software developers who are ready 
+to take the plunge.</p> 
+ 
+<div class="footnotes"> 
+  <ol> 
+    <li id="fn:1"> 
+      <p><a href="http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005412"><em>Ten simple rules for making research software more robust</em>, Taschuk &amp; Wilson</a> <a class="reversefootnote" href="https://www.dursi.ca/feed.xml#fnref:1">&#8617;</a></p> 
+ 
+    </li> 
+    <li id="fn:2"> 
+      <p><a href="http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005265"><em>Ten Simple Rules for Developing Usable Software in Computational Biology</em>, List, Ebert, &amp; Albrecht</a> <a class="reversefootnote" href="https://www.dursi.ca/feed.xml#fnref:2">&#8617;</a></p> 
+ 
+    </li> 
+    <li id="fn:3"> 
+      <p><a href="http://katyhuff.github.io/python-testing"><em>Testing and Continuous Integration with Python</em>, Huff</a> <a class="reversefootnote" href="https://www.dursi.ca/feed.xml#fnref:3">&#8617;</a></p> 
+ 
+    </li> 
+    <li id="fn:4"> 
+      <p><a href="https://arxiv.org/pdf/1609.00037.pdf"><em>Good Enough Practices in Scientific Computing</em>, Wilson et al.</a> <a class="reversefootnote" href="https://www.dursi.ca/feed.xml#fnref:4">&#8617;</a></p> 
+ 
+    </li> 
+  </ol> 
+</div> 
+ 
+ 
+ 
+ 
+ The Taming of the GPU 
+ 
+ 2018-06-21T03:21:37-06:00 
+ https://hpc.social/2018/the-taming-of-the-gpu 
+ <p>The media has been alight with articles regarding the groundbreaking Summit supercomputer recently unveiled at Oak Ridge National Laboratory. It sports a mind-boggling 9,216 IBM POWER9 CPUs and 27,648 NVIDIA Tesla GPUs, underpinned by 250 petabytes of storage. This muscle will be put to good use running traditional HPC as well as AI workloads across a broad range of sciences.</p> 
+ 
+<p>Looking at the landscape of systems being built for HPC and now AI, there is one commonality – many are hybrid CPU-GPU systems. Whether we’re considering systems at the pinnacle of computing such as Summit, or commercial HPC and AI systems, GPUs have become a de facto method for accelerating code and providing copious amounts of floating point performance.</p> 
+ 
+<p>The early days of clustered computing saw the advent of workload and resource managers, which were a means of taming environments by orchestrating access to, and bringing computing resources to bear, in a predictable manner – aligned with the needs of scientists and businesses alike. As environments have grown in scale to meet the growing thirst for HPC, GPUs and accelerated computing have stepped out on stage to take a bow.</p> 
+ 
+<p>Software developers continue to port and optimize applications to benefit from the capabilities provided by GPUs. According to a recent report from November 2017, a high percentage of HPC applications now offer GPU support.</p> 
+ 
+<blockquote> 
+<p><strong>“According to the latest HPC User Site Census data and additional research, of the 50 most popular application packages mentioned by HPC users, 34 offer GPU support (including two under current development), including 9 of the top 10.”</strong></p> 
+ 
+</blockquote> 
+<p>Indeed, the recent Top 500 list (November 2017) includes no fewer than 87 hybrid CPU-GPU systems (and more counting other types of accelerators).</p> 
+ 
+<p>So how do GPU-heavy systems impact the task of the workload and resource managers? 
Fundamentally, GPUs are resources, so workload schedulers have had to adapt too.</p> 
+ 
+<p><strong>A wild west land grab</strong></p> 
+ 
+<p>It’s not just large-scale supercomputers that face the challenges of compute supply versus user demands. Commercial HPC environments are also now increasingly hybrid CPU-GPU based, with potentially hundreds of users and millions of jobs per day in high-throughput computing use cases. These are complex environments and large investments requiring workload management software with sophisticated capabilities to rein in all the resources – so that users end up with GPU workloads running on the right servers.</p> 
+ 
+<p>Computing environments today can have some servers with GPUs, some without, varied GPU configurations including models and memory, and a different number of GPUs per node. Adding to this complexity, in a typical data center, servers can come and go, so the configuration is not always static.</p> 
+ 
+<p>In general, workload schedulers require the administrator to specify in the configuration whether a given server is equipped with GPUs, often requiring additional information such as the GPU model, etc. Without this crucial information, the workload scheduler cannot effectively route jobs to nodes – potentially leading to a Wild West grab for resources.</p> 
+ 
+<p><strong>Call in the Cavalry</strong></p> 
+ 
+<p>IBM Spectrum LSF has been continuously innovating to address the needs of increasingly complex HPC environments of scale since 1992. Support for NVIDIA GPUs was first introduced in IBM Spectrum LSF in 2007. Continuing this long tradition of enhancements to NVIDIA GPU support, IBM Spectrum LSF now includes a new capability designed to dramatically simplify the administration of GPU servers and enable users to be productive faster. With “zero config” for NVIDIA GPUs, IBM Spectrum LSF detects the presence of GPUs and automatically performs the necessary scheduler configuration – without any interaction from the administrator. IBM Spectrum LSF will help tame the GPU environment for you, allowing users with GPU-ready codes to be productive from the moment the environment is set up.</p> 
+ 
+ 
+ 
+ 
+ Are FPGAs the answer to HPC's woes? 
+ 
+ 2018-02-24T09:21:00-07:00 
+ https://hpc.social/2018/are-fpgas-the-answer-to-hpc-s-woes- 
+ <h2>Executive Summary</h2> 
+<p>Not yet.  I’ll demonstrate why no domain scientist would ever want to program in Verilog, then highlight a few promising directions of development that are addressing this fact.<br /><br />The usual disclaimer also applies: the opinions and conjectures expressed below are mine alone and not those of my employer.  Also I am not a computer scientist, so I probably don’t know what I’m talking about.  And even if it seems like I do, remember that I am a storage architect who is wholly unqualified to speak on applications and processor performance.<br /><br />&lt;h2&gt;Premise&lt;/h2&gt;We’re now in an age where CPU cores aren’t getting any faster, and the difficulties of shrinking processes below 10 nm mean we can’t really pack any more CPU cores on a die.  
Where’s performance going to come from if we ever want to get to exascale and beyond?<br /><br />Some vendors are betting on <b>larger and larger vectors</b>–<a href="https://community.arm.com/processors/b/blog/posts/technology-update-the-scalable-vector-extension-sve-for-the-armv8-a-architecture">ARM (with its Scalable Vector Extensions)</a> and <a href="http://www.nec.com/en/global/solutions/hpc/sx/vector_engine.html">NEC (with its Aurora coprocessors)</a> are going down this path.  However, algorithms that aren’t predominantly dense linear algebra will need very efficient scatter and gather operations that can pack vector registers quickly enough to make doing a single vector operation worthwhile.  For example, gathering eight 64-bit values from different parts of memory to issue an eight-wide (512-bit) vector multiply requires pulling eight different cache lines–that’s moving 4096 bits of memory for what amounts to 512 bits of computation.  In order to continue scaling vectors out, CPUs will have to rethink how their vector units interact with memory.  This means either (a) getting a lot more memory bandwidth to support these low <a href="http://www.nersc.gov/users/application-performance/measuring-arithmetic-intensity/">flops-per-byte ratios</a>, or (b) packing vectors closer to the memory so that pre-packed vectors can be fetched through the existing memory channels.<br /><br />Another option to consider is <b>GPUs</b>, which work around the vector packing issue by implementing massive numbers of registers and giant crossbars to plumb those bytes into arithmetic units.  Even then, though, relying on a crossbar to connect compute and data is difficult to continue scaling; the interconnect industry gave up on this long ago, which is why today’s clusters now connect hundreds or thousands of crossbars into larger fat trees, hypercubes, and dragonflies.  GPUs are still using larger and larger crossbars–NVIDIA’s V100 GPU is one of the <a href="https://arstechnica.com/gadgets/2017/05/nvidia-tesla-v100-gpu-details/">physically largest single-die chips ever made</a>–but there’s an economic limit to how large a die can be.<br /><br />This bleak outlook has begun to drive HPC designers towards thinking about smarter ways to use silicon.  Rather than build a general-purpose processor that can do all multiplication and addition operations at a constant rate, the notion is to bring hardware design closer to the algorithms being implemented.  This isn’t a new idea (for example, <a href="http://dx.doi.org/10.1098/rsta.2013.0387">RIKEN’s MDGRAPE</a> and <a href="http://dx.doi.org/10.1109/SC.2014.9">DESRES’s Anton</a> are famous examples of purpose-built chips for specific scientific application areas), but this approach historically has been very expensive relative to just using general-purpose processor parts.  Only now are we at a place where special-purpose hardware may be the only way to sustain HPC’s performance trajectory.<br /><br />Given the diversity of applications that run on the modern supercomputer, though, expensive and custom chips that only solve one problem aren’t very appetizing.  FPGAs are a close compromise, and there has been a growing buzz surrounding the viability of relying on FPGAs in mainstream HPC workloads.<br /><br />Many of us non-computer scientists in the HPC business only have a vague and qualitative notion of how FPGAs can realistically be used to carry out computations, though.  
Since there is growing excitement around FPGAs for HPC as exascale approaches, I set out to get my hands dirty and figure out how they might fit in the larger HPC ecosystem.<br /><br />&lt;h2&gt;Crash course in Verilog&lt;/h2&gt;Verilog can be very difficult to grasp for people who already know how to program in languages like C or Fortran (like me!).  On the one hand, it looks a bit like C in that it has variables to which values can be assigned, if/then/else controls, for loops, and so on.  However, these similarities are deceptive because Verilog does <i>not</i> execute like C; whereas a C program executes code line by line, one statement after the other, Verilog sort of executes all of the lines at the same time, all the time.<br /><br />A C program to turn an LED on and off repeatedly might look like:<br /><br />&lt;div&gt;&lt;/div&gt; 
+where the LED is turned on, <i>then</i> the LED is turned off, <i>then</i> we repeat.<br /><br />In Verilog, you really have to describe <i>what</i> components your program will have and <i>how</i> they are connected. In the most basic way, the code to blink an LED in Verilog would look more like<br /><br />&lt;div&gt;&lt;/div&gt; 
+<br />Whereas C is a <i>procedural</i> language in that you describe a procedure for solving a problem, Verilog is more like a <i>declarative</i> language in that you describe how widgets can be arranged to solve the problem.<br /><br />This can make tasks that are simple to accomplish in C comparatively awkward in Verilog. Take our LED blinker C code above as an example; if you want to slow down the blinking frequency, you can do something like<br /><br />&lt;div&gt;&lt;/div&gt; 
+<br />Because Verilog is not procedural, there is no simple way to say “wait a second <i>after</i> you turn on the LED before doing something else.” Instead, you have to rely on knowing how much time passes between consecutive clock signals (<code>clk</code> incrementing).<br /><br />For example, the DE10-Nano has a 50 MHz clock generator, so a new clock signal arrives every 1/(50 MHz) = 20 nanoseconds, and everything time-based has to be derived from this fundamental clock timer. The following Verilog statement:<br /><br />&lt;div&gt;&lt;/div&gt; 
+<br />indicates that every 20 ns, the <code>cnt</code> register (variable) is incremented by one. To make the LED wait for one second after the LED is turned on, we need to figure out a way to do nothing for 50,000,000 clock cycles (1 second / 20 nanoseconds). 
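+<br /><br />(The code embeds above are empty in this copy of the post, so the following are reconstructed sketches of the snippets just described; helper names such as <code>turn_led_on()</code> are invented for illustration and are not the original listings.)  First, the C blinker, shown with the <code>sleep(1)</code> calls that slow the blinking to one-second intervals; dropping both calls gives the first, fastest version:<br /><br /> 
+<div class="highlight"><pre><code class="language-c">#include &lt;unistd.h&gt;       /* sleep() */ 
+ 
+void turn_led_on(void);    /* hypothetical GPIO helpers */ 
+void turn_led_off(void); 
+ 
+int main(void) 
+{ 
+    while (1) { 
+        turn_led_on();     /* LED on */ 
+        sleep(1);          /* wait one second */ 
+        turn_led_off();    /* LED off */ 
+        sleep(1);          /* wait one second, then repeat */ 
+    } 
+} 
+</code></pre></div> 
+<br />A correspondingly minimal Verilog blinker just toggles a register on every rising clock edge and wires it to the LED; with no notion of “waiting”, it toggles 50,000,000 times per second:<br /><br /> 
+<div class="highlight"><pre><code class="language-verilog">module blink(input clk, output led); 
+    reg led_state = 1'b0; 
+ 
+    always @(posedge clk) 
+        led_state &lt;= ~led_state;   // flips on every 20 ns clock edge 
+ 
+    assign led = led_state; 
+endmodule 
+</code></pre></div> 
+<br />And the counter statement just described, incrementing <code>cnt</code> on every clock edge, is simply:<br /><br /> 
+<div class="highlight"><pre><code class="language-verilog">always @(posedge clk) 
+    cnt &lt;= cnt + 1; 
+</code></pre></div> 
+<br /><br /> 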
The canonical way to do this is to<br />&lt;ol&gt;&lt;li&gt;create a big register that can store a number up to 50 million&lt;/li&gt;&lt;li&gt;express that this register should be incremented by 1 on every clock cycle&lt;/li&gt;&lt;li&gt;create a logic block that turns on the LED when our register is larger than 50 million&lt;/li&gt;&lt;li&gt;rely on the register eventually overflowing to go back to zero&lt;/li&gt;&lt;/ol&gt;If we make <code>cnt</code> a 26-bit register, it can count up to 67,108,864 different numbers and our Verilog can look something like<br /><br />&lt;div&gt;&lt;/div&gt; 
+<br />However, we are still left with two problems:<br />&lt;ol&gt;&lt;li&gt;<code>cnt</code> will overflow back to zero once <code>cnt</code> surpasses 2<sup>26</sup> - 1&lt;/li&gt;&lt;li&gt;We don’t yet know how to express how the LED is connected to our FPGA and should be controlled by our circuit&lt;/li&gt;&lt;/ol&gt;Problem #1 (<code>cnt</code> overflows) means that the LED will stay <i>on</i> for exactly 50,000,000 clock cycles (1 second), but it’ll turn <i>off</i> for only 2<sup>26</sup> - 1 - 50,000,000 cycles (17,108,863 cycles, or about 0.34 seconds). Not exactly the one second on, one second off that our C code does.<br /><br />Problem #2 is solved by understanding the following:<br /><br />&lt;ul&gt;&lt;li&gt;our LED is external to the FPGA, so it will be at the end of an <i>output wire</i>&lt;/li&gt;&lt;li&gt;the other end of that <i>output wire</i> must be connected to something inside our circuit–a register, another wire, or something else&lt;/li&gt;&lt;/ul&gt;<br />The conceptually simplest solution to this problem is to create another register (variable), this time only one bit wide, in which our LED state will be stored. We can then change the state of this register in our <code>if (cnt &gt; 50000000)</code> block and wire that register to our external LED:<br /><br />&lt;div&gt;&lt;/div&gt; 
+<br />Note that our <code>assign</code> statement is outside of our <code>always @(posedge clk)</code> block because this assignment–connecting our <code>led</code> output wire to our <code>led_state</code> register–is a persistent declaration, <i>not</i> the assignment of a particular value. We are saying “whatever value is stored in <code>led_state</code> should always be carried to whatever is on the other end of the <code>led</code> wire.” Whenever <code>led_state</code> changes, <code>led</code> will simultaneously change as a result.<br /><br />With this knowledge, we can actually solve Problem #1 now by<br />&lt;ol&gt;&lt;li&gt;only counting up to 50 million and not relying on overflow of <code>cnt</code> to turn the LED on or off, and&lt;/li&gt;&lt;li&gt;overflowing the 1-bit <code>led_state</code> register every 50 million clock cycles&lt;/li&gt;&lt;/ol&gt;Our Verilog module would look like<br /><br />&lt;div&gt;&lt;/div&gt; 
+<br />and we accomplish the “hello world” of circuit design:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://4.bp.blogspot.com/-HwPyRg8Kc6U/Wm0vBaXxf5I/AAAAAAAA0Ho/QKNf3Kn4EqcqdPSl3uUxX8h_fAB9oxSeACLcBGAs/s1600/fpga-blink-1sec.gif" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="239" src="https://4.bp.blogspot.com/-HwPyRg8Kc6U/Wm0vBaXxf5I/AAAAAAAA0Ho/QKNf3Kn4EqcqdPSl3uUxX8h_fAB9oxSeACLcBGAs/s320/fpga-blink-1sec.gif" width="320" /></a>&lt;/div&gt; 
+<br />This Verilog is actually still missing a number of additional pieces and makes very inefficient use of the FPGA’s hardware resources. 
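+<br /><br />(The final listing is likewise empty in this copy; a reconstructed sketch of a module along the lines just described, with port and signal names guessed rather than taken from the original, might be:)<br /><br /> 
+<div class="highlight"><pre><code class="language-verilog">module blink( 
+    input  clk,    // 50 MHz board clock: one rising edge every 20 ns 
+    output led     // output wire to the external LED 
+); 
+    reg [25:0] cnt       = 26'd0;   // wide enough to count to 50,000,000 
+    reg        led_state = 1'b0;    // current LED state 
+ 
+    always @(posedge clk) begin 
+        if (cnt == 26'd49_999_999) begin 
+            cnt       &lt;= 26'd0;        // reset rather than overflow 
+            led_state &lt;= ~led_state;   // “overflow” the 1-bit register 
+        end else begin 
+            cnt &lt;= cnt + 26'd1; 
+        end 
+    end 
+ 
+    assign led = led_state;   // persistent wiring, outside the always block 
+endmodule 
+</code></pre></div> 
+<br /> 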
However, it shows how awkward it can be to express a simple, four-line procedural program using a hardware description language like Verilog.<br /><br />&lt;h2&gt;So why bother with FPGAs at all?&lt;/h2&gt;It should be clear that solving a scientific problem using a procedural language like C is generally more straightforward than with a declarative language like Verilog. That ease of programming is made possible by a ton of hardware logic that isn’t always used, though.<br /><br />Consider our blinking LED example; because the C program is procedural, it takes one CPU thread to walk through the code in our program. Assuming we’re using a 64-core computer, that means we can only blink up to 64 LEDs at once. On the other hand, our Verilog module consumes a tiny number of the programmable logic blocks on an FPGA. When compiled for a $100 hobbyist-grade DE10-Nano FPGA system, it uses only 21 of 41,910 programmable blocks, meaning it can control almost 2,000 LEDs concurrently**. A high-end FPGA would easily support tens of thousands. <br /><br />&lt;table cellpadding="0" cellspacing="0" class="tr-caption-container" style="display: block; float: right; margin-left: 1em; text-align: right;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="https://4.bp.blogspot.com/-dv03oqFBdTs/WpEAAg9THGI/AAAAAAAA0Tc/LV6L-sK7S4k3jK4bY8SHja0NynW518QqwCLcBGAs/s1600/cm200-6.jpg" style="clear: right; margin-bottom: 1em; margin-left: auto; margin-right: auto;"><img border="0" height="261" src="https://4.bp.blogspot.com/-dv03oqFBdTs/WpEAAg9THGI/AAAAAAAA0Tc/LV6L-sK7S4k3jK4bY8SHja0NynW518QqwCLcBGAs/s320/cm200-6.jpg" width="320" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;The CM2 illuminated an LED whenever an operation was in flight. Blinking the LED in Verilog is easy.  Reproducing the CM2 microarchitecture is a different story.  Image credit to <a href="http://www.corestore.org/cm200.htm">Corestore</a>.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;Of course, blinking LEDs haven’t been relevant to HPC since the days of Connection Machines, but if you were to replace LED-blinking logic with floating point arithmetic units, the same conclusions apply.  In principle, a single FPGA can perform a huge number of floating-point operations every cycle by giving up its ability to perform many of the tasks that a more general-purpose CPU would be able to do.  And because FPGAs are reprogrammable, they can be quickly configured to have an optimal mix of special-purpose parallel ALUs and general purpose capabilities to suit different application requirements.<br /><br />However, the fact that the fantastic potential of FPGAs hasn’t materialized into widespread adoption is a testament to how difficult it is to bridge the wide chasm between understanding how to solve a physics problem and understanding how to design a microarchitecture.<br /><br />&lt;h2&gt;Where FPGAs fit in HPC today&lt;/h2&gt;To date, a few scientific domains have had success in using FPGAs.  For example,<br /><br />&lt;ul&gt;&lt;li&gt;Experimental instruments that generate data commonly deploy FPGAs close to their detectors to perform very repetitive, relatively simple data filtering or manipulation at extremely high rates.  
For example, <a href="https://blogs.swarthmore.edu/Illumina+GAIIx+Teardown/?p=125#div-comment-191">Illumina HiSeq DNA sequencers incorporate both Altera and Xilinx FPGAs</a> to assist with the high-throughput image processing, and <a href="https://www.nextplatform.com/2016/01/05/an-expanding-role-for-fpgas-in-cerns-future/">high-energy physics experiments routinely use FPGAs</a> for signal processing.&lt;/li&gt;&lt;li&gt;Closer to the HPC side, <a href="https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3186629/">Convey implemented loadable FPGA blocks to perform many algorithms common to bioinformatics</a>.  For example, they provided an FPGA-accelerated Smith-Waterman algorithm; this algorithm is used to align short DNA sequences along a reference genome and must be executed thousands of times per genome before actual genomic analysis can start.&lt;/li&gt;&lt;li&gt;More recently, <a href="http://edicogenome.com/dragen-bioit-platform/">Edico Genome</a> has been very successful in implementing a wide range of common bioinformatics algorithms on FPGA and providing end-to-end analysis processing pipelines that act as drop-in replacements for standard genomic analysis pipelines.&lt;/li&gt;&lt;/ul&gt;&lt;div&gt;The success of these FPGA products is due in large part to the fact that the end-user scientists don’t ever have to directly interact with the FPGAs.  In the case of experimental detectors, FPGAs are sufficiently close to the detector that the “raw” data that is delivered to the researcher has already been processed by the FPGAs.  Convey and Edico products incorporate their FPGAs into an appliance, and the process of offloading certain tasks to the FPGA is hidden inside proprietary applications that, to the research scientist, look like any other command-line analysis program.&lt;/div&gt;</p> 
+<div><br /></div> 
+<div>With all this said, the fact remains that these use cases are all on the fringe of HPC. &nbsp;They present a black-and-white decision to researchers; to benefit from FPGAs, scientists must completely buy into the applications, algorithms, and software stacks. &nbsp;Seeing as how these FPGA HPC stacks are often closed-source and proprietary, the benefit of being able to see, modify, and innovate on open-source scientific code often outweighs the speedup benefits of the fast-but-rigid FPGA software ecosystem.</div> 
+<div><br /></div> 
+<h2>Where FPGAs will fit in HPC tomorrow</h2> 
+<div>The way I see it, there are two things that must happen before FPGAs can become a viable general-purpose technology for accelerating HPC:</div> 
+<div><ol><li>Users must be able to integrate FPGA acceleration into their existing applications rather than replace their applications wholesale with proprietary FPGA analogues.</li><li>It has to be as easy as&nbsp;<span>f90 -fopenacc</span>&nbsp;or&nbsp;<span>nvcc</span>&nbsp;to build an FPGA-accelerated application, and running the resulting accelerated binary has to be as easy as running an unaccelerated binary.</li></ol><div>The first steps towards realizing this have already been made; both <a href="https://www.xilinx.com/products/design-tools/software-zone/sdaccel.html">Xilinx</a> and <a href="https://www.altera.com/products/design-software/embedded-software-developers/opencl/overview.html">Intel/Altera</a> now offer OpenCL runtime environments that allow scientific applications to offload computational kernels to the FPGA. 
&nbsp;The Xilinx environment operates much like an OpenCL accelerator, where specific kernels are compiled for the FPGA and loaded as application-specific logic; the Altera environment installs a special OpenCL runtime environment on the FPGA. &nbsp;However, there are a couple of challenges:</div> 
+</div> 
+<div><ul><li>OpenCL tends to be very messy to code in compared to simpler APIs such as OpenACC, OpenMP, CUDA, or HIP. &nbsp;As a result, not many HPC application developers are investing in OpenCL anymore.</li><li>Compiling an application for OpenCL on an FPGA still requires going through the entire Xilinx or Altera toolchain. &nbsp;At present, this is <i><u>not</u></i> as simple as <span>f90 -fopenacc</span> or <span>nvcc</span>, and the process of compiling code that targets an FPGA can take orders of magnitude longer than it would for a CPU due to the NP-hard nature of placing and routing across all the programmable blocks.</li><li>The FPGA OpenCL stacks are not very polished or scientist-friendly right now; performance analysis and debugging generally still have to be done at the circuit level, which is untenable for domain scientists.</li></ul><div>Fortunately, these issues are under very active development, and the story surrounding FPGAs for HPC applications improves on a month-by-month basis. &nbsp;We're still years from FPGAs becoming a viable option for accelerating scientific applications in a general sense, but when that day comes, I predict that programming in Verilog for FPGAs will seem as exotic as programming in assembly is for CPUs.</div> 
+</div> 
+<div><br /></div> 
+<div>Rather, applications will likely rely on large collections of pre-compiled FPGA IP blocks (often called&nbsp;<i>FPGA overlays</i>) that map to common compute kernels. &nbsp;It will then be the responsibility of compilers to identify places in the application source code where these logic blocks should be used to offload certain loops. &nbsp;Since it's unlikely that a magic compiler will be able to identify these loops on its own, users will still have to rely on OpenMP, OpenACC, or some other API to provide hints at compile time. &nbsp;Common high-level functions, such as those provided by LAPACK, will probably also be provided by FPGA vendors as pre-compiled overlays that are hand-tuned.</div> 
+<div><br /></div> 
+<h2>Concluding Thoughts</h2> 
+<div>We're still years away from FPGAs being a viable option for mainstream HPC, and as such, I don't anticipate them being the key technology that will underpin the world's first exascale systems. &nbsp;Until the FPGA software ecosystem and toolchain mature to a point where domain scientists never have to look at a line of Verilog, FPGAs will remain an accelerator technology at the fringes of HPC.</div> 
+<div><br /></div> 
+<div>However, there is definitely a path for FPGAs to become mainstream, and forward progress is being made. &nbsp;Today's clunky OpenCL implementations are already being followed up by <a href="https://www.nextplatform.com/2016/10/19/turning-openmp-programs-parallel-hardware/">research into providing OpenMP-based FPGA acceleration</a>, and proofs of concept demonstrating <a href="https://ft.ornl.gov/sites/default/files/IPDPS16_OpenACC2FPGA_PPT.pdf">OpenACC-based FPGA acceleration</a> have shown promising levels of performance portability. 
&nbsp;On the hardware side, FPGAs are also approaching first-class citizenship with <a href="https://www.nextplatform.com/2017/10/02/intel-gears-fpga-push/">Intel planning to ship Xeons with integrated FPGAs in 2H2018</a> and <a href="https://www.alpha-data.com/dcp/products.php?product=adm-pcie-9v3">OpenPOWER beginning to ship Xilinx FPGAs with OpenCAPI-based coherence links for POWER9</a>.</div> 
+<div><br /></div> 
+<div>The momentum is growing, and the growing urgency surrounding post-Moore computing technology is driving investments and demand from both public and private sectors. &nbsp;FPGAs won't be the end-all solution that gets us to exascale, nor will they be the silver bullet that gets us beyond Moore's Law computing, but they will definitely play an increasingly important role in HPC over the next five to ten years.</div> 
+<div><br /></div> 
+<div>If you've gotten this far and are interested in more information, I strongly encourage you to check out <a href="https://science.energy.gov/~/media/ascr/ascac/pdf/meetings/201612/Finkel_FPGA_ascac.pdf">FPGAs for Supercomputing: The Why and How</a>, presented by Hal Finkel, Kazutomo Yoshii, and Franck Cappello at ASCAC. &nbsp;It provides more insight into the application motifs that FPGAs can accelerate, and a deeper architectural treatment of FPGAs as understood by real computer scientists.</div> 
+<div><br /></div> 
+<p><span style="font-size: xx-small;">** This is not really true.  Such a design would be limited by the number of physical pins coming out of the FPGA; in reality, output pins would have to be multiplexed, and additional logic to drive this multiplexing would take up FPGA real estate.  But you get the point.</span></p> 
+ 
+ 
+ 
+ 
+ Cleaning Up GRACC 
+ 
+ 2017-11-06T19:09:23-07:00 
+ https://hpc.social/2017/cleaning-up-gracc 
+ <p>The <a href="https://opensciencegrid.github.io/gracc/">GRid ACcounting Collector</a> (GRACC) is the OSG’s new version of accounting software, replacing Gratia. It has been running in production since March 2017. Last week, on Friday November 3rd, we held a GRACC Focus Day. Our goal was to clean up data that is presented in GRACC. My changes were:</p> 
+ 
+<ul> 
+  <li>Update the GRACC-Collector to version <a href="https://github.com/opensciencegrid/gracc-collector/tree/v1.1.8">1.1.8</a>. The primary change in this release is setting the messages sent to RabbitMQ to be “persistent”. The persistent messages are then saved to disk in order to survive a RabbitMQ reboot.</li> 
+  <li>Use case-insensitive comparisons to determine the <a href="https://oim.grid.iu.edu/oim/home">Open Science Grid Information Management system</a> (OIM) information. This was an issue with GPGrid (Fermilab), which was registered as <strong>GPGRID</strong>.</li> 
+  <li>Set the <code class="language-plaintext highlighter-rouge">OIM_Site</code> equal to the <code class="language-plaintext highlighter-rouge">Host_description</code> attribute if the OIM logic is unable to determine the registered OIM site. This is especially useful for the LIGO collaboration, which uses sites in Europe that are not registered in OIM. 
Now, instead of a lot of Unknown sites listed on the LIGO site listing, it shows the reported name of the site where the job ran.</li> 
+</ul> 
+ 
+<figure class=""> 
+  <img alt="GRACC Projects Page" src="https://derekweitzel.com/images/posts/GRACC-Cleanup/GRACC_Projects_Ligo.png" /><figcaption> 
+    GRACC Projects Page for LIGO 
+     
+  </figcaption></figure> 
+ 
+<h2 id="regular-expression-corrections"><a id="regex"></a>Regular Expression Corrections</h2> 
+ 
+<p>One of the common problems we have in GRACC is poor data coming from the various probes installed at hundreds of sites. We don’t control the data coming into GRACC, so occasionally we must make corrections to the data for clarity or correctness. One of the problems these corrections address is misreporting of the “site” that the jobs ran on.</p> 
+ 
+<p>In many instances, the probe is unable to determine the site and simply lists the hostname of the worker node where the job ran. This can cause the cardinality of sites listed in GRACC to increase dramatically as we get new hostnames inserted into the sites listing. If the hostnames are predictable, a regular expression matching algorithm can match a worker node hostname to a proper site name.</p> 
+ 
+<p>The largest change for GRACC was the regular expression corrections. With this new feature, GRACC administrators can set corrections to match on attributes using regular expression patterns. For example, consider the following correction configuration.</p> 
+ 
+<div class="language-toml highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="nn">[[Corrections]]</span> 
+<span class="py">index</span> <span class="p">=</span> <span class="s">'gracc.corrections'</span> 
+<span class="py">doc_type</span> <span class="p">=</span> <span class="s">'host_description_regex'</span> 
+<span class="py">match_fields</span> <span class="p">=</span> <span class="nn">['Host_description']</span> 
+<span class="py">source_field</span> <span class="p">=</span> <span class="s">'Corrected_OIM_Site'</span> 
+<span class="py">dest_field</span> <span class="p">=</span> <span class="s">'OIM_Site'</span> 
+<span class="py">regex</span> <span class="p">=</span> <span class="kc">true</span> 
+</code></pre></div> 
+</div> 
+ 
+<p>This configuration means:</p> 
+ 
+<blockquote> 
+  <p>Match the <code class="language-plaintext highlighter-rouge">Host_description</code> field in the incoming job record against the regular expression in the <code class="language-plaintext highlighter-rouge">Host_description</code> field of the corrections table. 
If they are a match, take the value in the <code class="language-plaintext highlighter-rouge">Corrected_OIM_Site</code> field in the corrections table and place it into the <code class="language-plaintext highlighter-rouge">OIM_Site</code> field in the job record.</p> 
+ 
+</blockquote> 
+ 
+<p>And the correction document would look like:</p> 
+ 
+<div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">{</span><span class="w"> 
+  </span><span class="nl">"_index"</span><span class="p">:</span><span class="w"> </span><span class="s2">"gracc.corrections-0"</span><span class="p">,</span><span class="w"> 
+  </span><span class="nl">"_type"</span><span class="p">:</span><span class="w"> </span><span class="s2">"host_description_regex"</span><span class="p">,</span><span class="w"> 
+  </span><span class="nl">"_id"</span><span class="p">:</span><span class="w"> </span><span class="s2">"asldkfj;alksjdf"</span><span class="p">,</span><span class="w"> 
+  </span><span class="nl">"_score"</span><span class="p">:</span><span class="w"> </span><span class="mi">1</span><span class="p">,</span><span class="w"> 
+  </span><span class="nl">"_source"</span><span class="p">:</span><span class="w"> </span><span class="p">{</span><span class="w"> 
+    </span><span class="nl">"Host_description"</span><span class="p">:</span><span class="w"> </span><span class="s2">".*</span><span class="se">\.</span><span class="s2">bridges</span><span class="se">\.</span><span class="s2">psc</span><span class="se">\.</span><span class="s2">edu"</span><span class="p">,</span><span class="w"> 
+    </span><span class="nl">"Corrected_OIM_Site"</span><span class="p">:</span><span class="w"> </span><span class="s2">"PSC Bridges"</span><span class="p">,</span><span class="w"> 
+  </span><span class="p">}</span><span class="w"> 
+</span><span class="p">}</span><span class="w"> 
+</span></code></pre></div> 
+</div> 
+ 
+<p>The regular expression goes in the <code class="language-plaintext highlighter-rouge">Host_description</code> field of the correction document.</p> 
+ 
+<p>So, if the incoming job record is similar to:</p> 
+ 
+<div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">{</span><span class="w"> 
+</span><span class="err">...</span><span class="w"> 
+</span><span class="nl">"Host_description"</span><span class="p">:</span><span class="w"> </span><span class="s2">"l006.pvt.bridges.psc.edu"</span><span class="w"> 
+</span><span class="err">...</span><span class="w"> 
+</span><span class="p">}</span><span class="w"> 
+</span></code></pre></div> 
+</div> 
+ 
+<p>Then the correction would modify or create values such that the final record would approximate:</p> 
+ 
+<div class="language-json highlighter-rouge"><div class="highlight"><pre class="highlight"><code><span class="p">{</span><span class="w"> 
+</span><span class="err">...</span><span class="w"> 
+</span><span class="nl">"Host_description"</span><span class="p">:</span><span class="w"> </span><span class="s2">"l006.pvt.bridges.psc.edu"</span><span class="p">,</span><span class="w"> 
+</span><span class="nl">"OIM_Site"</span><span class="p">:</span><span class="w"> </span><span class="s2">"PSC Bridges"</span><span class="p">,</span><span class="w"> 
+</span><span class="nl">"RawOIM_Site"</span><span class="p">:</span><span class="w"> </span><span class="s2">""</span><span class="w"> 
+</span><span class="err">...</span><span class="w"> 
+</span><span class="p">}</span><span class="w"> 
+</span></code></pre></div> 
+</div> 
+ 
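+<p>To make the mechanics concrete, here is a minimal Python sketch of that matching logic. The <code class="language-plaintext highlighter-rouge">apply_correction</code> helper is purely illustrative and is not GRACC’s actual implementation:</p> 
+ 
+<div class="language-python highlighter-rouge"><div class="highlight"><pre class="highlight"><code>import re 
+ 
+def apply_correction(record, correction): 
+    # Compare the regex from the corrections table against the job record. 
+    pattern = correction["Host_description"]   # e.g. r".*\.bridges\.psc\.edu" 
+    if re.fullmatch(pattern, record.get("Host_description", "")): 
+        # Stash the previous value (as in the example record above), 
+        # then write the corrected site name into OIM_Site. 
+        record["RawOIM_Site"] = record.get("OIM_Site", "") 
+        record["OIM_Site"] = correction["Corrected_OIM_Site"] 
+    return record 
+ 
+record = {"Host_description": "l006.pvt.bridges.psc.edu"} 
+correction = {"Host_description": r".*\.bridges\.psc\.edu", 
+              "Corrected_OIM_Site": "PSC Bridges"} 
+apply_correction(record, correction)["OIM_Site"]   # "PSC Bridges" 
+</code></pre></div> 
+</div> 
+ 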
<p>Note that the <code class="language-plaintext highlighter-rouge">Host_description</code> field stays the same. We must keep it the same because it is used in record duplicate detection. If we modified the field and resummarized previous records, then it would cause multiple records to represent the same job.</p> 
+ 
+ 
+ 
+ 
+ Cool and quiet benchmarking on MACCHIATObin (Armada 8040) 
+ 
+ 2017-09-15T17:37:03-06:00 
+ https://hpc.social/2017/cool-and-quiet-benchmarking-on-macchiatobin-armada-8040- 
+ <p>I&rsquo;ve recently taken delivery of a few new goodies to complement the MACCHIATObin Arm v8 powered board that I&rsquo;ve written about recently on my blog.</p> 
+ 
+<ul> 
+<li><a href="https://www.gaborsamu.com/blog/spectrumlsf_armv8/">Standing up a IBM Spectrum LSF Community Edition cluster on Arm v8</a></li> 
+<li><a href="https://www.gaborsamu.com/blog/turning_up_heat_armv8/">Turning up the heat&hellip;on my Armada 8040</a></li> 
+</ul> 
+<p>You&rsquo;ll recall that my efforts to do some rudimentary testing including running HPL were thwarted by overheating. So I decided to 
+address the issue with some parts I&rsquo;ve been meaning to pick up anyway for some other interesting projects I have in the pipeline 
+(fingers crossed):</p> 
+ 
+<ul> 
+<li>1 x <a href="https://noctua.at/en/products/product-lines/line-industrial">Noctua NF-A14 cooling fan</a></li> 
+<li>1 x <a href="http://nofancomputer.com/eng/products/P-500A.php">NOFAN P-500A fanless power supply</a></li> 
+<li>1 x (red) <a href="https://openbenchtable.com/">Open benchtable</a></li> 
+</ul> 
+<p>And this is what it looks like now&hellip;</p> 
+ 
+<figure><img src="https://www.gaborsamu.com/images/red_armada.jpg" /> 
+</figure> 
+ 
+<p>Now, the red workbench and shiny heatsinks scream performance. So what about my run of HPL (Linpack)? Well, I decided to start over 
+from scratch and built my own Linpack against ATLAS, which I also compiled from scratch (let that run overnight).</p> 
+ 
+<p>The result? I went from hitting the thermal limiter (and a non-result) to a successful Linpack run - with the CPU temperature never 
+really going much past 50C. As for my Linpack score, you can see that below.</p> 
+ 
+<figure><img src="https://www.gaborsamu.com/images/linpack_17gflops.png" /> 
+</figure> 
+ 
+ 
+ 
+ 
+ Installing SciTokens on a Mac 
+ 
+ 2017-09-07T19:20:04-06:00 
+ https://hpc.social/2017/installing-scitokens-on-a-mac 
+ <p>In case I ever have to install <a href="https://scitokens.org/">SciTokens</a> again, here are the steps I took to make it work on my Mac. The most difficult part of this is installing the openssl headers for the jwt python library. I followed the advice on this <a href="https://solitum.net/openssl-os-x-el-capitan-and-brew/">blog post</a>.</p> 
+ 
+<ol> 
+  <li>Install <a href="https://brew.sh/">Homebrew</a></li> 
+  <li> 
+    <p>Install openssl:</p> 
+ 
+ 
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> brew install openssl 
+</code></pre></div> 
+    </div> 
+ 
+  </li> 
+  <li> 
+    <p>Download the SciTokens library:</p> 
+ 
+ 
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> git clone https://github.com/scitokens/scitokens.git 
+ cd scitokens 
+</code></pre></div> 
+    </div> 
+ 
+  </li> 
+  <li> 
+    <p>Create the virtualenv to install the <a href="https://jwt.io/">jwt</a> library</p> 
+ 
+ 
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> virtualenv jwt 
+ . 
jwt/bin/activate 
+</code></pre></div> 
+    </div> 
+ 
+  </li> 
+  <li> 
+    <p>Install jwt pointing to the Homebrew installed openssl headers:</p> 
+ 
+ 
+    <div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code> env LDFLAGS="-L$(brew --prefix openssl)/lib" CFLAGS="-I$(brew --prefix openssl)/include" pip install cryptography PyJWT 
+</code></pre></div> 
+    </div> 
+ 
+  </li> 
+</ol> 
+ 
+ 
+ 
+ 
+ Standing up a IBM Spectrum LSF Community Edition cluster on Arm v8 
+ 
+ 2017-08-31T18:01:46-06:00 
+ https://hpc.social/2017/standing-up-a-ibm-spectrum-lsf-community-edition-cluster-on-arm-v8 
+ <p>So, you&rsquo;ve got yourself a shiny new (or maybe not) system based upon a 64-bit Arm (Arm v8) processor that you want to put through its 
+paces. For me, this happens to be a MACCHIATObin board powered by a Marvell ARMADA 8040 based on Arm Cortex-A72 cores and installed 
+with Ubuntu 16.04.3 LTS. You can read about my <a href="https://www.gaborsamu.com/blog/turning_up_heat_armv8/">shenanigans running HPL</a> on my system with a passively cooled CPU and running up 
+against some overheating conditions - much like the head gasket failure in my car this past summer - but I digress!</p> 
+ 
+<p>While I wait for the Noctua cooling fan to arrive, it&rsquo;s given me the opportunity to revisit installing a job scheduler on the system. 
+I&rsquo;ll be using this as a way to manage access to the system resources, which will be necessary to arbitrate the various benchmark jobs 
+that I expect to be running over time. There exist a number of workload schedulers today, from open source to closed source 
+proprietary. I&rsquo;ve selected IBM Spectrum LSF Community Edition as it&rsquo;s free to download and use (with restrictions) and supports Linux 
+on Arm v8. Did you know that Spectrum LSF (known previously as Platform LSF) has been around for 25 years? That&rsquo;s quite a pedigree, 
+and because it&rsquo;s shipped as binaries, I won&rsquo;t have to muck about compiling it - which is an added bonus. To download IBM Spectrum LSF 
+Community Edition follow the QR code below :)</p> 
+ 
+<figure><img src="https://www.gaborsamu.com/images/LSFcommunity.jpg" /> 
+</figure> 
+ 
+<p>Below I walk through the steps to install IBM Spectrum LSF Community Edition on my Arm v8 based system. The steps should be the same 
+for the other platforms supported by IBM Spectrum LSF Community Edition including Linux on POWERLE and Linux on x86-64. The procedure 
+below assumes that you have a supported OS installed and have configured networking, and necessary user accounts. This is not meant 
+to be an exhaustive tutorial on IBM Spectrum LSF Community Edition. If you&rsquo;re looking for help, check out the forum <a href="https://ibm.biz/LSF_UserGroup">here</a>.</p> 
+ 
+<p><strong>1. Download and extract</strong></p> 
+ 
+<p>We begin by downloading the <em>armv8</em> IBM Spectrum LSF Community Edition package and quick start guide. We expand the gzipped tarball 
+to get the installer and &ldquo;armv8&rdquo; binary compressed tarballs. Next, we extract the <em>lsfinstall</em> tarball. 
This contains the installer +for IBM Spectrum LSF Community Edition.</p> + +<div class="highlight"><pre><code class="language-bash">root@flotta:/tmp# ls +lsfce10.1-armv8.tar.gz lsfce10.1_quick_start.pdf +root@flotta:/tmp# gunzip lsfce10.1-armv8.tar.gz +root@flotta:/tmp# tar -xvf lsfce10.1-armv8.tar +lsfce10.1-armv8/ +lsfce10.1-armv8/lsf/ +lsfce10.1-armv8/lsf/lsf10.1_lnx312-lib217-armv8.tar.Z +lsfce10.1-armv8/lsf/lsf10.1_no_jre_lsfinstall.tar.Z + +root@flotta:/tmp/lsfce10.1-armv8/lsf# zcat lsf10.1_no_jre_lsfinstall.tar.Z | tar xvf - +lsf10.1_lsfinstall/ +lsf10.1_lsfinstall/instlib/ +lsf10.1_lsfinstall/instlib/lsflib.sh +lsf10.1_lsfinstall/instlib/lsferror.tbl +lsf10.1_lsfinstall/instlib/lsfprechkfuncs.sh +lsf10.1_lsfinstall/instlib/lsflicensefuncs.sh +lsf10.1_lsfinstall/instlib/lsfunpackfuncs.sh +lsf10.1_lsfinstall/instlib/lsfconfigfuncs.sh +lsf10.1_lsfinstall/instlib/resconnectorconfigfuncs.sh +lsf10.1_lsfinstall/instlib/lsf_getting_started.tmpl +.... +....</code></pre></div> + +<p><strong>2. Configure the installer</strong></p> + +<p>After extracting the <em>lsfinstall</em> tarball, you&rsquo;ll find the installation configuration file <em>install.config</em>. This file controls the +installation location, LSF administrator account, name of cluster, master node (where scheduler daemon runs), location of binary +source packages among other things. I&rsquo;ve run a diff here to show the settings. In brief, I&rsquo;ve configured the following:</p> + +<ul> +<li>installation location: <em>/raktar/LSFCE</em></li> +<li>LSF administrator account: <em>gsamu</em></li> +<li>LSF cluster name: <em>Klaszter</em></li> +<li>scheduler node: <em>flotta</em></li> +<li>location of LSF binary source packages: <em>/tmp/lsfce10.1-armv8/lsf</em> (here is located <em>lsf10.1_lnx312-lib217-armv8.tar.Z</em> from step 1)</li> +</ul> +<div class="highlight"><pre><code class="language-bash">root@flotta:/tmp/lsfce10.1-armv8/lsf/lsf10.1_lsfinstall# diff -u4 install.config install.config.org +--- install.config 2017-08-30 20:17:13.148583971 -0400 ++++ install.config.org 2017-08-30 20:15:30.283904454 -0400 +@@ -40,9 +40,8 @@ + <span style="color: #75715e;"># (During an upgrade, specify the existing value.)</span> + <span style="color: #75715e;">#**********************************************************</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># LSF_TOP="/usr/share/lsf"</span> +-LSF_TOP<span style="color: #f92672;">=</span><span style="color: #e6db74;">"/raktar/LSFCE"</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># Full path to the top-level installation directory {REQUIRED}</span> + <span style="color: #75715e;">#</span> + <span style="color: #75715e;"># The path to LSF_TOP must be shared and accessible to all hosts</span> +@@ -51,9 +50,8 @@ + <span style="color: #75715e;"># all host types (approximately 300 MB per host type).</span> + <span style="color: #75715e;">#</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># LSF_ADMINS="lsfadmin user1 user2"</span> +-LSF_ADMINS<span style="color: #f92672;">=</span><span style="color: #e6db74;">"gsamu"</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># List of LSF administrators {REQUIRED}</span> + <span style="color: #75715e;">#</span> + <span style="color: #75715e;"># The first user account name in the list is the primary LSF</span> +@@ -69,9 +67,8 @@ + <span style="color: 
#75715e;"># Secondary LSF administrators are optional.</span> + <span style="color: #75715e;">#</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># LSF_CLUSTER_NAME="cluster1"</span> +-LSF_CLUSTER_NAME<span style="color: #f92672;">=</span><span style="color: #e6db74;">"Klaszter"</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># Name of the LSF cluster {REQUIRED}</span> + <span style="color: #75715e;">#</span> + <span style="color: #75715e;"># It must be 39 characters or less, and cannot contain any</span> +@@ -85,9 +82,8 @@ + <span style="color: #75715e;">#**********************************************************</span> + <span style="color: #75715e;">#</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># LSF_MASTER_LIST="hostm hosta hostc"</span> +-LSF_MASTER_LIST<span style="color: #f92672;">=</span><span style="color: #e6db74;">"flotta"</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># List of LSF server hosts to be master or master candidate in the</span> + <span style="color: #75715e;"># cluster {REQUIRED when you install for the first time or during</span> + <span style="color: #75715e;"># upgrade if the parameter does not already exist.}</span> +@@ -96,9 +92,8 @@ + <span style="color: #75715e;"># cluster. The first host listed is the LSF master host.</span> + <span style="color: #75715e;">#</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># LSF_TARDIR="/usr/share/lsf_distrib/"</span> +-LSF_TARDIR<span style="color: #f92672;">=</span><span style="color: #e6db74;">"/tmp/lsfce10.1-armv8/lsf"</span> + <span style="color: #75715e;"># -----------------</span> + <span style="color: #75715e;"># Full path to the directory containing the LSF distribution tar files.</span> + <span style="color: #75715e;">#</span> + <span style="color: #75715e;"># Default: Parent directory of the current working directory.</span></code></pre></div> + +<p><strong>3. Install IBM Spectrum LSF Community Edition</strong></p> + +<p>With the installation configuration complete, we can now invoke the installer script. Note that I had to install JRE on my system +(in my case <em>apt-get install default-jre</em>) as it&rsquo;s a requirement for IBM Spectrum LSF Community Edition.</p> + +<div class="highlight"><pre><code class="language-bash">root@flotta:/tmp/lsfce10.1-armv8/lsf/lsf10.1_lsfinstall# ./lsfinstall -f ./install.config + +Logging installation sequence in /tmp/lsfce10.1-armv8/lsf/lsf10.1_lsfinstall/Install.log + +International License Agreement <span style="color: #66d9ef;">for</span> Non-Warranted Programs + +Part <span style="color: #ae81ff;">1</span> - General Terms + +BY DOWNLOADING, INSTALLING, COPYING, ACCESSING, CLICKING ON +AN <span style="color: #e6db74;">"ACCEPT"</span> BUTTON, OR OTHERWISE USING THE PROGRAM, +LICENSEE AGREES TO THE TERMS OF THIS AGREEMENT. IF YOU ARE +ACCEPTING THESE TERMS ON BEHALF OF LICENSEE, YOU REPRESENT +AND WARRANT THAT YOU HAVE FULL AUTHORITY TO BIND LICENSEE +TO THESE TERMS. 
IF YOU DO NOT AGREE TO THESE TERMS, + +* DO NOT DOWNLOAD, INSTALL, COPY, ACCESS, CLICK ON AN +<span style="color: #e6db74;">"ACCEPT"</span> BUTTON, OR USE THE PROGRAM; AND + +* PROMPTLY RETURN THE UNUSED MEDIA AND DOCUMENTATION TO THE + +Press Enter to <span style="color: #66d9ef;">continue</span> viewing the license agreement, or +enter <span style="color: #e6db74;">"1"</span> to accept the agreement, <span style="color: #e6db74;">"2"</span> to decline it, <span style="color: #e6db74;">"3"</span> +to print it, <span style="color: #e6db74;">"4"</span> to read non-IBM terms, or <span style="color: #e6db74;">"99"</span> to go back +to the previous screen. +<span style="color: #ae81ff;">1</span> +LSF pre-installation check ... + +Checking the LSF TOP directory /raktar/LSFCE ... +... Done checking the LSF TOP directory /raktar/LSFCE ... +You are installing IBM Spectrum LSF - 10.1 Community Edition. + +Checking LSF Administrators ... + LSF administrator<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span>: <span style="color: #e6db74;">"gsamu"</span> + Primary LSF administrator: <span style="color: #e6db74;">"gsamu"</span> +Checking the configuration template ... + Done checking configuration template ... + Done checking ENABLE_STREAM ... + +<span style="color: #f92672;">[</span>Wed Aug <span style="color: #ae81ff;">30</span> 20:36:46 EDT 2017:lsfprechk:WARN_2007<span style="color: #f92672;">]</span> + Hosts defined in LSF_MASTER_LIST must be LSF server hosts. The + following hosts will be added to server hosts automatically: flotta. + +Checking the patch history directory ... +Creating /raktar/LSFCE/patch ... +... Done checking the patch history directory /raktar/LSFCE/patch ... + +Checking the patch backup directory ... +... Done checking the patch backup directory /raktar/LSFCE/patch/backup ... + + +Searching LSF 10.1 distribution tar files in /tmp/lsfce10.1-armv8/lsf Please wait ... + + 1<span style="color: #f92672;">)</span> linux3.12-glibc2.17-armv8 + +Press <span style="color: #ae81ff;">1</span> or Enter to install this host type: <span style="color: #ae81ff;">1</span> + +You have chosen the following tar file<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span>: + lsf10.1_lnx312-lib217-armv8 + +Checking selected tar file<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> ... +... Done checking selected tar file<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span>. + + +Pre-installation check report saved as text file: +/tmp/lsfce10.1-armv8/lsf/lsf10.1_lsfinstall/prechk.rpt. + +... Done LSF pre-installation check. + +Installing LSF binary files <span style="color: #e6db74;">" lsf10.1_lnx312-lib217-armv8"</span>... +Creating /raktar/LSFCE/10.1 ... + +Copying lsfinstall files to /raktar/LSFCE/10.1/install +Creating /raktar/LSFCE/10.1/install ... + +.... + +.... + +lsfinstall is <span style="color: #66d9ef;">done</span>. + +To complete your LSF installation and get your +cluster <span style="color: #e6db74;">"Klaszter"</span> up and running, follow the steps in +<span style="color: #e6db74;">"/tmp/lsfce10.1-armv8/lsf/lsf10.1_lsfinstall/lsf_getting_started.html"</span>. + +After setting up your LSF server hosts and verifying +your cluster <span style="color: #e6db74;">"Klaszter"</span> is running correctly, +see <span style="color: #e6db74;">"/raktar/LSFCE/10.1/lsf_quick_admin.html"</span> +to learn more about your new LSF cluster. 
+ 
+After installation, remember to bring your cluster up to date 
+by applying the latest updates and bug fixes. </code></pre></div> 
+ 
+<p><strong>4. Siesta time!</strong></p> 
+ 
+<p>Wow, that was easy. IBM Spectrum LSF Community Edition is now installed. Pat yourself on the back and grab your favourite BEvERage 
+as a reward.</p> 
+ 
+<p><strong>5. Fire it up!</strong></p> 
+ 
+<p>Now that IBM Spectrum LSF Community Edition is installed, we can start it up so that it&rsquo;s ready to accept and manage work! As the root 
+user we source the environment for IBM Spectrum LSF Community Edition which sets the PATH and other needed environment variables. Next, 
+we issue 3 commands to start up the IBM Spectrum LSF Community Edition daemons.</p> 
+ 
+<div class="highlight"><pre><code class="language-bash">root@flotta:/raktar/LSFCE/conf# . ./profile.lsf 
+ 
+root@flotta:/raktar/LSFCE/conf# lsadmin limstartup 
+Starting up LIM on &lt;flotta.localdomain&gt; ...... <span style="color: #66d9ef;">done</span> 
+root@flotta:/raktar/LSFCE/conf# lsadmin resstartup 
+Starting up RES on &lt;flotta.localdomain&gt; ...... <span style="color: #66d9ef;">done</span> 
+root@flotta:/raktar/LSFCE/conf# badmin hstartup 
+Starting up slave batch daemon on &lt;flotta.localdomain&gt; ...... <span style="color: #66d9ef;">done</span></code></pre></div> 
+ 
+<p>With IBM Spectrum LSF Community Edition now running, we should be able to query the cluster for status. Note that we&rsquo;ve set up a single-node 
+cluster. IBM Spectrum LSF Community Edition allows you to build up clusters with up to 10 nodes. We run a series of commands to 
+check if the cluster is alive and well.</p> 
+ 
+<div class="highlight"><pre><code class="language-bash">root@flotta:/raktar/LSFCE/conf# lsid 
+IBM Spectrum LSF Community Edition 10.1.0.0, Jun <span style="color: #ae81ff;">15</span> <span style="color: #ae81ff;">2016</span> 
+Copyright IBM Corp. 1992, 2016. All rights reserved. 
+US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
+ 
+My cluster name is Klaszter 
+My master name is flotta.localdomain 
+ 
+ 
+ 
+root@flotta:/raktar/LSFCE/conf# lsload 
+HOST_NAME status r15s r1m r15m ut pg ls it tmp swp mem 
+flotta.localdom ok 0.0 0.3 0.4 13% 0.0 <span style="color: #ae81ff;">1</span> <span style="color: #ae81ff;">0</span> 1444M 0M 3.3 
+ 
+ 
+ 
+root@flotta:/raktar/LSFCE/conf# bhosts 
+HOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV 
+flotta.localdomain ok - <span style="color: #ae81ff;">4</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span></code></pre></div> 
+ 
+<p>The above commands show that the IBM Spectrum LSF Community Edition cluster is up and running. The batch system is now ready to accept 
+workload!</p> 
+ 
+<p><strong>6. Now what?</strong></p> 
+ 
+<p>We are ready to rock n' roll! To christen this environment, I decided to run some MPI tests. Coincidentally, MPI is also celebrating 
+a silver anniversary this year.</p> 
+ 
+<p>And what better MPI tests to run on my Arm system than the <a href="https://software.intel.com/en-us/articles/intel-mpi-benchmarks">Intel MPI Benchmarks</a> :) Of course, the Intel MPI Benchmarks have to be 
+compiled. To keep things simple, I only compiled the MPI1 benchmark set. 
<p><strong>6. Now what?</strong></p> + +<p>We are ready to rock n' roll! To christen this environment, I decided to run some MPI tests. Coincidentally, MPI is also celebrating +a silver anniversary this year.</p> + +<p>And what better MPI tests to run on my Arm system than the <a href="https://software.intel.com/en-us/articles/intel-mpi-benchmarks">Intel MPI Benchmarks</a> :) Of course, the Intel MPI Benchmarks have to be +compiled. To keep things simple, I only compiled the MPI1 benchmark set. This required me to change the CC designation in the +make_ict makefile from mpiicc to mpicc (as I am obviously not using Intel Compilers).</p> + +<div class="highlight"><pre><code class="language-bash">root@flotta:/raktar/imb/imb/src# gmake -f make_ict IMB-MPI1 +sleep 1; touch exe_mpi1 *.c; rm -rf exe_io exe_ext exe_nbc exe_rma +gmake -f Makefile.base MPI1 CPP<span style="color: #f92672;">=</span>MPI1 +gmake<span style="color: #f92672;">[</span>1<span style="color: #f92672;">]</span>: Entering directory <span style="color: #e6db74;">'/raktar/imb/imb/src'</span> +mpicc -DMPI1 -c IMB.c +mpicc -DMPI1 -c IMB_utils.c +mpicc -DMPI1 -c IMB_declare.c +mpicc -DMPI1 -c IMB_init.c +mpicc -DMPI1 -c IMB_mem_manager.c +mpicc -DMPI1 -c IMB_parse_name_mpi1.c +mpicc -DMPI1 -c IMB_benchlist.c +mpicc -DMPI1 -c IMB_strgs.c +mpicc -DMPI1 -c IMB_err_handler.c +mpicc -DMPI1 -c IMB_g_info.c +mpicc -DMPI1 -c IMB_warm_up.c +mpicc -DMPI1 -c IMB_output.c +mpicc -DMPI1 -c IMB_pingpong.c +mpicc -DMPI1 -c IMB_pingping.c +mpicc -DMPI1 -c IMB_allreduce.c + +.... + +.... +mpicc -o IMB-MPI1 IMB.o IMB_utils.o IMB_declare.o IMB_init.o IMB_mem_manager.o IMB_parse_name_mpi1.o IMB_benchlist.o IMB_strgs.o IMB_err_handler.o IMB_g_info.o IMB_warm_up.o IMB_output.o IMB_pingpong.o IMB_pingping.o IMB_allreduce.o IMB_reduce_scatter.o IMB_reduce.o IMB_exchange.o IMB_bcast.o IMB_barrier.o IMB_allgather.o IMB_allgatherv.o IMB_gather.o IMB_gatherv.o IMB_scatter.o IMB_scatterv.o IMB_alltoall.o IMB_alltoallv.o IMB_sendrecv.o IMB_init_transfer.o IMB_chk_diff.o IMB_cpu_exploit.o IMB_bandwidth.o +gmake<span style="color: #f92672;">[</span>1<span style="color: #f92672;">]</span>: Leaving directory <span style="color: #e6db74;">'/raktar/imb/imb/src'</span></code></pre></div> + +<p>So we have our MPI benchmark compiled and our workload scheduler up and running. Let&rsquo;s get busy! As user <em>gsamu</em> we source the +environment for IBM Spectrum LSF Community Edition, and submit a 4-way instance of the Intel MPI Benchmark MPI1 test suite for +execution on our cluster.</p> + +<div class="highlight"><pre><code class="language-bash">gsamu@flotta:/$ . /raktar/LSFCE/conf/profile.lsf +gsamu@flotta:/$ lsid +IBM Spectrum LSF Community Edition 10.1.0.0, Jun <span style="color: #ae81ff;">15</span> <span style="color: #ae81ff;">2016</span> +Copyright IBM Corp. 1992, 2016. All rights reserved. +US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp. + +My cluster name is Klaszter +My master name is flotta.localdomain</code></pre></div> + +<p>Drumroll please&hellip; The MPI benchmark runs through successfully. Note that we&rsquo;ve submitted the job to IBM Spectrum LSF Community Edition +interactively - with the -I parameter. Jobs can also be run non-interactively, and users can peek at the standard output during +runtime using the bpeek command.</p> + +<div class="highlight"><pre><code class="language-bash">gsamu@flotta:/$ bsub -I -q interactive -n <span style="color: #ae81ff;">4</span> mpirun -np <span style="color: #ae81ff;">4</span> /raktar/imb/imb/src/IMB-MPI1 +Job &lt;1396&gt; is submitted to queue &lt;interactive&gt;. 
+&lt;&lt;Waiting <span style="color: #66d9ef;">for</span> dispatch ...&gt;&gt; +<span style="color: #e6db74;">&lt;&lt;Starting on flotta.localdomain&gt;&gt; +</span><span style="color: #e6db74;">#------------------------------------------------------------ +</span><span style="color: #e6db74;"># Intel (R) MPI Benchmarks 2017 update 2, MPI-1 part +</span><span style="color: #e6db74;">#------------------------------------------------------------ +</span><span style="color: #e6db74;"># Date : Thu Aug 30 21:46:28 2017 +</span><span style="color: #e6db74;"># Machine : aarch64 +</span><span style="color: #e6db74;"># S</span>ystem : Linux +<span style="color: #75715e;"># Release : 4.4.8-armada-17.02.2-g4126e30</span> +<span style="color: #75715e;"># Version : #1 SMP PREEMPT Sat May 27 18:52:53 CDT 2017</span> +<span style="color: #75715e;"># MPI Version : 3.0</span> +<span style="color: #75715e;"># MPI Thread Environment: </span> + + +<span style="color: #75715e;"># Calling sequence was: </span> + +<span style="color: #75715e;"># /raktar/imb/imb/src/IMB-MPI1</span> + +<span style="color: #75715e;"># Minimum message length in bytes: 0</span> +<span style="color: #75715e;"># Maximum message length in bytes: 4194304</span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># MPI_Datatype : MPI_BYTE </span> +<span style="color: #75715e;"># MPI_Datatype for reductions : MPI_FLOAT</span> +<span style="color: #75715e;"># MPI_Op : MPI_SUM </span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;">#</span> + +<span style="color: #75715e;"># List of Benchmarks to run:</span> + +<span style="color: #75715e;"># PingPong</span> +<span style="color: #75715e;"># PingPing</span> +<span style="color: #75715e;"># Sendrecv</span> +<span style="color: #75715e;"># Exchange</span> +<span style="color: #75715e;"># Allreduce</span> +<span style="color: #75715e;"># Reduce</span> +<span style="color: #75715e;"># Reduce_scatter</span> +<span style="color: #75715e;"># Allgather</span> +<span style="color: #75715e;"># Allgatherv</span> +<span style="color: #75715e;"># Gather</span> +<span style="color: #75715e;"># Gatherv</span> +<span style="color: #75715e;"># Scatter</span> +<span style="color: #75715e;"># Scatterv</span> +<span style="color: #75715e;"># Alltoall</span> +<span style="color: #75715e;"># Alltoallv</span> +<span style="color: #75715e;"># Bcast</span> +<span style="color: #75715e;"># Barrier</span> + +<span style="color: #75715e;">#---------------------------------------------------</span> +<span style="color: #75715e;"># Benchmarking PingPong </span> +<span style="color: #75715e;"># #processes = 2 </span> +<span style="color: #75715e;"># ( 2 additional processes waiting in MPI_Barrier)</span> +<span style="color: #75715e;">#---------------------------------------------------</span> + <span style="color: #75715e;">#bytes #repetitions t[usec] Mbytes/sec</span> + <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1000</span> 1.03 0.00 + <span style="color: #ae81ff;">1</span> <span style="color: #ae81ff;">1000</span> 1.17 0.85 + <span style="color: #ae81ff;">2</span> <span style="color: #ae81ff;">1000</span> 1.18 1.69 + <span style="color: #ae81ff;">4</span> <span style="color: #ae81ff;">1000</span> 1.19 3.36 + <span style="color: #ae81ff;">8</span> <span style="color: #ae81ff;">1000</span> 0.70 11.43 + <span style="color: #ae81ff;">16</span> <span style="color: #ae81ff;">1000</span> 0.67 23.89 + <span style="color: #ae81ff;">32</span> <span style="color: 
#ae81ff;">1000</span> 0.68 47.37 + <span style="color: #ae81ff;">64</span> <span style="color: #ae81ff;">1000</span> 0.70 90.84 + <span style="color: #ae81ff;">128</span> <span style="color: #ae81ff;">1000</span> 0.72 176.78 + <span style="color: #ae81ff;">256</span> <span style="color: #ae81ff;">1000</span> 0.80 319.61 + <span style="color: #ae81ff;">512</span> <span style="color: #ae81ff;">1000</span> 1.12 455.51 + <span style="color: #ae81ff;">1024</span> <span style="color: #ae81ff;">1000</span> 1.89 540.52 + <span style="color: #ae81ff;">2048</span> <span style="color: #ae81ff;">1000</span> 2.20 932.37 + <span style="color: #ae81ff;">4096</span> <span style="color: #ae81ff;">1000</span> 3.93 1042.37 + <span style="color: #ae81ff;">8192</span> <span style="color: #ae81ff;">1000</span> 5.93 1380.77 + <span style="color: #ae81ff;">16384</span> <span style="color: #ae81ff;">1000</span> 8.76 1869.69 + <span style="color: #ae81ff;">32768</span> <span style="color: #ae81ff;">1000</span> 14.92 2195.65 + <span style="color: #ae81ff;">65536</span> <span style="color: #ae81ff;">640</span> 24.37 2689.26 + <span style="color: #ae81ff;">131072</span> <span style="color: #ae81ff;">320</span> 41.37 3168.39 + <span style="color: #ae81ff;">262144</span> <span style="color: #ae81ff;">160</span> 81.48 3217.12 + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">80</span> 193.81 2705.22 + <span style="color: #ae81ff;">1048576</span> <span style="color: #ae81ff;">40</span> 443.74 2363.05 + <span style="color: #ae81ff;">2097152</span> <span style="color: #ae81ff;">20</span> 860.30 2437.71 + <span style="color: #ae81ff;">4194304</span> <span style="color: #ae81ff;">10</span> 1692.45 2478.24 + +<span style="color: #75715e;">#---------------------------------------------------</span> +<span style="color: #75715e;"># Benchmarking PingPing </span> +<span style="color: #75715e;"># #processes = 2 </span> +<span style="color: #75715e;"># ( 2 additional processes waiting in MPI_Barrier)</span> +<span style="color: #75715e;">#---------------------------------------------------</span> + <span style="color: #75715e;">#bytes #repetitions t[usec] Mbytes/sec</span> + <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1000</span> 0.81 0.00 + <span style="color: #ae81ff;">1</span> <span style="color: #ae81ff;">1000</span> 0.85 1.17 + <span style="color: #ae81ff;">2</span> <span style="color: #ae81ff;">1000</span> 0.85 2.34 + <span style="color: #ae81ff;">4</span> <span style="color: #ae81ff;">1000</span> 0.85 4.68 + <span style="color: #ae81ff;">8</span> <span style="color: #ae81ff;">1000</span> 0.86 9.34 + <span style="color: #ae81ff;">16</span> <span style="color: #ae81ff;">1000</span> 0.89 17.96 + <span style="color: #ae81ff;">32</span> <span style="color: #ae81ff;">1000</span> 0.89 35.99 + <span style="color: #ae81ff;">64</span> <span style="color: #ae81ff;">1000</span> 0.92 69.71 + <span style="color: #ae81ff;">128</span> <span style="color: #ae81ff;">1000</span> 0.97 132.63 + <span style="color: #ae81ff;">256</span> <span style="color: #ae81ff;">1000</span> 1.01 254.50 + <span style="color: #ae81ff;">512</span> <span style="color: #ae81ff;">1000</span> 1.31 391.16 + <span style="color: #ae81ff;">1024</span> <span style="color: #ae81ff;">1000</span> 2.45 418.61 + <span style="color: #ae81ff;">2048</span> <span style="color: #ae81ff;">1000</span> 3.06 670.15 + <span style="color: #ae81ff;">4096</span> <span style="color: #ae81ff;">1000</span> 5.27 776.63 + <span style="color: 
#ae81ff;">8192</span> <span style="color: #ae81ff;">1000</span> 8.09 1012.73 + <span style="color: #ae81ff;">16384</span> <span style="color: #ae81ff;">1000</span> 11.56 1417.31 + <span style="color: #ae81ff;">32768</span> <span style="color: #ae81ff;">1000</span> 18.10 1810.29 + <span style="color: #ae81ff;">65536</span> <span style="color: #ae81ff;">640</span> 31.07 2108.97 + <span style="color: #ae81ff;">131072</span> <span style="color: #ae81ff;">320</span> 67.01 1955.95 + <span style="color: #ae81ff;">262144</span> <span style="color: #ae81ff;">160</span> 134.24 1952.73 + <span style="color: #ae81ff;">524288</span> <span style="color: #ae81ff;">80</span> 358.88 1460.91 + <span style="color: #ae81ff;">1048576</span> <span style="color: #ae81ff;">40</span> 895.85 1170.49 + <span style="color: #ae81ff;">2097152</span> <span style="color: #ae81ff;">20</span> 1792.75 1169.79 + <span style="color: #ae81ff;">4194304</span> <span style="color: #ae81ff;">10</span> 3373.79 1243.20 + +.... + +.... + +<span style="color: #75715e;"># All processes entering MPI_Finalize</span></code></pre></div> + +<p>There you have it. if you&rsquo;re after more information about IBM Spectrum LSF, visit <a href="https://www.ibm.com/us-en/marketplace/hpc-workload-management">here</a>. .</p> + + + + + Turning up the heat...on my Armada 8040 + + 2017-08-29T20:07:13-06:00 + https://hpc.social/2017/turning-up-the-heat-on-my-armada-8040 + <p>Although I took delivery of a shiny new <a href="https://www.solid-run.com/marvell-armada-family/macchiatobin/">SolidRun Marvell macchiatoBIN</a> a few months back (end May), I&rsquo;ve not really had a chance +to put it through it&rsquo;s paces until now. For those of you who are not familiar with the board, it&rsquo;s a high-performance 64-bit Arm +(v8) board designed really for networking. It&rsquo;s based on the Marvell ARMADA 8040 processor for those who like to keep track. For +those looking for more information about the board, there is a community page <a href="http://macchiatobin.net/">here</a>.</p> + +<p>What struck me about the board when I originally unpacked it were the shiny heatsinks. Definitely looks cool on my workbench +(desk)! They did seem up to the task of keeping the mighty ARMADA 8040 cool as a cucumber - +or so I thought.</p> + +<figure><img src="https://www.gaborsamu.com/images/8040_heatsinks.jpg" /> +</figure> + +<p>Following the procedure (struggling) to install Ubuntu as described on the macchiatoBIN <a href="http://wiki.macchiatobin.net/tiki-index.php?page=BSP+HowTo">wiki</a> - which ironically required me to use +an x86 box to compile some necessary bits, I was off to the races with Ubuntu 16.04.3 LTS (Xenial Xerus). Note that this whole +procedure left much to be desired as it was my understanding that this board was to be ARM <a href="https://en.wikipedia.org/wiki/Server_Base_System_Architecture">SBSA</a> compliant - which would allow any +compliant OS distro to be used. This is something which at the time of writing is not the case - hope that an update does address +this.</p> + +<p>Being a high-performance computing kind of guy, my first challenge was to run the High-Performance Linpack (HPL) on the system. HPL +you say? Yes, I know we can debate the merits of HPL all day long, but nevertheless it&rsquo;s still a measure of some specific dimensions +of system performance - and indeed it&rsquo;s used to rank systems on the TOP500 list of Supercomputers. 
Because I was looking to run more +than just HPL on the system, I opted to install Phoronix test suite which includes HPCC (HPC Challenge) as an available benchmark.</p> + +<p>To get warmed up, I decided to first run the well know <em>stream</em> memory benchmark. Via Phoronix, I installed the stream benchmark and +executed it.</p> + +<div class="highlight"><pre><code class="language-bash">root@flotta:~# phoronix-test-suite install-test pts/stream + + + +Phoronix Test Suite v5.2.1 + + + To Install: pts/stream-1.3.1 + + + Determining File Requirements ........................................... + Searching Download Caches ............................................... + + + <span style="color: #ae81ff;">1</span> Test To Install + + <span style="color: #ae81ff;">1</span> File To Download <span style="color: #f92672;">[</span>0.01MB<span style="color: #f92672;">]</span> + 1MB Of Disk Space Is Needed + + + pts/stream-1.3.1: + + Test Installation <span style="color: #ae81ff;">1</span> of <span style="color: #ae81ff;">1</span> + <span style="color: #ae81ff;">1</span> File Needed <span style="color: #f92672;">[</span>0.01 MB / <span style="color: #ae81ff;">1</span> Minute<span style="color: #f92672;">]</span> + Downloading: stream-2013-01-17.tar.bz2 <span style="color: #f92672;">[</span>0.01MB<span style="color: #f92672;">]</span> + Estimated Download Time: 1m ......................................... + Installation Size: 0.1 MB + Installing Test @ 19:45:06 + +<span style="color: #f92672;">[</span>NOTICE<span style="color: #f92672;">]</span> Undefined: <span style="color: #ae81ff;">0</span> in phodevi_cpu:267 + +<span style="color: #f92672;">[</span>NOTICE<span style="color: #f92672;">]</span> Undefined: <span style="color: #ae81ff;">0</span> in phodevi_cpu:272</code></pre></div> + +<p>Next, we execute the benchmark <em>stream</em></p> + +<div class="highlight"><pre><code class="language-bash">root@flotta:~# phoronix-test-suite benchmark pts/stream + + + +Phoronix Test Suite v5.2.1 + + + + Installed: pts/stream-1.3.1 + + + + + +Stream 2013-01-17: + + pts/stream-1.3.1 + + Memory Test Configuration + + 1: Copy + + 2: Scale + + 3: Add + + 4: Triad + + 5: Test All Options + + Type: <span style="color: #ae81ff;">5</span> + + + + + +System Information + + + +Hardware: + +Processor: Unknown @ 1.30GHz <span style="color: #f92672;">(</span><span style="color: #ae81ff;">4</span> Cores<span style="color: #f92672;">)</span>, Memory: 4096MB, Disk: 8GB 8GME4R + + + +Software: + +OS: Ubuntu 16.04, Kernel: 4.4.8-armada-17.02.2-g4126e30 <span style="color: #f92672;">(</span>aarch64<span style="color: #f92672;">)</span>, Compiler: GCC 5.4.0 20160609, File-System: ext4 + + + + Would you like to save these test results <span style="color: #f92672;">(</span>Y/n<span style="color: #f92672;">)</span>: n + + + + + +Stream 2013-01-17: + + pts/stream-1.3.1 <span style="color: #f92672;">[</span>Type: Copy<span style="color: #f92672;">]</span> + + Test <span style="color: #ae81ff;">1</span> of <span style="color: #ae81ff;">4</span> + + Estimated Trial Run Count: <span style="color: #ae81ff;">5</span> + + Estimated Test Run-Time: <span style="color: #ae81ff;">7</span> Minutes + + Estimated Time To Completion: <span style="color: #ae81ff;">25</span> Minutes + + Started Run <span style="color: #ae81ff;">1</span> @ 19:46:03 + + Started Run <span style="color: #ae81ff;">2</span> @ 19:48:09 + + Started Run <span style="color: #ae81ff;">3</span> @ 19:50:14 + + Started Run <span style="color: #ae81ff;">4</span> @ 19:52:19 + + 
Started Run <span style="color: #ae81ff;">5</span> @ 19:54:24 <span style="color: #f92672;">[</span>Std. Dev: 0.35%<span style="color: #f92672;">]</span> + + + + Test Results: + + 6701.1 + + 6669.1 + + 6655.9 + + 6637.4 + + 6657.1 + + + + Average: 6664.12 MB/s + + + + + +Stream 2013-01-17: + + pts/stream-1.3.1 <span style="color: #f92672;">[</span>Type: Scale<span style="color: #f92672;">]</span> + + Test <span style="color: #ae81ff;">2</span> of <span style="color: #ae81ff;">4</span> + + Estimated Trial Run Count: <span style="color: #ae81ff;">5</span> + + Estimated Test Run-Time: <span style="color: #ae81ff;">7</span> Minutes + + Estimated Time To Completion: <span style="color: #ae81ff;">19</span> Minutes + + Started Run <span style="color: #ae81ff;">1</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">2</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">3</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">4</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">5</span> @ 19:56:27 <span style="color: #f92672;">[</span>Std. Dev: 0.12%<span style="color: #f92672;">]</span> + + + + Test Results: + + 7248.8 + + 7261.8 + + 7252.8 + + 7245.6 + + 7266.1 + + + + Average: 7255.02 MB/s + + + + + +Stream 2013-01-17: + + pts/stream-1.3.1 <span style="color: #f92672;">[</span>Type: Triad<span style="color: #f92672;">]</span> + + Test <span style="color: #ae81ff;">3</span> of <span style="color: #ae81ff;">4</span> + + Estimated Trial Run Count: <span style="color: #ae81ff;">5</span> + + Estimated Test Run-Time: <span style="color: #ae81ff;">7</span> Minutes + + Estimated Time To Completion: <span style="color: #ae81ff;">13</span> Minutes + + Started Run <span style="color: #ae81ff;">1</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">2</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">3</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">4</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">5</span> @ 19:56:27 <span style="color: #f92672;">[</span>Std. Dev: 0.47%<span style="color: #f92672;">]</span> + + + + Test Results: + + 6872.3 + + 6895.9 + + 6934.9 + + 6847.9 + + 6889.5 + + + + Average: 6888.10 MB/s + + + + + +Stream 2013-01-17: + + pts/stream-1.3.1 <span style="color: #f92672;">[</span>Type: Add<span style="color: #f92672;">]</span> + + Test <span style="color: #ae81ff;">4</span> of <span style="color: #ae81ff;">4</span> + + Estimated Trial Run Count: <span style="color: #ae81ff;">5</span> + + Estimated Time To Completion: <span style="color: #ae81ff;">7</span> Minutes + + Started Run <span style="color: #ae81ff;">1</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">2</span> @ 19:56:27 + + The test run ended prematurely. + + Started Run <span style="color: #ae81ff;">3</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">4</span> @ 19:56:27 + + Started Run <span style="color: #ae81ff;">5</span> @ 19:56:27 + + The test run ended prematurely. <span style="color: #f92672;">[</span>Std. 
Dev: 0.09%<span style="color: #f92672;">]</span> + + + + Test Results: + + 6559.5 + + 6549.8 + + 6560.8 + + + + Average: 6556.70 MB/s</code></pre></div> + +<p>We see during execution that stream definitely puts the system through its paces.</p> + +<div class="highlight"><pre><code class="language-bash">top - 19:47:20 up 3:07, <span style="color: #ae81ff;">2</span> users, load average: 3.10, 1.21, 0.70 + +Tasks: <span style="color: #ae81ff;">119</span> total, <span style="color: #ae81ff;">2</span> running, <span style="color: #ae81ff;">117</span> sleeping, <span style="color: #ae81ff;">0</span> stopped, <span style="color: #ae81ff;">0</span> zombie +%Cpu<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span>: 96.3 us, 0.4 sy, 0.0 ni, 1.7 id, 0.0 wa, 0.0 hi, 1.7 si, 0.0 st +KiB Mem : <span style="color: #ae81ff;">3779668</span> total, <span style="color: #ae81ff;">986744</span> free, <span style="color: #ae81ff;">2410052</span> used, <span style="color: #ae81ff;">382872</span> buff/cache +KiB Swap: <span style="color: #ae81ff;">0</span> total, <span style="color: #ae81ff;">0</span> free, <span style="color: #ae81ff;">0</span> used. <span style="color: #ae81ff;">1287376</span> avail Mem + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND +<span style="color: #ae81ff;">18920</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">2370508</span> 2.236g <span style="color: #ae81ff;">1336</span> R 389.1 62.0 4:54.12 stream-bin +<span style="color: #ae81ff;">6854</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 2.3 0.0 0:07.30 kworker/u8+ +<span style="color: #ae81ff;">18924</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">9288</span> <span style="color: #ae81ff;">3208</span> <span style="color: #ae81ff;">2632</span> R 0.7 0.1 0:00.37 top + <span style="color: #ae81ff;">3</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.3 0.0 0:00.50 ksoftirqd/0 + <span style="color: #ae81ff;">1</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">6868</span> <span style="color: #ae81ff;">5144</span> <span style="color: #ae81ff;">3476</span> S 0.0 0.1 0:03.90 systemd + <span style="color: #ae81ff;">2</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.00 kthreadd + <span style="color: #ae81ff;">5</span> root <span style="color: #ae81ff;">0</span> -20 <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.00 kworker/0:+ + <span style="color: #ae81ff;">7</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:01.82 rcu_preempt + <span style="color: #ae81ff;">8</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span
style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.00 rcu_sched + <span style="color: #ae81ff;">9</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.00 rcu_bh + <span style="color: #ae81ff;">10</span> root rt <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.08 migration/0 + <span style="color: #ae81ff;">11</span> root rt <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.14 watchdog/0 + <span style="color: #ae81ff;">12</span> root rt <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.11 watchdog/1 + <span style="color: #ae81ff;">13</span> root rt <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.07 migration/1 + <span style="color: #ae81ff;">14</span> root <span style="color: #ae81ff;">20</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.03 ksoftirqd/1 + <span style="color: #ae81ff;">16</span> root <span style="color: #ae81ff;">0</span> -20 <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.00 kworker/1:+ + <span style="color: #ae81ff;">17</span> root rt <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> S 0.0 0.0 0:00.13 watchdog/2</code></pre></div> + +<p>Onward and upward as they say. Moving to the HPCC benchmark which contains HPL. We install the <em>pts/hpcc</em> test for Phoronix.</p> + +<div class="highlight"><pre><code class="language-bash">root@flotta:~# phoronix-test-suite install-test pts/hpcc + + + +Phoronix Test Suite v5.2.1 + + + + To Install: pts/hpcc-1.2.0 + + + + Determining File Requirements ........................................... + + Searching Download Caches ............................................... + + + + <span style="color: #ae81ff;">1</span> Test To Install + + <span style="color: #ae81ff;">1</span> File To Download <span style="color: #f92672;">[</span>0.63MB<span style="color: #f92672;">]</span> + + 9MB Of Disk Space Is Needed + + + + pts/hpcc-1.2.0: + + Test Installation <span style="color: #ae81ff;">1</span> of <span style="color: #ae81ff;">1</span> + + <span style="color: #ae81ff;">1</span> File Needed <span style="color: #f92672;">[</span>0.63 MB / <span style="color: #ae81ff;">1</span> Minute<span style="color: #f92672;">]</span> + + Downloading: hpcc-1.5.0.tar.gz <span style="color: #f92672;">[</span>0.63MB<span style="color: #f92672;">]</span> + + Estimated Download Time: 1m ......................................... 
+ + Installation Size: <span style="color: #ae81ff;">9</span> MB + + Installing Test @ 20:00:53 + + + + <span style="color: #f92672;">[</span>NOTICE<span style="color: #f92672;">]</span> Supported install-time optional variables include $MPI_PATH, + + $MPI_INCLUDE, $MPI_CC, $MPI_LIBS, $LA_PATH, $LA_INCLUDE, $LA_LIBS, + + $CFLAGS, $LD_FLAGS, and $MPI_LD + + + + <span style="color: #f92672;">[</span>NOTICE<span style="color: #f92672;">]</span> Supported run-time optional environment variables include + + $N, $NB, $MPI_NUM_THREADS, $HOSTFILE</code></pre></div> + +<p>Before starting the HPL run, I put together a quick script to monitor the CPU temperatures during the run. The script simply +prints out the values of <em>/sys/class/thermal/thermal_zone[X]/temp</em> in human-readable format.</p> + +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;">#!/bin/sh +</span><span style="color: #75715e;"></span> +<span style="color: #66d9ef;">while</span> <span style="color: #f92672;">[</span> true <span style="color: #f92672;">]</span> + +<span style="color: #66d9ef;">do</span> + +echo <span style="color: #e6db74;">"</span><span style="color: #66d9ef;">$(</span>date<span style="color: #66d9ef;">)</span><span style="color: #e6db74;"> @ </span><span style="color: #66d9ef;">$(</span>hostname<span style="color: #66d9ef;">)</span><span style="color: #e6db74;">"</span> + +echo <span style="color: #e6db74;">"-----------------------"</span> + +cpu0<span style="color: #f92672;">=</span><span style="color: #e6db74;">`</span>cat /sys/class/thermal/thermal_zone0/temp<span style="color: #e6db74;">`</span> + +cpu1<span style="color: #f92672;">=</span><span style="color: #e6db74;">`</span>cat /sys/class/thermal/thermal_zone1/temp<span style="color: #e6db74;">`</span> + +cpu2<span style="color: #f92672;">=</span><span style="color: #e6db74;">`</span>cat /sys/class/thermal/thermal_zone2/temp<span style="color: #e6db74;">`</span> + +echo <span style="color: #e6db74;">"thermal_zone0 = </span><span style="color: #66d9ef;">$((</span>cpu0/1000<span style="color: #66d9ef;">))</span><span style="color: #e6db74;"> 'C"</span> + +echo <span style="color: #e6db74;">"thermal_zone1 = </span><span style="color: #66d9ef;">$((</span>cpu1/1000<span style="color: #66d9ef;">))</span><span style="color: #e6db74;"> 'C"</span> + +echo <span style="color: #e6db74;">"thermal_zone2 = </span><span style="color: #66d9ef;">$((</span>cpu2/1000<span style="color: #66d9ef;">))</span><span style="color: #e6db74;"> 'C"</span> + +/bin/sleep <span style="color: #ae81ff;">10</span> + +<span style="color: #66d9ef;">done</span></code></pre></div>
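 + +<p>To capture readings over the course of a run, the script can be left running in the background with its output redirected to a file. An illustrative sketch (the script and log file names are hypothetical):</p> + +<div class="highlight"><pre><code class="language-bash"># Save the script as temps.sh, make it executable, and log readings to a file +chmod +x temps.sh +nohup ./temps.sh &gt; hpl_temps.log 2&gt;&amp;1 &amp;</code></pre></div>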
So after kicking off the initial run (see below) I decided to err on the side of +caution and get a fan setup before I do any serious damage to the board.</p> + +<div class="highlight"><pre><code class="language-bash">gsamu@flotta:~$ phoronix-test-suite benchmark pts/hpcc + + + +Phoronix Test Suite v5.2.1 + + + + Installed: pts/hpcc-1.2.0 + + + + + +HPC Challenge 1.5.0: + + pts/hpcc-1.2.0 + + Processor Test Configuration + + 1: G-HPL + + 2: G-Ptrans + + 3: G-Random Access + + 4: G-Ffte + + 5: EP-STREAM Triad + + 6: EP-DGEMM + + 7: Random Ring Latency + + 8: Random Ring Bandwidth + + 9: Max Ping Pong Bandwidth + + 10: Test All Options + + Test / Class: <span style="color: #ae81ff;">1</span> + + + + + +System Information + + + + + +<span style="color: #f92672;">[</span>NOTICE<span style="color: #f92672;">]</span> Undefined: <span style="color: #ae81ff;">0</span> in phodevi_cpu:267 + + + +<span style="color: #f92672;">[</span>NOTICE<span style="color: #f92672;">]</span> Undefined: <span style="color: #ae81ff;">0</span> in phodevi_cpu:272 + +Hardware: + +Processor: Unknown @ 1.30GHz <span style="color: #f92672;">(</span><span style="color: #ae81ff;">4</span> Cores<span style="color: #f92672;">)</span>, Memory: 4096MB, Disk: 8GB 8GME4R + + + +Software: + +OS: Ubuntu 16.04, Kernel: 4.4.8-armada-17.02.2-g4126e30 <span style="color: #f92672;">(</span>aarch64<span style="color: #f92672;">)</span>, Compiler: GCC 5.4.0 20160609, File-System: ext4 + + + + Would you like to save these test results <span style="color: #f92672;">(</span>Y/n<span style="color: #f92672;">)</span>: n + + + + + +HPC Challenge 1.5.0: + + pts/hpcc-1.2.0 <span style="color: #f92672;">[</span>Test / Class: G-HPL<span style="color: #f92672;">]</span> + + Test <span style="color: #ae81ff;">1</span> of <span style="color: #ae81ff;">1</span> + + Estimated Trial Run Count: <span style="color: #ae81ff;">3</span> + + Estimated Time To Completion: <span style="color: #ae81ff;">1</span> Hour, <span style="color: #ae81ff;">28</span> Minutes + + Started Run <span style="color: #ae81ff;">1</span> @ 20:12:13^C</code></pre></div> + +<p>The above run was aborted when the temperature shown by my temperature monitoring script peaked 100 C.</p> + +<div class="highlight"><pre><code class="language-bash">Tue Aug <span style="color: #ae81ff;">29</span> 20:23:12 EDT <span style="color: #ae81ff;">2017</span> @ flotta.localdomain + +----------------------- + +thermal_zone0 <span style="color: #f92672;">=</span> <span style="color: #ae81ff;">99</span> <span style="color: #e6db74;">'C +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">thermal_zone1 = 91 '</span>C + +thermal_zone2 <span style="color: #f92672;">=</span> <span style="color: #ae81ff;">92</span> <span style="color: #e6db74;">'C +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">Tue Aug 29 20:23:22 EDT 2017 @ flotta.localdomain +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">----------------------- +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">thermal_zone0 = 100 '</span>C + +thermal_zone1 <span style="color: #f92672;">=</span> <span style="color: #ae81ff;">92</span> <span style="color: #e6db74;">'C +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">thermal_zone2 = 92 '</span>C + +Tue Aug <span style="color: #ae81ff;">29</span> 20:23:32 EDT <span style="color: #ae81ff;">2017</span> @ flotta.localdomain + +----------------------- + +thermal_zone0 <span 
style="color: #f92672;">=</span> <span style="color: #ae81ff;">101</span> <span style="color: #e6db74;">'C +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">thermal_zone1 = 93 '</span>C + +thermal_zone2 <span style="color: #f92672;">=</span> <span style="color: #ae81ff;">93</span> <span style="color: #e6db74;">'C +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">Tue Aug 29 20:23:42 EDT 2017 @ flotta.localdomain +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">----------------------- +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">thermal_zone0 = 101 '</span>C + +thermal_zone1 <span style="color: #f92672;">=</span> <span style="color: #ae81ff;">94</span> <span style="color: #e6db74;">'C +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">thermal_zone2 = 93 '</span>C + +Tue Aug <span style="color: #ae81ff;">29</span> 20:23:52 EDT <span style="color: #ae81ff;">2017</span> @ flotta.localdomain + +----------------------- + +thermal_zone0 <span style="color: #f92672;">=</span> <span style="color: #ae81ff;">101</span> <span style="color: #e6db74;">'C +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">thermal_zone1 = 94 '</span>C + +thermal_zone2 <span style="color: #f92672;">=</span> <span style="color: #ae81ff;">94</span> <span style="color: #960050; background-color: #1e0010;">'</span>C</code></pre></div> + +<p>So as I wait for my new cooling fan and Open Benchtable to arrive, I&rsquo;ll get back to thrashing some good old Intel hardware&hellip;Hey for some +real fun, I can disconnect the CPU fans on those ones :)</p> + +<p>Gábor out!</p> + +<p><strong>UPDATE!!!</strong></p> + +<p>Well I decided to press ahead tonight with a run of HPL on my macchiatoBIN board. 
 + +<p>So as I wait for my new cooling fan and Open Benchtable to arrive, I&rsquo;ll get back to thrashing some good old Intel hardware&hellip; Hey, for some +real fun, I can disconnect the CPU fans on those ones :)</p> + +<p>Gábor out!</p> + +<p><strong>UPDATE!!!</strong></p> + +<p>Well, I decided to press ahead tonight with a run of HPL on my macchiatoBIN board. To monitor the temperature (recall that my current +configuration is with passive cooling), I put together a small script to dump the values of the following to a text file during the HPL run:</p> + +<ul> +<li><em>/sys/class/thermal/thermal_zone0/temp</em></li> +<li><em>/sys/class/thermal/thermal_zone1/temp</em></li> +<li><em>/sys/class/thermal/thermal_zone2/temp</em></li> +</ul> +<p>The run started OK, but I lost contact with the macchiatoBIN after about 55 minutes&hellip; when it was on run 2 of HPL:</p> + +<div class="highlight"><pre><code class="language-bash">gsamu@flotta:~$ phoronix-test-suite benchmark pts/hpcc + + + +Phoronix Test Suite v5.2.1 + + + + Installed: pts/hpcc-1.2.0 + + + + + +HPC Challenge 1.5.0: + + pts/hpcc-1.2.0 + + Processor Test Configuration + + 1: G-HPL + + 2: G-Ptrans + + 3: G-Random Access + + 4: G-Ffte + + 5: EP-STREAM Triad + + 6: EP-DGEMM + + 7: Random Ring Latency + + 8: Random Ring Bandwidth + + 9: Max Ping Pong Bandwidth + + 10: Test All Options + + Test / Class: <span style="color: #ae81ff;">1</span> + + + + + +System Information + + + + + +<span style="color: #f92672;">[</span>NOTICE<span style="color: #f92672;">]</span> Undefined: <span style="color: #ae81ff;">0</span> in phodevi_cpu:267 + + + +<span style="color: #f92672;">[</span>NOTICE<span style="color: #f92672;">]</span> Undefined: <span style="color: #ae81ff;">0</span> in phodevi_cpu:272 + +Hardware: + +Processor: Unknown @ 1.30GHz <span style="color: #f92672;">(</span><span style="color: #ae81ff;">4</span> Cores<span style="color: #f92672;">)</span>, Memory: 4096MB, Disk: 8GB 8GME4R + + + +Software: + +OS: Ubuntu 16.04, Kernel: 4.4.8-armada-17.02.2-g4126e30 <span style="color: #f92672;">(</span>aarch64<span style="color: #f92672;">)</span>, Compiler: GCC 5.4.0 20160609, File-System: ext4 + + + + Would you like to save these test results <span style="color: #f92672;">(</span>Y/n<span style="color: #f92672;">)</span>: n + + + + + +HPC Challenge 1.5.0: + + pts/hpcc-1.2.0 <span style="color: #f92672;">[</span>Test / Class: G-HPL<span style="color: #f92672;">]</span> + + Test <span style="color: #ae81ff;">1</span> of <span style="color: #ae81ff;">1</span> + + Estimated Trial Run Count: <span style="color: #ae81ff;">3</span> + + Estimated Time To Completion: <span style="color: #ae81ff;">1</span> Hour, <span style="color: #ae81ff;">28</span> Minutes + + Started Run <span style="color: #ae81ff;">1</span> @ 19:20:10 + + Started Run <span style="color: #ae81ff;">2</span> @ 19:50:34</code></pre></div> + +<p>And I guess this gives the reason (from <em>/var/log/messages</em>)&hellip;</p> + +<div class="highlight"><pre><code class="language-bash">Feb <span style="color: #ae81ff;">6</span> 20:18:35 flotta kernel: armada_thermal f06f808c.thermal: Overheat critical high threshold temperature reached</code></pre></div> + +<p>Plotting the temperature metrics with <em>gnuplot</em>, we see that we were well into the triple digits. Oh my! At this stage, +I should probably stop abusing this poor board and wait until my Noctua industrial fan arrives :)</p>
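 + +<p>For the curious, a minimal gnuplot invocation along these lines (a sketch only - it assumes the dump was massaged into simple two-column sample/temperature files per zone, which is not the exact format of my script&rsquo;s output) might look like:</p> + +<div class="highlight"><pre><code class="language-bash"># Render the per-zone temperature logs to a PNG (zone*.dat are hypothetical) +gnuplot &lt;&lt;'EOF' +set terminal png +set output "armada_temps.png" +set xlabel "sample" +set ylabel "temperature (C)" +plot "zone0.dat" using 1:2 with lines title "thermal_zone0", \ + "zone1.dat" using 1:2 with lines title "thermal_zone1", \ + "zone2.dat" using 1:2 with lines title "thermal_zone2" +EOF</code></pre></div>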
 + +<figure><img src="https://www.gaborsamu.com/images/armada_gnuplot.jpg" /> +</figure> + + + + + StashCache + + 2017-06-14T17:11:55-06:00 + https://hpc.social/2017/stashcache + <p><a href="https://opensciencegrid.github.io/StashCache/">StashCache</a> is a framework to distribute data across the Open Science Grid. It is designed to help opportunistic users transfer data without the need for dedicated storage or frameworks of their own, like CMS and ATLAS have deployed. StashCache has several regional caches and a small set of origin servers. Caches have fast network connections, and sizable disk storage to quickly distribute data to the execution hosts in the OSG.</p> + +<p>StashCache is named for the Stash filesystem located at the University of Chicago’s OSG-Connect service. It is primarily intended to be used to cache data from the Stash filesystem, though data origins exist for other experiments.</p> + +<figure> + +<img alt="Regional Caches" src="https://derekweitzel.com/images/posts/StashCache/StashCacheMap.png" /> + + <figcaption>Regional Caches</figcaption> +</figure> + +<h2 id="components">Components</h2> +<p>The worker nodes are where the user jobs will run. The transfer tools are used on the worker nodes to download data from StashCache caches. Worker nodes are geographically distributed across the US, and will select the nearest cache based upon a GeoIP database.</p> + +<figure> + <img alt="StashCache Architecture" src="https://derekweitzel.com/images/posts/StashCache/StashCache-Arch-Big.png" /> + <figcaption>StashCache Architecture</figcaption> +</figure> + +<p>The caches are distributed to computing sites across the U.S. They are running the <a href="http://xrootd.org/">XRootD</a> software. The worker nodes connect directly to the regional caches, which in turn download from the Origin servers. The caching proxies discover the data origin by querying the Redirectors. The caching algorithm used is Least Recently Used (LRU). In this algorithm, the cache will only delete cached data when storage space is near capacity, and will delete the least recently used data first.</p> + +<p>The origin servers are the primary source of data for the StashCache framework. StashCache was named after the Stash data store at the University of Chicago’s OSG-Connect service, but other origins also utilize the framework. The origin is the initial source of data, but once the data is stored on the Caches, the origin is no longer used. Updates to data on the origin are not reflected in the caches automatically. The caches treat the data from the origin as immutable, and therefore do not check for updates. If a user requires new data to be pulled into the cache, the name or location of the data on the origin must be changed.</p> + +<p>Redirectors are used to discover the location of data. They are run only at the Indiana Grid Operations Center (GOC). The redirectors help in the discovery of the origin for data. Only the caching proxies communicate with the redirectors.</p> + +<h2 id="tools-to-transfer">Tools to transfer</h2> +<p>Two tools exist to download data from StashCache: CVMFS and StashCP. With either of these tools, the first step for users is to copy the data to the Stash filesystem. Once the user has an OSG-Connect account, they may copy their data to the /stash/&lt;username&gt;/public directory. Once there, both of the tools can view and download the files.</p>
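 + +<p>As an illustrative example (the login host shown is an assumption - check the OSG-Connect documentation for the current one), staging data into Stash can be as simple as an scp:</p> + +<div class="highlight"><pre><code class="language-bash"># Copy a dataset into the public area of the Stash filesystem +scp mydata.tar.gz &lt;username&gt;@login.osgconnect.net:/stash/&lt;username&gt;/public/</code></pre></div>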
 + +<p><a href="https://cernvm.cern.ch/portal/filesystem">CVMFS</a> (CERN Virtual Machine File System) is a mountable filesystem that appears to the user as a regular directory. CVMFS provides transparent access for users to data in the Stash filesystem. The namespace, such as the size and name of files, and the data are separate in the Stash CVMFS. CVMFS distributes the namespace information for the Stash filesystem over a series of HTTP Forward Proxies that are separate from the StashCache federation. Data is retrieved through the Stash proxies.</p> + +<p>In order to map the Stash filesystem into CVMFS, a process is constantly scanning the Stash filesystem checking for new files. When new files are discovered, they are checksummed and the meta-data is stored in the CVMFS namespace. Since this scanning can take a while for a filesystem the size of Stash, it may take several hours for a file placed in Stash to be available through CVMFS.</p> + +<p>Using CVMFS, copying files is as easy as copying files with any other filesystem:</p> + +<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ cp /cvmfs/stash.osgstorage.org/user/&lt;username&gt;/public/… dest/ +</code></pre></div> +</div> + +<p>CVMFS access also has other features that are beneficial for Stash access. CVMFS will cache files locally so that multiple accesses to the same file on the same node will be very fast. Also, CVMFS can fall back to other nearby caches if the first fails.</p> + +<p><a href="https://support.opensciencegrid.org/support/solutions/articles/12000002775-transferring-data-with-stashcache">StashCP</a> is the second tool that can download data from StashCache. StashCP uses the CVMFS mount described above, falling back to the caching proxies and eventually the origin. The order of operations that StashCP performs is:</p> + +<ol> + <li>Check for the file in the CVMFS mount under /cvmfs/stash.osgstorage.org/…</li> + <li>If CVMFS copy fails, connect directly to the nearest proxy and attempt to download the file.</li> + <li>If the proxy fails, then connect directly to the origin server.</li> +</ol> + +<p>Since StashCP doesn’t rely only on the CVMFS mount, files are immediately available to transfer with StashCP.</p> + +<p>StashCP is distributed with OSG-Connect’s module system. Using StashCP is nearly as simple as using the <code class="language-plaintext highlighter-rouge">cp</code> command:</p> + +<div class="language-plaintext highlighter-rouge"><div class="highlight"><pre class="highlight"><code>$ module load stashcp +$ stashcp /user/&lt;username&gt;/public/… dest/ +</code></pre></div> +</div> + +<h2 id="conclusions">Conclusions</h2> +<p>The StashCache framework is very useful for downloading data to execution hosts across the OSG. It was designed to help opportunistic users transfer data without the need for dedicated storage or frameworks of their own, like CMS and ATLAS have deployed.</p> + +<p>StashCache has been used to transfer over 3 PB of data this year. Check out some of the papers written about using StashCache:</p> + +<ul> + <li>Derek Weitzel, Brian Bockelman, Duncan A. Brown, Peter Couvares, Frank Würthwein, and Edgar Fajardo Hernandez. 2017. Data Access for LIGO on the OSG. In Proceedings of PEARC17, New Orleans, LA, USA, July 09-13, 2017, 6 pages. DOI: 10.1145/3093338.3093363 <a href="https://arxiv.org/abs/1705.06202">Online</a></li> + <li>Derek Weitzel, Brian Bockelman, Dave Dykstra, Jakob Blomer, and René Meusel. 2017. Accessing Data Federations with CVMFS. In Journal of Physics - Conference Series. 
<a href="https://drive.google.com/open?id=0B_RVv_OjWcURUi15cmtUaXotVkU">Online</a></li> +</ul> + + + + + Chapel's Home in the Landscape of New Scientific Computing Languages + + 2017-06-04T01:00:00-06:00 + https://hpc.social/2017/chapel-s-home-in-the-landscape-of-new-scientific-computing-languages + <p>I was invited to speak at this past weekend’s fourth annual Chapel Implementers and Users Workshop (<a href="http://chapel.cray.com/CHIUW2017.html">CHIUW 2017</a>). +It was a great meeting, with lots of extremely high-quality talks on work being done with and on Chapel. The slides from the presentations +will be up shortly, and I recommend them - the <a href="http://chapel.cray.com/CHIUW/2017/choi-slides.pdf">libfabric</a>, +<a href="http://chapel.cray.com/CHIUW/2017/kayraklioglu-slides.pdf">KNL</a>, <a href="http://chapel.cray.com/CHIUW/2017/krishna-slides.pdf">use-after-free tracking</a>, and <a href="http://chapel.cray.com/CHIUW/2017/azad-slides.pdf">GraphBLAS</a> works +were of particular interest to me. The Code Camp on the next day, working with members the Chapel team on individual particular projects, was also a lot of fun.</p> + +<p>The topic of my own talk was “Chapel’s Home in the Landscape +of New Scientific Computing Languages (and what it can learn from +the neighbours)”; the materials from the talk can be found +<a href="http://github.com/ljdursi/CHIUW2017">on github</a>. I described +the sorts of problems I’m particularly interested in, surveyed +some of the languages/frameworks in there, and tried to identify +what I saw as Chapel’s role in the environment.</p> + +<p>My slides can be seen below or on <a href="http://ljdursi.github.io/CHIUW2017/#1">github</a>, where <a href="http://github.com/ljdursi/CHIUW2017">the complete materials can be found</a>.</p> + + + + + Compute Canadian- Building a successful and federated computational research enterprise, together + + 2017-06-01T01:00:00-06:00 + https://hpc.social/2017/compute-canadian-building-a-successful-and-federated-computational-research-enterprise-together + <p>Canada is a federated nation, and this is particularly visible in +areas of research funding, where both the federal and provincial +orders of government play a role. In building a successful digital +research infrastructure to support Canadian science and scholarship, +we must recognize that reality, and rely on the successful examples +of many organizations in Canada and around the world that embrace +such a federated approach.</p> + +<p>In <a href="https://www.dursi.ca/assets/pdfs/ComputeCanadianDiscussionPaper.pdf">this discussion paper</a>, +my colleague Jill Kowalchuck and I lay out what we hope to be the beginnings +of a discussion of what a renewed federation for supporting Canadian +science with advanced research computing and data could look like.</p> + +<h3 id="executive-summary">Executive Summary</h3> + +<p>Computing and data, and the expertise and tools to make use of both, is +now central to all fields of study. Ten years after the creation of +Compute Canada in response to the National Platforms Fund call, and +after the Naylor Report on science funding, it is an apt time for the +Canadian community built around this national research platform to take +stock. Is it doing what we need it to do for Canadian researchers? Is it +working the way we want it to? What should a Canadian computation and +data platform for supporting research look like in the coming years? 
+This document aims to begin that discussion within the community.</p> + +<p>Here we propose seven principles to guide us in this discussion — that +our project should serve Canadian research in a researcher-centred, +service-oriented, and truly national way; and that it should operate as +a true federation of equal partners, interoperable but not identical, +collaborative and up-to-date. We suggest in particular that it is vital +that our national platform is adaptive and responsive to researchers, +making choices driven by research needs and not technical choices, and +should make full use of the diversity and specialization that a Canadian +federation and its partners offer.</p> + +<p>From those principles, we make evidence-based proposals for a renewed +Canadian organization. Comparisons with successful examples of federated +organizations within Canada and abroad suggest that while the basic +architecture of our federation is sound, important roles and +relationships need to be clarified. While a central office must be +responsible for the processes of defining priorities, strategies, and +standards of interoperability, a successful federation requires those +processes to have buy-in from partners committed to the goals of the +federation. The Board of Directors of the central office in a federation +must have experience and training to handle the delicate task of +governing a central office but being responsible to a national +community. The Members need adequate visibility into the operations of +the central office and the federation as a whole so that they can +support their vital role to the organization. And that engagement needs +to extend to all who are invested in the success of research in Canada: +regional staff and Boards, institutional staff, researchers and funders, +and other organizations that provide digital infrastructure for research +in Canada. This document focusses on Compute Canada in particular, but +the principles and proposals apply to any digital research +infrastructure providers, or the system as a whole.</p> + +<p>Success for this document will mean starting conversations, inspiring +other documents and differing points of view, and the emergence of a +consensus within the community of what a renewed national platform for +the next ten years looks like. That does not mean this document is a +straw-man. The authors have played roles in the national platform +starting at its inception, from researcher to consortium and regional +(east and west) staff and management, and within the Compute Canada +central office, and hope that experience plus the benefit of some +distance have produced a coherent and compelling vision of what the +Compute Canada national project could be. But what matters is not this +proposal; it is what the community as a whole decides it wants its +national platform to be.</p> + + + + + Should I use Chapel or Julia for my next project? + + 2017-05-28T01:00:00-06:00 + https://hpc.social/2017/should-i-use-chapel-or-julia-for-my-next-project- + <p><a href="https://julialang.org">Julia</a> and <a href="http://chapel.cray.com">Chapel</a> +are both newish languages aimed at productive scientific computing, +with parallel computing capabilities baked in from the start. +There’s lots of information about both online, but not much comparing +the two. If you are starting a new scientific computing project +and are willing to try something new, which should you choose? 
What +are their strengths and weaknesses, and how do they compare?</p> + +<p>Here we walk through a comparison, focusing on distributed-memory +parallelism of the sort one would want for HPC-style simulation. +Both have strengths in largely disjoint areas. If you want matlab-like +interactivity and plotting, and need only coordinator-worker parallelism, +Julia is the clear winner; if you want MPI+OpenMP-type scalability +on rectangular distributed arrays (dense or sparse), Chapel wins +handily. Both languages and environments have clear untapped +potential and room to grow; we’ll talk about future prospects of +the two languages at the end.</p> + +<p><strong>Update</strong>: I’ve updated the timings - I hadn’t been using <code>@inbounds</code> +in the Julia code, and I had misconfigured my Chapel install so +that the compiles weren’t optimized; this makes a huge difference on +the 2d advection problem. All timings now are on an AWS c4.8x instance.</p> + +<ul id="markdown-toc"> + <li><a href="https://www.dursi.ca/feed.xml#a-quick-overview-of-the-two-languages" id="markdown-toc-a-quick-overview-of-the-two-languages">A quick overview of the two languages</a> <ul> + <li><a href="https://www.dursi.ca/feed.xml#julia" id="markdown-toc-julia">Julia</a></li> + <li><a href="https://www.dursi.ca/feed.xml#chapel" id="markdown-toc-chapel">Chapel</a></li> + </ul> + </li> + <li><a href="https://www.dursi.ca/feed.xml#similarities-and-differences" id="markdown-toc-similarities-and-differences">Similarities and differences</a> <ul> + <li><a href="https://www.dursi.ca/feed.xml#standard-library" id="markdown-toc-standard-library">Standard library</a></li> + <li><a href="https://www.dursi.ca/feed.xml#other-packages" id="markdown-toc-other-packages">Other packages</a></li> + <li><a href="https://www.dursi.ca/feed.xml#language-features" id="markdown-toc-language-features">Language features</a></li> + </ul> + </li> + <li><a href="https://www.dursi.ca/feed.xml#simple-computational-tasks" id="markdown-toc-simple-computational-tasks">Simple computational tasks</a> <ul> + <li><a href="https://www.dursi.ca/feed.xml#linear-algebra" id="markdown-toc-linear-algebra">Linear algebra</a></li> + <li><a href="https://www.dursi.ca/feed.xml#stencil-calculation" id="markdown-toc-stencil-calculation">Stencil calculation</a></li> + <li><a href="https://www.dursi.ca/feed.xml#kmer-counting" id="markdown-toc-kmer-counting">Kmer counting</a></li> + </ul> + </li> + <li><a href="https://www.dursi.ca/feed.xml#parallel-primitives" id="markdown-toc-parallel-primitives">Parallel primitives</a> <ul> + <li><a href="https://www.dursi.ca/feed.xml#remote-function-execution" id="markdown-toc-remote-function-execution">Remote function execution</a></li> + <li><a href="https://www.dursi.ca/feed.xml#futures-atomics-and-synchronization" id="markdown-toc-futures-atomics-and-synchronization">Futures, atomics and synchronization</a></li> + <li><a href="https://www.dursi.ca/feed.xml#parallel-loops-reductions-and-maps" id="markdown-toc-parallel-loops-reductions-and-maps">Parallel loops, reductions, and maps</a></li> + <li><a href="https://www.dursi.ca/feed.xml#threading" id="markdown-toc-threading">Threading</a></li> + <li><a href="https://www.dursi.ca/feed.xml#distributed-data" id="markdown-toc-distributed-data">Distributed data</a></li> + <li><a href="https://www.dursi.ca/feed.xml#communications" id="markdown-toc-communications">Communications</a></li> + </ul> + </li> + <li><a href="https://www.dursi.ca/feed.xml#a-2d-advection-problem" 
id="markdown-toc-a-2d-advection-problem">A 2d advection problem</a></li> + <li><a href="https://www.dursi.ca/feed.xml#strengths-weaknesses-and-future-prospects" id="markdown-toc-strengths-weaknesses-and-future-prospects">Strengths, Weaknesses, and Future Prospects</a> <ul> + <li><a href="https://www.dursi.ca/feed.xml#julia-1" id="markdown-toc-julia-1">Julia</a></li> + <li><a href="https://www.dursi.ca/feed.xml#chapel-1" id="markdown-toc-chapel-1">Chapel</a></li> + </ul> + </li> + <li><a href="https://www.dursi.ca/feed.xml#my-conclusions" id="markdown-toc-my-conclusions">My conclusions</a> <ul> + <li><a href="https://www.dursi.ca/feed.xml#both-projects-are-strong-and-useable-right-now-at-different-things" id="markdown-toc-both-projects-are-strong-and-useable-right-now-at-different-things">Both projects are strong and useable, right now, at different things</a></li> + <li><a href="https://www.dursi.ca/feed.xml#both-projects-have-as-yet-untapped-potential" id="markdown-toc-both-projects-have-as-yet-untapped-potential">Both projects have as-yet untapped potential</a></li> + </ul> + </li> +</ul> + +<h2 id="a-quick-overview-of-the-two-languages">A quick overview of the two languages</h2> + +<h3 id="julia">Julia</h3> + +<p>The <a href="https://julialang.org">Julia project</a> describes Julia as “a +high-level, high-performance dynamic programming language for +numerical computing.” It exploits type inference of rich types, +just-in-time compilation, and <a href="https://en.wikipedia.org/wiki/Multiple_dispatch">multiple +dispatch</a> (think +of R, with say <code>print()</code> defined to operate differently on scalars, +data frames, or linear regression fits) to provide a dynamic, +interactive, “scripting language”-type high level numerical programming +language that gives performance less than but competitive with +C or Fortran.</p> + +<p>The project sees the language as more or less a matlab-killer, and +so focusses on that sort of interface; interactive, through a REPL +or Jupyter notebook (both available to try <a href="https://juliabox.com">online</a>), +with integrated plotting; also, indexing begins at one, as God +intended.<sup id="fnref:1"><a class="footnote" href="https://www.dursi.ca/feed.xml#fn:1" rel="footnote">1</a></sup></p> + +<table style="border: 1px solid black;"> +<tbody> +<tr> +<td>Example from <a href="https://github.com/dpsanders/scipy_2014_julia">David Sanders’ SciPy 2014 tutorial</a></td> +<td></td> +</tr> +<tr> +<td> + +<figure class="highlight"><pre><code class="language-julia"><span class="k">using</span> <span class="n">PyPlot</span> + +<span class="c"># julia set</span> +<span class="k">function</span><span class="nf"> julia</span><span class="x">(</span><span class="n">z</span><span class="x">,</span> <span class="n">c</span><span class="x">;</span> <span class="n">maxiter</span><span class="o">=</span><span class="mi">200</span><span class="x">)</span> + <span class="k">for</span> <span class="n">n</span> <span class="o">=</span> <span class="mi">1</span><span class="o">:</span><span class="n">maxiter</span> + <span class="k">if</span> <span class="n">abs2</span><span class="x">(</span><span class="n">z</span><span class="x">)</span> <span class="o">&gt;</span> <span class="mi">4</span> + <span class="k">return</span> <span class="n">n</span><span class="o">-</span><span class="mi">1</span> + <span class="k">end</span> + <span class="n">z</span> <span class="o">=</span> <span class="n">z</span><span class="o">*</span><span class="n">z</span> <span class="o">+</span> 
<span class="n">c</span> + <span class="k">end</span> + <span class="k">return</span> <span class="n">maxiter</span> +<span class="k">end</span> + +<span class="n">jset</span> <span class="o">=</span> <span class="x">[</span> <span class="kt">UInt8</span><span class="x">(</span><span class="n">julia</span><span class="x">(</span><span class="n">complex</span><span class="x">(</span><span class="n">r</span><span class="x">,</span><span class="n">i</span><span class="x">),</span> <span class="n">complex</span><span class="x">(</span><span class="o">-.</span><span class="mi">06</span><span class="x">,</span><span class="o">.</span><span class="mi">67</span><span class="x">)))</span> + <span class="k">for</span> <span class="n">i</span><span class="o">=</span><span class="mi">1</span><span class="o">:-.</span><span class="mi">002</span><span class="o">:-</span><span class="mi">1</span><span class="x">,</span> <span class="n">r</span><span class="o">=-</span><span class="mf">1.5</span><span class="o">:.</span><span class="mi">002</span><span class="o">:</span><span class="mf">1.5</span> <span class="x">];</span> +<span class="n">get_cmap</span><span class="x">(</span><span class="s">"RdGy"</span><span class="x">)</span> +<span class="n">imshow</span><span class="x">(</span><span class="n">jset</span><span class="x">,</span> <span class="n">cmap</span><span class="o">=</span><span class="s">"RdGy"</span><span class="x">,</span> <span class="n">extent</span><span class="o">=</span><span class="x">[</span><span class="o">-</span><span class="mf">1.5</span><span class="x">,</span><span class="mf">1.5</span><span class="x">,</span><span class="o">-</span><span class="mi">1</span><span class="x">,</span><span class="mi">1</span><span class="x">])</span></code></pre></figure> + +</td> +<td> +<img alt="Julia set plot" src="https://www.dursi.ca/assets/julia_v_chapel/juliaset_in_julia.png" /> +</td></tr> +</tbody> +</table> + +<p>Julia blurs the distinction between scientific users of Julia and +developers in two quite powerful ways. The first is lisp-like +<a href="https://docs.julialang.org/en/stable/manual/metaprogramming/">metaprogramming</a>, +where julia code can be generated or modified from within Julia, +making it possible to build domain-specific langauges (DSLs) inside Julia +for problems; this allows simple APIs for broad problem sets which +nonetheless take full advantage of the structure of the particular +problems being solved; <a href="https://github.com/JuliaStats">JuliaStats</a>, +<a href="https://github.com/JuliaDiffEq/DifferentialEquations.jl">DifferentialEquations.jl</a>, +<a href="https://github.com/JuliaFEM/JuliaFEM.jl">JuliaFEM</a>, and +<a href="https://github.com/JuliaOpt/JuMP.jl">JuMP</a> offer hints of what +that could look like. Another sort of functionality this enables +is <a href="https://julialang.org/blog/2016/03/parallelaccelerator">Parallel Accellerator</a>, an +intel package that can rewrite some regular array operations into +fast, vectorized native code. 
<p>The second way Julia blurs the line between user and developer is
+the <a href="https://docs.julialang.org/en/stable/manual/packages/">package system</a>,
+which uses git and GitHub; this means that once you’ve installed
+someone’s package, you’re very close to being able to file a pull
+request if you find a bug, or to fork the package to specialize
+it to your own needs; and it’s similarly very easy to
+contribute a package if you’re already using GitHub to develop it.</p>
+
+<p>Julia has support for remote function execution (“out of the box”
+using SSH + TCP/IP, but other transports are available through
+packages) and distributed rectangular arrays; thread support
+is still experimental, as are shared-memory on-node arrays.</p>
+
+<h3 id="chapel">Chapel</h3>
+
+<p>While Julia is a scientific programming language with parallel
+computing support, Chapel is a programming language for parallel
+scientific computing.  It is a <a href="https://en.wikipedia.org/wiki/Partitioned_global_address_space">PGAS</a>
+language, with partitioned but globally-accessible variables, using
+<a href="https://gasnet.lbl.gov">GASNet</a> for communications.  However, it takes PGAS
+two steps further than languages like <a href="https://www.dursi.ca/post/coarray-fortran-goes-mainstream-gcc-5-1.html">Coarray
+Fortran</a>,
+<a href="http://upc.lbl.gov">UPC</a>, or <a href="http://x10-lang.org">X10</a>.</p>
+
+<p>The first extension is to define all large data structures (arrays,
+associative arrays, graphs) over <em>domains</em>, and
+then to define a library of <em>domain maps</em> for distributing these
+domains over different locality regions (“locales”) (nodes, or NUMA
+nodes, or KNL accelerators) and <em>layouts</em> for describing their layout
+within a locale.  By far the best tested and optimized domain maps
+are for the cases of dense (and, to a lesser extent, CSR-layout
+sparse) rectangular arrays, as below, although there is support for
+associative arrays (dictionaries) and unstructured meshes/graphs
+as well.</p>
+
+<p>The second is to couple those domain maps with parallel iterators
+over the domains, meaning that one can loop over the data in parallel
+in one loop (think OpenMP) with a “global view” rather than expressing
+the parallelism explicitly as a SIMD-type program. 
This decouples
+the expression of the layout of the data from the expression of the
+calculation over the data, which is essential for productive parallel
+computing; it means that tweaking the layouts (or the dimensionality of
+the program, or…) doesn’t require rewriting the internals of the
+computation.</p>
+
+<p>The distributions and layouts are written in Chapel, so that users can
+contribute new domain maps to the project.</p>
+
+<table style="border: 1px solid black;">
+<tbody>
+<tr> <td>
+Example from <a href="http://chapel.cray.com/tutorials/ACCU2017/06-DomainMaps.pdf">Chapel tutorial at ACCU 2017</a>
+</td> </tr>
+<tr> <td>
+
+<figure class="highlight"><pre><code class="language-chapel">var Dom: {1..4, 1..8} dmapped Block({1..4, 1..8});</code></pre></figure>
+
+</td> </tr>
+<tr> <td>
+<img alt="Block Distribution" src="https://www.dursi.ca/assets/julia_v_chapel/block-dist.png" />
+</td> </tr>
+<tr> <td>
+
+<figure class="highlight"><pre><code class="language-chapel">var Dom: {1..4, 1..8} dmapped Cyclic(startIdx=(1,1));</code></pre></figure>
+
+</td> </tr>
+<tr> <td>
+<img alt="Cyclic Distribution" src="https://www.dursi.ca/assets/julia_v_chapel/cyclic-dist.png" />
+</td> </tr>
+<tr> <td>
+
+<figure class="highlight"><pre><code class="language-chapel">// either case:
+
+var Inner : subdomain(Dom) = {2..3, 2..7};
+const north = (-1,0), south = (1,0), east = (0,1), west = (0,-1);
+
+var data, data_new : [Dom] real;
+var delta : real;
+
+forall ij in Inner {
+  data_new(ij) = (data(ij+north) + data(ij+south) +
+                  data(ij+east) + data(ij+west)) / 4.0;
+}
+delta = max reduce abs(data_new[Dom] - data[Dom]);</code></pre></figure>
+
+</td> </tr>
+</tbody>
+</table>
+
+<p>Chapel also exposes its lower-level parallel computing functionality —
+such as remote function execution, fork/join task parallelism — so
+that one can write an MPI-like SIMD program by explicitly launching
+a function on each core:</p>
+
+<figure class="highlight"><pre><code class="language-chapel">coforall loc in Locales do
+  on loc do
+    coforall tid in 0..#here.maxTaskPar do
+      do_simd_program(loc, tid);</code></pre></figure>
+
+<p>At roughly eight years old as a publicly available project, Chapel
+is a slightly older and more mature language than Julia.  However,
+the language continues to evolve and there are breaking changes
+between versions; these are much smaller and more localized breaking
+changes than with Julia, so that most recent example code online
+works readily.  As its focus has always been on large-scale parallelism
+rather than desktop computing, its potential market is smaller, and
+so it has attracted less interest and fewer users than Julia
+— however, if you read this blog, Chapel’s niche is one you are
+almost certainly very interested in.  The relative paucity of users
+is reflected in the smaller number of contributed packages, although
+an upcoming package manager will likely lower the bar to future
+contributions.</p>
+
+<p>Chapel also lacks a REPL, which makes experimentation and testing
+somewhat harder — there’s no equivalent of <a href="https://juliabox.com">JuliaBox</a>
+where one can play with the language at a console or in a notebook. 
+There is an effort in that direction now, which may be made easier
+by ongoing work on the underlying compiler architecture.</p>
+
+<h2 id="similarities-and-differences">Similarities and differences</h2>
+
+<h3 id="standard-library">Standard library</h3>
+
+<p>Both <a href="https://docs.julialang.org/en/stable">Julia</a> and <a href="http://chapel.cray.com/docs/latest/">Chapel</a>
+have good documentation, and the basic modules or capabilities one would expect from languages
+aimed at technical computing:</p>
+
+<ul>
+  <li>Complex numbers</li>
+  <li>Mathematical function libraries</li>
+  <li>Random numbers</li>
+  <li>Linear algebra</li>
+  <li>FFTs</li>
+  <li>C, Python interoperability</li>
+  <li>Multi-precision floats / BigInts</li>
+  <li>MPI interoperability</li>
+  <li>Profiling</li>
+</ul>
+
+<p>although there are differences: in Julia, Python interoperability
+is much more complete (the Julia set example above used matplotlib
+plotting, while <a href="https://pychapel.readthedocs.io">pychapel</a> focuses
+on calling Chapel from within Python).  Also, Julia’s linear algebra
+support is much slicker, styled after Matlab syntax and with a rich
+set of matrix types (symmetric, tridiagonal, <em>etc.</em>), so that for
+linear solves, say, a sensible method is chosen automatically; the
+concise syntax and “do the right thing” approach are particularly
+helpful for interactive use<sup id="fnref:2"><a class="footnote" href="https://www.dursi.ca/feed.xml#fn:2" rel="footnote">2</a></sup>, which is a primary use-case of Julia.</p>
+
+<p>On profiling, the Julia support is primarily for serial profiling
+and text-based; Chapel has a very nice tool called
+<a href="http://chapel.cray.com/docs/1.14/tools/chplvis/chplvis.html">chplvis</a>
+for visualizing parallel performance.</p>
+
+<h3 id="other-packages">Other packages</h3>
+
+<p>Julia’s early adoption of a package management framework and very
+large initial userbase has led to a <a href="http://pkg.julialang.org">very large ecosystem</a>
+of contributed packages.  As with all such package ecosystems,
+the packages themselves are a bit of a mixed bag – lots are broken or
+abandoned, many are simply wrappers to other tools – but there
+are also excellent, substantial packages taking full advantage of
+Julia’s capabilities that are of immediate interest
+to those doing scientific computing, such as
+<a href="https://github.com/JuliaDiffEq/DifferentialEquations.jl">DifferentialEquations.jl</a>
+for ODEs, SDEs, and FEM for some PDEs,
+<a href="https://github.com/BioJulia">BioJulia</a> for bioinformatics,
+<a href="http://www.juliadiff.org">JuliaDiff</a> for automatic differentiation,
+and <a href="http://juliastats.github.io">JuliaStats</a> for R-like
+statistical computing.  The Julia project would benefit from
+having a more curated view of the package listings easily available,
+so that these high-quality tools were more readily visible to
+new users.</p>
+
+<p>On the other hand, there are almost no packages available for Chapel
+outside of the main project.  There are efforts to develop a package
+manager inspired by cargo (Rust) and glide (Go); this would be an
+important and needed development, almost certainly necessary
+to grow the Chapel community.</p>
+
+<h3 id="language-features">Language features</h3>
+
+<p>The biggest language feature difference is undoubtedly Julia’s
+JIT-powered lisp-metaprogramming capabilities; Chapel is a more
+statically-compiled language, with generics and reflection but not
+full lisp-like code-is-data.  A small downside of Julia’s JIT
+approach is that functions are often slow the first time they are
+called, as they must be compiled.</p>
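+
+<p>That warm-up cost is easy to see directly; a trivial sketch (the
+numbers, of course, will vary by machine):</p>
+
+<figure class="highlight"><pre><code class="language-julia">sq(x) = x.^2
+
+@time sq(rand(10^6));   # first call: includes the one-time JIT compilation
+@time sq(rand(10^6));   # second call: just the array work</code></pre></figure>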
<p>Relatedly, Julia is garbage-collected,
+which can lead to pauses and memory pressure at unexpected times.
+On the other hand, Chapel’s compile times, which are still quite long
+even compared to those of other compiled languages, make the development
+cycle much slower than it would be with Julia or Python.</p>
+
+<p>Beyond that, Julia and Chapel are both quite new and have the functionality
+one might expect in a modern language: first-class functions, lambda
+functions, comprehensions, keyword/optional parameters, type
+inference, generics, reflection, iterators, ranges, coroutines and
+green threads, futures, and JuliaDoc/chpldoc Python packages for
+generating online documentation from source code and embedded
+comments.</p>
+
+<p>More minor, but something that quickly comes up: there’s a difference
+in command-line argument handling which reflects the use
+cases each team finds important.  Both give access to an argv-like array of
+strings passed to the command line; in base Julia, with its interactive
+nature, that’s it (although there’s a nice python-argparse-inspired
+<a href="http://carlobaldassi.github.io/ArgParse.jl/latest/">contributed package</a>),
+while in Chapel, which is intended for compiled, long-running executables,
+one can define a constant (<code>const n = 10;</code>) and make it settable
+on the command line by prefixing the <code>const</code> with <code>config</code> and running
+the program with <code>--n 20</code>.</p>
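+
+<p>On the Julia side, the base-language equivalent is parsing <code>ARGS</code>
+by hand; a minimal sketch (the script name here is hypothetical):</p>
+
+<figure class="highlight"><pre><code class="language-julia"># run as: julia myscript.jl 20
+n = length(ARGS) &gt;= 1 ? parse(Int, ARGS[1]) : 10   # default to 10
+println("n = ", n)</code></pre></figure>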
<h2 id="simple-computational-tasks">Simple computational tasks</h2>
+
+<p>Here we take a look at a couple of common single-node scientific
+computation primitives in each framework (with Python for comparison)
+to compare the language features.  Full code for the examples is
+available <a href="http://www.github.com/ljdursi/julia_v_chapel">on GitHub</a>.</p>
+
+<h3 id="linear-algebra">Linear algebra</h3>
+
+<p>For linear algebra operations, Julia’s Matlab lineage and
+interactive nature really shine:</p>
+
+<table style="border: 1px solid black;">
+<tbody>
+<tr><td><strong>Julia</strong></td>
+<td>
+
+<figure class="highlight"><pre><code class="language-julia"><span class="c"># ...</span>
+<span class="n">n</span> <span class="o">=</span> <span class="mi">500</span>
+<span class="n">B</span> <span class="o">=</span> <span class="n">rand</span><span class="x">(</span><span class="n">n</span><span class="x">,</span> <span class="n">n</span><span class="x">)</span>
+<span class="n">x</span> <span class="o">=</span> <span class="n">rand</span><span class="x">(</span><span class="n">n</span><span class="x">)</span>
+
+<span class="n">A</span> <span class="o">=</span> <span class="n">x</span><span class="o">*</span><span class="n">x</span><span class="err">'</span>
+<span class="n">y</span> <span class="o">=</span> <span class="n">B</span><span class="o">*</span><span class="n">x</span>
+
+<span class="n">println</span><span class="x">(</span><span class="n">A</span><span class="x">[</span><span class="mi">1</span><span class="x">,</span><span class="mi">1</span><span class="x">])</span>
+
+<span class="n">A</span> <span class="o">=</span> <span class="n">eye</span><span class="x">(</span><span class="n">n</span><span class="x">)</span>
+<span class="n">y</span> <span class="o">=</span> <span class="n">A</span><span class="o">\</span><span class="n">x</span>
+
+<span class="n">println</span><span class="x">(</span><span class="n">sum</span><span class="x">(</span><span class="n">abs</span><span class="o">.</span><span class="x">(</span><span class="n">x</span><span class="o">-</span><span class="n">y</span><span class="x">)))</span>
+<span class="c"># ...</span></code></pre></figure>
+
+</td></tr>
+<tr><td><strong>Chapel</strong></td>
+<td>
+
+<figure class="highlight"><pre><code class="language-c"><span class="n">use</span> <span class="n">LinearAlgebra</span><span class="p">;</span>
+<span class="n">use</span> <span class="n">LAPACK</span><span class="p">;</span>
+<span class="n">use</span> <span class="n">Random</span><span class="p">;</span>
+
+<span class="n">config</span> <span class="k">const</span> <span class="n">n</span><span class="o">=</span><span class="mi">500</span><span class="p">;</span>
+
+<span class="n">var</span> <span class="n">A</span> <span class="o">=</span> <span class="n">Matrix</span><span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="n">n</span><span class="p">),</span>
+    <span class="n">B</span> <span class="o">=</span> <span class="n">Matrix</span><span class="p">(</span><span class="n">n</span><span class="p">,</span> <span class="n">n</span><span class="p">),</span>
+    <span class="n">x</span><span class="p">,</span> <span class="n">y</span> <span class="o">=</span> <span class="n">Vector</span><span class="p">(</span><span class="n">n</span><span class="p">);</span>
+
+<span class="n">fillRandom</span><span class="p">(</span><span class="n">B</span><span class="p">);</span>
+<span class="n">fillRandom</span><span class="p">(</span><span class="n">x</span><span class="p">);</span>
+
+<span class="n">y</span> <span class="o">=</span> <span class="n">dot</span><span class="p">(</span><span class="n">B</span><span class="p">,</span> <span class="n">x</span><span
class="p">);</span> +<span class="n">A</span> <span class="o">=</span> <span class="n">outer</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">);</span> + +<span class="n">writeln</span><span class="p">(</span><span class="n">A</span><span class="p">[</span><span class="mi">1</span><span class="p">,</span><span class="mi">1</span><span class="p">]);</span> + +<span class="n">var</span> <span class="n">X</span> <span class="o">=</span> <span class="n">Matrix</span><span class="p">(</span><span class="n">n</span><span class="p">,</span><span class="mi">1</span><span class="p">);</span> +<span class="n">var</span> <span class="n">Y</span> <span class="o">=</span> <span class="n">Matrix</span><span class="p">(</span><span class="n">n</span><span class="p">,</span><span class="mi">1</span><span class="p">);</span> +<span class="n">X</span><span class="p">({</span><span class="mi">1</span><span class="p">..</span><span class="n">n</span><span class="p">},</span><span class="mi">1</span><span class="p">)</span> <span class="o">=</span> <span class="n">x</span><span class="p">({</span><span class="mi">1</span><span class="p">..</span><span class="n">n</span><span class="p">});</span> + +<span class="n">A</span> <span class="o">=</span> <span class="n">eye</span><span class="p">(</span><span class="n">n</span><span class="p">);</span> +<span class="n">var</span> <span class="n">ipiv</span> <span class="o">:</span> <span class="p">[</span><span class="mi">1</span><span class="p">..</span><span class="n">n</span><span class="p">]</span> <span class="n">c_int</span><span class="p">;</span> +<span class="n">Y</span> <span class="o">=</span> <span class="n">X</span><span class="p">;</span> +<span class="n">var</span> <span class="n">info</span> <span class="o">=</span> <span class="n">gesv</span><span class="p">(</span><span class="n">lapack_memory_order</span><span class="p">.</span><span class="n">row_major</span><span class="p">,</span> <span class="n">A</span><span class="p">,</span> <span class="n">ipiv</span><span class="p">,</span> <span class="n">Y</span><span class="p">);</span> + +<span class="n">var</span> <span class="n">res</span> <span class="o">=</span> <span class="o">+</span> <span class="n">reduce</span> <span class="nf">abs</span><span class="p">(</span><span class="n">x</span><span class="o">-</span><span class="n">y</span><span class="p">);</span> + +<span class="n">writeln</span><span class="p">(</span><span class="n">res</span><span class="p">);</span></code></pre></figure> + +</td></tr> +<tr><td><strong>Python</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-python"><span class="kn">from</span> <span class="nn">__future__</span> <span class="kn">import</span> <span class="n">print_function</span> +<span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="n">np</span> + +<span class="n">n</span> <span class="o">=</span> <span class="mi">500</span> +<span class="n">B</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">random</span><span class="p">.</span><span class="n">rand</span><span class="p">(</span><span class="mi">500</span><span class="p">,</span> <span class="mi">500</span><span class="p">)</span> +<span class="n">x</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">random</span><span class="p">.</span><span class="n">rand</span><span 
class="p">(</span><span class="mi">500</span><span class="p">)</span> + +<span class="n">A</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">outer</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">np</span><span class="p">.</span><span class="n">transpose</span><span class="p">(</span><span class="n">x</span><span class="p">))</span> +<span class="n">y</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">dot</span><span class="p">(</span><span class="n">B</span><span class="p">,</span> <span class="n">x</span><span class="p">)</span> + +<span class="k">print</span><span class="p">(</span><span class="n">A</span><span class="p">[</span><span class="mi">0</span><span class="p">,</span><span class="mi">0</span><span class="p">])</span> + +<span class="n">A</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">eye</span><span class="p">(</span><span class="n">n</span><span class="p">)</span> +<span class="n">y</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">linalg</span><span class="p">.</span><span class="n">solve</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">x</span><span class="p">)</span> + +<span class="k">print</span><span class="p">(</span><span class="n">np</span><span class="p">.</span><span class="nb">sum</span><span class="p">(</span><span class="n">np</span><span class="p">.</span><span class="nb">abs</span><span class="p">(</span><span class="n">x</span><span class="o">-</span><span class="n">y</span><span class="p">)))</span></code></pre></figure> + +</td></tr> +</tbody> +</table> + +<p>The new Chapel <code>LinearAlgebra</code> and <code>LAPACK</code> modules don’t really +work well together yet, so one has to awkwardly switch between +the two idioms, but that’s readily easily fixed. 
<h3 id="stencil-calculation">Stencil calculation</h3>
+
+<p>Below we take a look at a simple 1-d explicit heat diffusion equation,
+requiring a small stencil, and see how it compares across the languages.</p>
+
+<table style="border: 1px solid black;">
+<tbody>
+<tr><td><strong>Julia</strong></td>
+<td>
+
+<figure class="highlight"><pre><code class="language-julia"><span class="c"># ...</span>
+<span class="k">for</span> <span class="n">i</span> <span class="k">in</span> <span class="mi">2</span><span class="o">:</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">1</span>
+    <span class="nd">@inbounds</span> <span class="n">temp</span><span class="x">[</span><span class="n">i</span><span class="x">]</span> <span class="o">=</span> <span class="mf">0.</span>
+<span class="k">end</span>
+
+<span class="n">temp</span><span class="x">[</span><span class="mi">1</span><span class="x">]</span> <span class="o">=</span> <span class="n">tleft</span>
+<span class="n">temp</span><span class="x">[</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">2</span><span class="x">]</span> <span class="o">=</span> <span class="n">tright</span>
+
+<span class="k">for</span> <span class="n">iteration</span> <span class="k">in</span> <span class="mi">1</span><span class="o">:</span><span class="n">ntimesteps</span>
+    <span class="k">for</span> <span class="n">i</span> <span class="k">in</span> <span class="mi">2</span><span class="o">:</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">1</span>
+        <span class="nd">@inbounds</span> <span class="n">temp_new</span><span class="x">[</span><span class="n">i</span><span class="x">]</span> <span class="o">=</span> <span class="n">temp</span><span class="x">[</span><span class="n">i</span><span class="x">]</span> <span class="o">+</span> <span class="n">kappa</span><span class="o">*</span><span class="n">dt</span><span class="o">/</span><span class="x">(</span><span class="n">dx</span><span class="o">*</span><span class="n">dx</span><span class="x">)</span><span class="o">*</span>
+                    <span class="x">(</span><span class="n">temp</span><span class="x">[</span><span class="n">i</span><span class="o">-</span><span class="mi">1</span><span class="x">]</span> <span class="o">-</span> <span class="mi">2</span><span class="o">*</span><span class="n">temp</span><span class="x">[</span><span class="n">i</span><span class="x">]</span> <span class="o">+</span> <span class="n">temp</span><span class="x">[</span><span class="n">i</span><span class="o">+</span><span class="mi">1</span><span class="x">])</span>
+    <span class="k">end</span>
+    <span class="k">for</span> <span class="n">i</span> <span class="k">in</span> <span class="mi">2</span><span class="o">:</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">1</span>
+        <span class="nd">@inbounds</span> <span class="n">temp</span><span class="x">[</span><span class="n">i</span><span class="x">]</span> <span class="o">=</span> <span class="n">temp_new</span><span class="x">[</span><span class="n">i</span><span class="x">]</span>
+    <span class="k">end</span>
+<span class="k">end</span>
+<span class="c"># ...</span></code></pre></figure>
+
+</td></tr>
+<tr><td><strong>Chapel</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-c"><span class="c1">// ...</span> +<span class="k">const</span> <span class="n">ProblemSpace</span> <span class="o">=</span> <span class="p">{</span><span class="mi">1</span><span class="p">..</span><span class="n">ngrid</span><span class="p">},</span> + <span class="n">BigDomain</span> <span class="o">=</span> <span class="p">{</span><span class="mi">0</span><span class="p">..</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">1</span><span class="p">};</span> +<span class="n">var</span> <span class="n">T</span><span class="p">,</span> <span class="n">TNew</span><span class="o">:</span> <span class="p">[</span><span class="n">BigDomain</span><span class="p">]</span> <span class="n">real</span><span class="p">(</span><span class="mi">64</span><span class="p">)</span> <span class="o">=</span> <span class="mi">0</span><span class="p">.</span><span class="mi">0</span><span class="p">;</span> + +<span class="n">var</span> <span class="n">iteration</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> +<span class="n">T</span><span class="p">[</span><span class="mi">0</span><span class="p">]</span> <span class="o">=</span> <span class="n">tleft</span><span class="p">;</span> +<span class="n">T</span><span class="p">[</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">1</span><span class="p">]</span> <span class="o">=</span> <span class="n">tright</span><span class="p">;</span> + +<span class="k">const</span> <span class="n">left</span> <span class="o">=</span> <span class="o">-</span><span class="mi">1</span><span class="p">,</span> <span class="n">right</span> <span class="o">=</span> <span class="mi">1</span><span class="p">;</span> + +<span class="k">for</span> <span class="n">iteration</span> <span class="n">in</span> <span class="mi">1</span><span class="p">..</span><span class="n">ntimesteps</span> <span class="p">{</span> + <span class="k">for</span> <span class="n">i</span> <span class="n">in</span> <span class="n">ProblemSpace</span> <span class="p">{</span> + <span class="n">TNew</span><span class="p">(</span><span class="n">i</span><span class="p">)</span> <span class="o">=</span> <span class="n">T</span><span class="p">(</span><span class="n">i</span><span class="p">)</span> <span class="o">+</span> <span class="n">kappa</span><span class="o">*</span><span class="n">dt</span><span class="o">/</span><span class="p">(</span><span class="n">dx</span><span class="o">*</span><span class="n">dx</span><span class="p">)</span> <span class="o">*</span> + <span class="p">(</span><span class="n">T</span><span class="p">(</span><span class="n">i</span><span class="o">+</span><span class="n">left</span><span class="p">)</span> <span class="o">-</span> <span class="mi">2</span><span class="o">*</span><span class="n">T</span><span class="p">(</span><span class="n">i</span><span class="p">)</span> <span class="o">+</span> <span class="n">T</span><span class="p">(</span><span class="n">i</span><span class="o">+</span><span class="n">right</span><span class="p">));</span> + <span class="p">}</span> + <span class="k">for</span> <span class="n">i</span> <span class="n">in</span> <span class="n">ProblemSpace</span> <span class="p">{</span> + <span class="n">T</span><span class="p">(</span><span class="n">i</span><span class="p">)</span> <span class="o">=</span> <span class="n">TNew</span><span class="p">(</span><span 
class="n">i</span><span class="p">)</span> + <span class="p">}</span> +<span class="p">}</span> +<span class="c1">// ...</span></code></pre></figure> + +</td></tr> +<tr><td><strong>Python</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-python"><span class="c1"># ... +</span><span class="o">@</span><span class="n">jit</span><span class="p">(</span><span class="s">'f8[:](i4, i4, f8, f8, f8, f8, f8)'</span><span class="p">,</span> <span class="n">nopython</span><span class="o">=</span><span class="bp">True</span><span class="p">,</span> <span class="n">nogil</span><span class="o">=</span><span class="bp">True</span><span class="p">)</span> +<span class="k">def</span> <span class="nf">onedheat</span><span class="p">(</span><span class="n">ngrid</span><span class="p">,</span> <span class="n">ntimesteps</span><span class="p">,</span> <span class="n">kappa</span><span class="p">,</span> <span class="n">xleft</span><span class="p">,</span> <span class="n">xright</span><span class="p">,</span> <span class="n">tleft</span><span class="p">,</span> <span class="n">tright</span><span class="p">):</span> + <span class="n">dx</span> <span class="o">=</span> <span class="p">(</span><span class="n">xright</span><span class="o">-</span><span class="n">xleft</span><span class="p">)</span><span class="o">/</span><span class="p">(</span><span class="n">ngrid</span><span class="o">-</span><span class="mi">1</span><span class="p">)</span> + <span class="n">dt</span> <span class="o">=</span> <span class="mf">0.25</span><span class="o">*</span><span class="n">dx</span><span class="o">*</span><span class="n">dx</span><span class="o">/</span><span class="n">kappa</span> + + <span class="n">temp</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">2</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">np</span><span class="p">.</span><span class="n">double</span><span class="p">)</span> + <span class="n">temp_new</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">zeros</span><span class="p">(</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">2</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">np</span><span class="p">.</span><span class="n">double</span><span class="p">)</span> + <span class="n">temp</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="n">temp</span><span class="p">[</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">1</span><span class="p">]</span> <span class="o">=</span> <span class="n">tleft</span><span class="p">,</span> <span class="n">tright</span> + + <span class="k">for</span> <span class="n">iteration</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="n">ntimesteps</span><span class="p">):</span> + <span class="n">temp_new</span><span class="p">[</span><span class="mi">1</span><span class="p">:</span><span class="n">ngrid</span><span class="p">]</span> <span class="o">=</span> <span class="n">temp</span><span class="p">[</span><span class="mi">1</span><span class="p">:</span><span class="n">ngrid</span><span class="p">]</span> <span class="o">+</span> <span class="n">kappa</span><span class="o">*</span><span class="n">dt</span><span 
class="o">/</span><span class="p">(</span><span class="n">dx</span><span class="o">*</span><span class="n">dx</span><span class="p">)</span> <span class="o">*</span> \ + <span class="p">(</span><span class="n">temp</span><span class="p">[</span><span class="mi">2</span><span class="p">:</span><span class="n">ngrid</span><span class="o">+</span><span class="mi">1</span><span class="p">]</span> <span class="o">-</span> <span class="mf">2.</span><span class="o">*</span><span class="n">temp</span><span class="p">[</span><span class="mi">1</span><span class="p">:</span><span class="n">ngrid</span><span class="p">]</span> <span class="o">+</span> <span class="n">temp</span><span class="p">[</span><span class="mi">0</span><span class="p">:</span><span class="n">ngrid</span><span class="o">-</span><span class="mi">1</span><span class="p">])</span> + + <span class="n">temp</span><span class="p">[</span><span class="mi">1</span><span class="p">:</span><span class="n">ngrid</span><span class="p">]</span> <span class="o">=</span> <span class="n">temp_new</span><span class="p">[</span><span class="mi">1</span><span class="p">:</span><span class="n">ngrid</span><span class="p">]</span> + + <span class="k">return</span> <span class="n">temp</span><span class="p">[</span><span class="mi">1</span><span class="p">:</span><span class="n">ngrid</span><span class="p">]</span> +<span class="c1"># ...</span></code></pre></figure> + +</td></tr> +</tbody> +</table> + +<p>The main difference above is that the easiest way to get fast array +operations out of Julia is to explicitly write out the loops as vs. +numpy, and of explicitly using domains in Chapel. Timings are +below, for 10,000 timesteps of a domain of size 1,001. The Julia +script included a “dummy” call to the main program to “warm up” the +JIT, and then called on the routine. In Julia, for performance we +have to include the <code>@inbounds</code> macro; Julia’s JIT doesn’t recognize +that the stencil calculation over fixed bounds is in bounds of the +array defined with those same fixed bounds a couple of lines before. +Compile times are included for the Julia and Python JITs (naively +calculated as total run time minus the final time spent running the +calculation)</p> + +<table style="border: 1px solid black; margin: 0 auto; border-collapse: collapse;"> +<thead> +<th>time</th> <th>Julia</th> <th>Chapel</th> <th>Python + Numpy + Numba</th><th>Python + Numpy</th> +</thead> +<tbody style="border: 1px solid black;"> +<tr><td style="border: 1px solid black;">run</td><td style="border: 1px solid black;">0.0084</td><td style="border: 1px solid black;">0.0098 s</td><td style="border: 1px solid black;">0.017 s</td><td style="border: 1px solid black;">0.069 s</td></tr> +<tr><td style="border: 1px solid black;">compile</td><td style="border: 1px solid black;">0.57 s</td><td style="border: 1px solid black;">4.8s</td><td style="border: 1px solid black;">0.73 s</td><td style="border: 1px solid black;"> - </td></tr> +</tbody> +</table> + +<p>Julia wins this test, edging out Chapel by 16%; Python with numba is +surprisingly (to me) fast, coming within a factor of two.</p> + +<h3 id="kmer-counting">Kmer counting</h3> + +<p>Fields like bioinformatics or digital humanities push research +computing beyond matrix-slinging and array manipulations into the +more difficult areas of text handling, string manipulation, and +indexing. Here we mock up a trivial kmer-counter, reading in +genomic sequence data and counting the distribution of k-length +substrings. 
<p>Here we’re using pure Python for
+the Python implementation:</p>
+
+<table style="border: 1px solid black;">
+<tbody>
+<tr><td><strong>Julia</strong></td>
+<td>
+
+<figure class="highlight"><pre><code class="language-julia"><span class="c"># ...</span>
+<span class="n">sequences</span> <span class="o">=</span> <span class="n">read_sequences</span><span class="x">(</span><span class="n">infile</span><span class="x">)</span>
+
+<span class="n">counts</span> <span class="o">=</span> <span class="n">DefaultDict</span><span class="x">{</span><span class="kt">String</span><span class="x">,</span> <span class="kt">Int8</span><span class="x">}(</span><span class="mi">0</span><span class="x">)</span>
+<span class="k">for</span> <span class="n">seq</span> <span class="k">in</span> <span class="n">sequences</span>
+    <span class="k">for</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">1</span><span class="o">:</span><span class="n">length</span><span class="x">(</span><span class="n">seq</span><span class="x">)</span><span class="o">-</span><span class="n">k</span><span class="o">+</span><span class="mi">1</span>
+        <span class="n">kmer</span> <span class="o">=</span> <span class="n">seq</span><span class="x">[</span><span class="n">i</span> <span class="o">:</span> <span class="n">i</span><span class="o">+</span><span class="n">k</span><span class="o">-</span><span class="mi">1</span><span class="x">]</span>
+        <span class="n">counts</span><span class="x">[</span><span class="n">kmer</span><span class="x">]</span> <span class="o">+=</span> <span class="mi">1</span>
+    <span class="k">end</span>
+<span class="k">end</span>
+<span class="c"># ...</span></code></pre></figure>
+
+</td></tr>
+<tr><td><strong>Chapel</strong></td>
+<td>
+
+<figure class="highlight"><pre><code class="language-c"><span class="c1">// ...</span>
+<span class="n">var</span> <span class="n">sequences</span> <span class="o">=</span> <span class="n">readfasta</span><span class="p">(</span><span class="n">input_filename</span><span class="p">);</span>
+
+<span class="n">var</span> <span class="n">kmers</span> <span class="o">:</span> <span class="n">domain</span><span class="p">(</span><span class="n">string</span><span class="p">);</span>
+<span class="n">var</span> <span class="n">kmer_counts</span><span class="o">:</span> <span class="p">[</span><span class="n">kmers</span><span class="p">]</span> <span class="kt">int</span><span class="p">;</span>
+
+<span class="k">for</span> <span class="n">sequence</span> <span class="n">in</span> <span class="n">sequences</span> <span class="p">{</span>
+    <span class="k">for</span> <span class="n">i</span> <span class="n">in</span> <span class="mi">1</span><span class="p">..(</span><span class="n">sequence</span><span class="p">.</span><span class="n">length</span><span class="o">-</span><span class="n">k</span><span class="o">+</span><span class="mi">1</span><span class="p">)</span> <span class="p">{</span>
+        <span class="n">var</span> <span class="n">kmer</span><span class="o">:</span> <span class="n">string</span> <span class="o">=</span> <span class="n">sequence</span><span class="p">[</span><span
class="n">i</span><span class="p">..</span><span class="err">#</span><span class="n">k</span><span class="p">];</span> + <span class="k">if</span> <span class="o">!</span><span class="n">kmers</span><span class="p">.</span><span class="n">member</span><span class="p">(</span><span class="n">kmer</span><span class="p">)</span> <span class="p">{</span> + <span class="n">kmer_counts</span><span class="p">[</span><span class="n">kmer</span><span class="p">]</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> + <span class="p">}</span> + <span class="n">kmer_counts</span><span class="p">[</span><span class="n">kmer</span><span class="p">]</span> <span class="o">+=</span> <span class="mi">1</span><span class="p">;</span> + <span class="p">}</span> +<span class="p">}</span> +<span class="c1">// ...</span></code></pre></figure> + +</td></tr> +<tr><td><strong>Python</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-python"><span class="c1"># ... +</span><span class="k">def</span> <span class="nf">kmer_counts</span><span class="p">(</span><span class="n">filename</span><span class="p">,</span> <span class="n">k</span><span class="p">):</span> + <span class="n">sequences</span> <span class="o">=</span> <span class="n">readfasta</span><span class="p">(</span><span class="n">filename</span><span class="p">)</span> + <span class="n">counts</span> <span class="o">=</span> <span class="n">collections</span><span class="p">.</span><span class="n">defaultdict</span><span class="p">(</span><span class="nb">int</span><span class="p">)</span> + <span class="k">for</span> <span class="n">sequence</span> <span class="ow">in</span> <span class="n">sequences</span><span class="p">:</span> + <span class="k">for</span> <span class="n">i</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="nb">len</span><span class="p">(</span><span class="n">sequence</span><span class="p">)</span><span class="o">-</span><span class="n">k</span><span class="o">+</span><span class="mi">1</span><span class="p">):</span> + <span class="n">kmer</span> <span class="o">=</span> <span class="n">sequence</span><span class="p">[</span><span class="n">i</span><span class="p">:</span><span class="n">i</span><span class="o">+</span><span class="n">k</span><span class="p">]</span> + <span class="n">counts</span><span class="p">[</span><span class="n">kmer</span><span class="p">]</span> <span class="o">+=</span> <span class="mi">1</span> + <span class="k">return</span> <span class="n">counts</span> + +<span class="c1"># ...</span></code></pre></figure> + +</td></tr> +</tbody> +</table> + +<p>Other than the syntax differences, the main difference here is +Python and Chapel have convenience functions in their <code>defaultdict</code>s +which mean you don’t have to handle the key-not-yet-found case +separately, and Chapel has the user explicitly declare the domain +of keys. All perform quite well, particularly Julia; on a 4.5Mb +FASTA file for the reference genome of a strain of E. 
coli, +we get timings as below</p> + +<table style="border: 1px solid black; margin: 0 auto; border-collapse: collapse;"> +<thead> +<th>time</th> <th>Julia</th> <th>Chapel</th> <th>Python</th> +</thead> +<tbody style="border: 1px solid black;"> +<tr><td style="border: 1px solid black;">run</td><td style="border: 1px solid black;">5.3 s</td><td style="border: 1px solid black;">6.6s</td><td style="border: 1px solid black;">7.7s</td></tr> +<tr><td style="border: 1px solid black;">compile</td><td style="border: 1px solid black;">-</td><td style="border: 1px solid black;">6.2s</td><td style="border: 1px solid black;">-</td></tr> +</tbody> +</table> + +<p>Beating pure Python on dictionary and string operations isn’t +actually a given, even for a compiled language, as those features +are heavily optimized in Python implementations.</p> + +<p>(One caveat about the timings; pairwise string concatenation in Julia is <em>slow</em>; +in reading in the file, concatenating the sequence data in Julia +as it was done in the other languages resulted in a runtime of 54 seconds! +Instead, all sequence fragments were read in and the result put together +at once with <code>join()</code>.)</p> + +<h2 id="parallel-primitives">Parallel primitives</h2> + +<p>Since we’re interested in large-scale computation, parallel features are of +particular interest to us; here we walk through the parallel primitives +available to the languages and compare them.</p> + +<h3 id="remote-function-execution">Remote function execution</h3> + +<p>Both Julia and Chapel make it easy to explicitly launch tasks on other +processors:</p> + +<table style="border: 1px solid black;"> +<tbody> +<tr><td><strong>Julia</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-julia"><span class="nd">@everywhere</span> <span class="k">function</span><span class="nf"> whoami</span><span class="x">()</span> + <span class="n">println</span><span class="x">(</span><span class="n">myid</span><span class="x">(),</span> <span class="n">gethostname</span><span class="x">())</span> +<span class="k">end</span> + +<span class="n">remotecall_fetch</span><span class="x">(</span><span class="n">whoami</span><span class="x">,</span> <span class="mi">2</span><span class="x">)</span> +<span class="n">remotecall_fetch</span><span class="x">(</span><span class="n">whoami</span><span class="x">,</span> <span class="mi">4</span><span class="x">)</span></code></pre></figure> + +</td></tr> +<tr><td><strong>Chapel</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-c"><span class="n">proc</span> <span class="nf">main</span><span class="p">()</span> <span class="p">{</span> + <span class="k">const</span> <span class="n">numTasks</span> <span class="o">=</span> <span class="n">here</span><span class="p">.</span><span class="n">numPUs</span><span class="p">();</span> + <span class="k">for</span> <span class="n">taskid</span> <span class="n">in</span> <span class="mi">0</span><span class="p">..</span><span class="err">#</span><span class="n">numTasks</span> <span class="p">{</span> + <span class="n">begin</span> <span class="p">{</span> + <span class="n">writeln</span><span class="p">(</span><span class="n">here</span><span class="p">.</span><span class="n">id</span><span class="p">,</span> <span class="s">" "</span><span class="p">,</span> <span class="n">here</span><span class="p">.</span><span class="n">name</span><span class="p">,</span> <span class="s">" "</span><span class="p">,</span> <span class="n">taskid</span><span 
class="p">);</span> + <span class="p">}</span> + <span class="p">}</span> + + <span class="n">coforall</span> <span class="n">loc</span> <span class="n">in</span> <span class="n">Locales</span> <span class="p">{</span> + <span class="n">on</span> <span class="n">loc</span> <span class="p">{</span> + <span class="n">writeln</span><span class="p">(</span><span class="n">loc</span><span class="p">.</span><span class="n">id</span><span class="p">,</span> <span class="s">" "</span><span class="p">,</span> <span class="n">loc</span><span class="p">.</span><span class="n">name</span><span class="p">);</span> + <span class="p">}</span> + <span class="p">}</span> +<span class="p">}</span></code></pre></figure> + +</td></tr> +</tbody> +</table> + +<p>In Julia, starting julia with <code>juila -p 4</code> will launch julia with +4 worker tasks (and one coordinator task) on the local host; a <code>--machinefile</code> +option can be set to launch the tasks on remote hosts (over ssh, +by default, although other “ClusterManager”s are available, for +instance launching tasks on SGE clusters). In Chapel, launching a +chapel program with <code>-nl 4</code> will run a program distributed over 4 +locales, with options for those hosts set by environment variables. +Within each locale, Chapel will by default run across as many threads as +sensible (as determined by the extremely useful +<a href="https://www.open-mpi.org/projects/hwloc/">hwloc</a> library).</p> + +<p>As seen above, Chapel distinuishes between starting up local and +remote tasks; this is intrinsic to its “multiresolution” approach +to parallelism, so that it can take advantage of within-NUMA-node, +across-NUMA-node, and across-the-network parallism in different +ways.</p> + +<h3 id="futures-atomics-and-synchronization">Futures, atomics and synchronization</h3> + +<p>Once one can have tasks running asynchronously, synchronization +becomes an issue. Julia and Chapel both have “futures” for +asynchronous (non-blocking) function calls; futures can be +tested on, waited on or fetched from, with a fetch generally +blocking until the future has been “filled”. 
<p>In fact, in the above, Julia’s <code>remotecall_fetch</code> performs
+the remote call and then fetches, mimicking a blocking call; the
+<code>begin</code> blocks in Chapel do not block.</p>
+
+<p>Futures work the following way in Julia and Chapel:</p>
+
+<table style="border: 1px solid black;">
+<tbody>
+<tr><td><strong>Julia</strong></td>
+<td>
+
+<figure class="highlight"><pre><code class="language-julia"><span class="n">A</span> <span class="o">=</span> <span class="nd">@async</span> <span class="mi">2</span><span class="o">*</span><span class="mi">42</span>
+
+<span class="n">println</span><span class="x">(</span><span class="n">fetch</span><span class="x">(</span><span class="n">A</span><span class="x">))</span></code></pre></figure>
+
+</td></tr>
+<tr><td><strong>Chapel</strong></td>
+<td>
+
+<figure class="highlight"><pre><code class="language-c"><span class="n">use</span> <span class="n">Futures</span><span class="p">;</span>
+<span class="n">config</span> <span class="k">const</span> <span class="n">X</span> <span class="o">=</span> <span class="mi">42</span><span class="p">;</span>
+
+<span class="k">const</span> <span class="n">A</span> <span class="o">=</span> <span class="n">async</span><span class="p">(</span><span class="n">lambda</span><span class="p">(</span><span class="n">x</span><span class="o">:</span> <span class="kt">int</span><span class="p">)</span> <span class="p">{</span> <span class="k">return</span> <span class="mi">2</span> <span class="o">*</span> <span class="n">x</span><span class="p">;</span> <span class="p">},</span> <span class="n">X</span><span class="p">);</span>
+
+<span class="n">writeln</span><span class="p">(</span><span class="n">A</span><span class="p">.</span><span class="n">get</span><span class="p">());</span></code></pre></figure>
+
+</td></tr>
+</tbody>
+</table>
+
+<p>Both Julia and Chapel have thread-safe atomic primitive
+variables, and <code>sync</code> blocks for joining tasks launched
+within them before proceeding.</p>
+
+<h3 id="parallel-loops-reductions-and-maps">Parallel loops, reductions, and maps</h3>
+
+<p>Both languages make parallel looping, and reduction
+over those parallel loops, straightforward:</p>
+
+<table style="border: 1px solid black;">
+<tbody>
+<tr><td><strong>Julia</strong></td>
+<td>
+
+<figure class="highlight"><pre><code class="language-julia"><span class="c"># parallel loop</span>
+<span class="nd">@parallel</span> <span class="k">for</span> <span class="n">i</span><span class="o">=</span><span class="mi">1</span><span class="o">:</span><span class="mi">10000</span>
+    <span class="n">a</span><span class="x">[</span><span class="n">i</span><span class="x">]</span> <span class="o">=</span> <span class="n">b</span><span class="x">[</span><span class="n">i</span><span class="x">]</span> <span class="o">+</span> <span class="n">alpha</span><span class="o">*</span><span class="n">c</span><span class="x">[</span><span class="n">i</span><span class="x">]</span>
+<span class="k">end</span>
+
+<span class="c"># parallel reduction</span>
+<span class="n">asum</span> <span class="o">=</span> <span class="nd">@parallel</span> <span class="x">(</span><span class="o">+</span><span class="x">)</span> <span class="k">for</span> <span class="n">i</span><span class="o">=</span><span class="mi">1</span><span class="o">:</span><span class="mi">10000</span>
+    <span class="n">a</span><span class="x">[</span><span class="n">i</span><span class="x">]</span>
+<span class="k">end</span>
+
+<span class="k">function</span><span class="nf"> twox</span><span class="x">(</span><span class="n">x</span><span class="x">)</span> + <span class="mi">2</span><span class="n">x</span> +<span class="k">end</span> + +<span class="n">pmap</span><span class="x">(</span><span class="n">twox</span><span class="x">,</span> <span class="n">a</span><span class="x">)</span></code></pre></figure> + +</td></tr> +<tr><td><strong>Chapel</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-c"><span class="n">forall</span> <span class="n">i</span> <span class="n">in</span> <span class="mi">1</span><span class="p">..</span><span class="mi">10000</span> <span class="p">{</span> + <span class="n">a</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">b</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">+</span> <span class="n">alpha</span><span class="o">*</span><span class="n">c</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> +<span class="p">}</span> + +<span class="n">var</span> <span class="n">asum</span> <span class="o">=</span> <span class="o">+</span> <span class="n">reduce</span> <span class="n">a</span> + +<span class="n">b</span> <span class="o">=</span> <span class="mi">2</span><span class="o">*</span><span class="n">a</span><span class="p">;</span></code></pre></figure> + +</td></tr> +</tbody> +</table> + +<h3 id="threading">Threading</h3> + +<p>In Chapel, parallel for loops are automatically assigned hierarchically +according to what the runtime knows about the architecture; threading is +used on-node if multiple cores are available. Threading is an +<a href="https://docs.julialang.org/en/stable/manual/parallel-computing/#multi-threading-experimental">experimental feature</a> +in Julia, not quite ready to use for production work yet.</p> + +<h3 id="distributed-data">Distributed data</h3> + +<p>Julia has a +<a href="https://github.com/JuliaParallel/DistributedArrays.jl">DistributedArrays</a> +package which are sort of half-PGAS arrays: they can be read from +at any index, but only the local part can be written to. Chapel +is built around its PGAS distributions and iterators atop them.</p> + +<p>Julia’s DistributedArrays are known not to perform particularly well, +and have been taken out of the base language since 0.4. They have +been worked on since in preparation for the 0.6 release; however, +the main branch does not appear to be working with 0.6-rc2, or +at least I couldn’t get it working. This section then mostly covers the +previous version of DistributedArrays.</p> + +<p>Accessing remote values over DistributedArrays is quite slow. As +such, DistributedArrays performs quite badly for the sort of thing +one might want to use Chapel distributed arrays for; they’re really +more for Monte-Carlo or other mostly-embarrasingly-parallel +calculations, where read access is only needed at the end of the +comptuation or a small number of other times. Programming for a +stencil-type case or other iterative non-local computations is also +a little awkard; currently one has to remotely spawn tasks where +on the remote array fragments repeatedly to usher along each element +of the computation. 
The new version of the arrays will have a +<code>simd()</code> function which makes doing that nicer; it also allows for +MPI-style communications, which seems like it is faster than accessing +the data through the distributed array, but for use cases where +that is handy, it’s not clear what one would use the distributed +array for rather than just having each task have its own local +array.</p> + +<p>However, for largely local computation (such as coordinator-worker type +operations), the distributed arrays work well. Here +we have a STREAM calculation:</p> + +<table style="border: 1px solid black;"> +<tbody> +<tr><td><strong>Julia</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-julia"><span class="k">using</span> <span class="n">DistributedArrays</span> +<span class="nd">@everywhere</span> <span class="n">importall</span> <span class="n">DistributedArrays</span> + +<span class="nd">@everywhere</span> <span class="k">function</span><span class="nf"> dostreamcalc</span><span class="x">(</span><span class="n">alpha</span><span class="x">,</span> <span class="n">bval</span><span class="x">,</span> <span class="n">cval</span><span class="x">,</span> <span class="n">A</span><span class="x">,</span> <span class="n">B</span><span class="x">,</span> <span class="n">C</span><span class="x">)</span> + <span class="k">for</span> <span class="n">i</span> <span class="k">in</span> <span class="mi">1</span><span class="o">:</span><span class="n">length</span><span class="x">(</span><span class="n">localindexes</span><span class="x">(</span><span class="n">B</span><span class="x">)[</span><span class="mi">1</span><span class="x">])</span> + <span class="n">localpart</span><span class="x">(</span><span class="n">B</span><span class="x">)[</span><span class="n">i</span><span class="x">]</span> <span class="o">=</span> <span class="n">bval</span> + <span class="k">end</span> + <span class="k">for</span> <span class="n">i</span> <span class="k">in</span> <span class="mi">1</span><span class="o">:</span><span class="n">length</span><span class="x">(</span><span class="n">localindexes</span><span class="x">(</span><span class="n">C</span><span class="x">)[</span><span class="mi">1</span><span class="x">])</span> + <span class="n">localpart</span><span class="x">(</span><span class="n">C</span><span class="x">)[</span><span class="n">i</span><span class="x">]</span> <span class="o">=</span> <span class="n">cval</span> + <span class="k">end</span> + + <span class="k">for</span> <span class="n">i</span> <span class="k">in</span> <span class="mi">1</span><span class="o">:</span><span class="n">length</span><span class="x">(</span><span class="n">localindexes</span><span class="x">(</span><span class="n">A</span><span class="x">)[</span><span class="mi">1</span><span class="x">])</span> + <span class="n">localpart</span><span class="x">(</span><span class="n">A</span><span class="x">)[</span><span class="n">i</span><span class="x">]</span> <span class="o">=</span> <span class="n">localpart</span><span class="x">(</span><span class="n">B</span><span class="x">)[</span><span class="n">i</span><span class="x">]</span> <span class="o">+</span> <span class="n">alpha</span><span class="o">*</span><span class="n">localpart</span><span class="x">(</span><span class="n">C</span><span class="x">)[</span><span class="n">i</span><span class="x">]</span> + <span class="k">end</span> +<span class="k">end</span> + +<span class="c">#...</span> + +<span class="n">A</span> <span class="o">=</span> <span 
class="n">dzeros</span><span class="x">(</span><span class="n">problem_size</span><span class="x">)</span> +<span class="n">B</span> <span class="o">=</span> <span class="n">copy</span><span class="x">(</span><span class="n">A</span><span class="x">)</span> +<span class="n">C</span> <span class="o">=</span> <span class="n">copy</span><span class="x">(</span><span class="n">A</span><span class="x">)</span> + +<span class="n">ps</span> <span class="o">=</span> <span class="n">procs</span><span class="x">(</span><span class="n">A</span><span class="x">)</span> +<span class="n">refs</span> <span class="o">=</span> <span class="x">[(</span><span class="nd">@spawnat</span> <span class="n">p</span> <span class="n">dostreamcalc</span><span class="x">(</span><span class="n">alpha</span><span class="x">,</span> <span class="n">bval</span><span class="x">,</span> <span class="n">cval</span><span class="x">,</span> <span class="n">A</span><span class="x">,</span> <span class="n">B</span><span class="x">,</span> <span class="n">C</span><span class="x">))</span> <span class="k">for</span> <span class="n">p</span> <span class="k">in</span> <span class="n">ps</span><span class="x">]</span> +<span class="n">pmap</span><span class="x">(</span><span class="n">fetch</span><span class="x">,</span> <span class="n">refs</span><span class="x">)</span> +<span class="c"># ...</span></code></pre></figure> + +</td></tr> +<tr><td><strong>Chapel</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-c"><span class="c1">// ...</span> + <span class="k">const</span> <span class="n">ProblemSpace</span><span class="o">:</span> <span class="n">domain</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span> <span class="n">dmapped</span> <span class="n">Block</span><span class="p">(</span><span class="n">boundingBox</span><span class="o">=</span><span class="p">{</span><span class="mi">1</span><span class="p">..</span><span class="n">problem_size</span><span class="p">})</span> <span class="o">=</span> <span class="p">{</span><span class="mi">1</span><span class="p">..</span><span class="n">problem_size</span><span class="p">};</span> + + <span class="n">var</span> <span class="n">A</span><span class="p">,</span> <span class="n">B</span><span class="p">,</span> <span class="n">C</span><span class="o">:</span> <span class="p">[</span><span class="n">ProblemSpace</span><span class="p">]</span> <span class="n">real</span><span class="p">;</span> + + <span class="n">A</span> <span class="o">=</span> <span class="mi">0</span><span class="p">.</span><span class="mi">0</span><span class="p">;</span> + <span class="n">B</span> <span class="o">=</span> <span class="n">bval</span><span class="p">;</span> + <span class="n">C</span> <span class="o">=</span> <span class="n">cval</span><span class="p">;</span> + + <span class="n">forall</span> <span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">,</span> <span class="n">c</span><span class="p">)</span> <span class="n">in</span> <span class="n">zip</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">B</span><span class="p">,</span> <span class="n">C</span><span class="p">)</span> <span class="k">do</span> + <span class="n">a</span> <span class="o">=</span> <span class="n">b</span> <span class="o">+</span> <span class="n">alpha</span> <span class="o">*</span> <span class="n">c</span><span class="p">;</span> + +<span class="c1">// 
...</span></code></pre></figure> + +</td></tr> +</tbody> +</table> + +<h3 id="communications">Communications</h3> + +<p>Julia has explicit support for <a href="https://en.wikipedia.org/wiki/Communicating_sequential_processes">CSP-style</a> +channels, like <code>go</code>, which are something like a cross between queues and futures; they can keep being written to from multiple +tasks:</p> + +<figure class="highlight"><pre><code class="language-julia"><span class="nd">@everywhere</span> <span class="k">function</span><span class="nf"> putmsg</span><span class="x">(</span><span class="n">pid</span><span class="x">)</span> + <span class="n">mypid</span> <span class="o">=</span> <span class="n">myid</span><span class="x">()</span> + <span class="n">msg</span> <span class="o">=</span> <span class="s">"Hi from </span><span class="si">$</span><span class="s">mypid"</span> + <span class="n">rr</span> <span class="o">=</span> <span class="kt">RemoteChannel</span><span class="x">(</span><span class="n">pid</span><span class="x">)</span> + <span class="n">put!</span><span class="x">(</span><span class="n">rr</span><span class="x">,</span> <span class="n">msg</span><span class="x">)</span> + <span class="n">println</span><span class="x">(</span><span class="n">myid</span><span class="x">(),</span> <span class="s">" sent "</span><span class="x">,</span> <span class="n">msg</span><span class="x">,</span> <span class="s">" to "</span><span class="x">,</span> <span class="n">pid</span><span class="x">)</span> + <span class="k">return</span> <span class="n">rr</span> +<span class="k">end</span> + +<span class="nd">@everywhere</span> <span class="k">function</span><span class="nf"> getmsg</span><span class="x">(</span><span class="n">rr</span><span class="x">)</span> + <span class="n">msg</span> <span class="o">=</span> <span class="n">fetch</span><span class="x">(</span><span class="n">rr</span><span class="x">)</span> + <span class="n">println</span><span class="x">(</span><span class="n">myid</span><span class="x">(),</span> <span class="s">" got: "</span><span class="x">,</span> <span class="n">msg</span><span class="x">)</span> +<span class="k">end</span> + +<span class="n">rr</span> <span class="o">=</span> <span class="n">remotecall_fetch</span><span class="x">(</span><span class="n">putmsg</span><span class="x">,</span> <span class="mi">2</span><span class="x">,</span> <span class="mi">3</span><span class="x">)</span> +<span class="n">remotecall_wait</span><span class="x">(</span><span class="n">getmsg</span><span class="x">,</span> <span class="mi">3</span><span class="x">,</span> <span class="n">rr</span><span class="x">)</span></code></pre></figure> + +<p>Chapel, by contrast, doesn’t expose these methods; communications +is done implicitly through remote data access or remote code +invocation.</p> + +<h2 id="a-2d-advection-problem">A 2d advection problem</h2> + +<p>Having seen the parallel computing tools available in each language, +we try here a simple distributed computation. Here we try Julia, +Chapel, and Python using <a href="http://dask.pydata.org/en/latest/">Dask</a> +on a simple distributed-memory stencil problem, two dimensional +upwinded advection. 
A Gaussian blob is advected by a constant +velocity field; shown below is the initial condition, the blob moved +slightly after a few timesteps, and the difference.</p> + +<p><img alt="2D Advection Plot" src="https://www.dursi.ca/assets/julia_v_chapel/twod_advection.png" /></p> + +<p>We do this in Julia using DistributedArrays, in Chapel using Stencil-distributed +arrays, and in Python using Dask arrays. The relevant code snippets follow below.</p> + +<table style="border: 1px solid black;"> +<tbody> +<tr><td><strong>Julia</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-julia"><span class="nd">@everywhere</span> <span class="k">function</span><span class="nf"> get_data_plus_gc</span><span class="x">(</span><span class="n">domain</span><span class="x">,</span> <span class="n">nguard</span><span class="x">,</span> <span class="n">ngrid</span><span class="x">)</span> + <span class="k">if</span> <span class="n">myid</span><span class="x">()</span> <span class="k">in</span> <span class="n">procs</span><span class="x">(</span><span class="n">domain</span><span class="x">)</span> + <span class="n">li</span> <span class="o">=</span> <span class="n">localindexes</span><span class="x">(</span><span class="n">domain</span><span class="x">)</span> + <span class="n">lp</span> <span class="o">=</span> <span class="n">localpart</span><span class="x">(</span><span class="n">domain</span><span class="x">)</span> + + <span class="n">s</span> <span class="o">=</span> <span class="n">size</span><span class="x">(</span><span class="n">lp</span><span class="x">)</span> + <span class="n">data_plus_gc</span> <span class="o">=</span> <span class="n">zeros</span><span class="x">(</span><span class="n">s</span><span class="x">[</span><span class="mi">1</span><span class="x">]</span><span class="o">+</span><span class="mi">2</span><span class="o">*</span><span class="n">nguard</span><span class="x">,</span> <span class="n">s</span><span class="x">[</span><span class="mi">2</span><span class="x">]</span><span class="o">+</span><span class="mi">2</span><span class="o">*</span><span class="n">nguard</span><span class="x">)</span> + <span class="k">for</span> <span class="n">j</span> <span class="k">in</span> <span class="mi">1</span><span class="o">:</span><span class="n">s</span><span class="x">[</span><span class="mi">2</span><span class="x">]</span> + <span class="k">for</span> <span class="n">i</span> <span class="k">in</span> <span class="mi">1</span><span class="o">:</span><span class="n">s</span><span class="x">[</span><span class="mi">1</span><span class="x">]</span> + <span class="n">data_plus_gc</span><span class="x">[</span><span class="n">i</span><span class="o">+</span><span class="n">nguard</span><span class="x">,</span> <span class="n">j</span><span class="o">+</span><span class="n">nguard</span><span class="x">]</span> <span class="o">=</span> <span class="n">lp</span><span class="x">[</span><span class="n">i</span><span class="x">,</span><span class="n">j</span><span class="x">]</span> + <span class="k">end</span> + <span class="k">end</span> + + <span class="n">xstart</span> <span class="o">=</span> <span class="n">li</span><span class="x">[</span><span class="mi">1</span><span class="x">][</span><span class="mi">1</span><span class="x">]</span> + <span class="n">xend</span> <span class="o">=</span> <span class="n">li</span><span class="x">[</span><span class="mi">1</span><span class="x">][</span><span class="k">end</span><span class="x">]</span> + <span 
class="n">ystart</span> <span class="o">=</span> <span class="n">li</span><span class="x">[</span><span class="mi">2</span><span class="x">][</span><span class="mi">1</span><span class="x">]</span> + <span class="n">yend</span> <span class="o">=</span> <span class="n">li</span><span class="x">[</span><span class="mi">2</span><span class="x">][</span><span class="k">end</span><span class="x">]</span> + + <span class="k">for</span> <span class="n">g</span> <span class="k">in</span> <span class="mi">1</span><span class="o">:</span><span class="n">nguard</span> + <span class="n">xsg</span> <span class="o">=</span> <span class="x">(</span><span class="n">xstart</span><span class="o">-</span><span class="mi">1</span><span class="o">-</span><span class="n">g</span> <span class="o">+</span> <span class="n">ngrid</span><span class="x">)</span> <span class="o">%</span> <span class="n">ngrid</span> <span class="o">+</span> <span class="mi">1</span> + <span class="n">xeg</span> <span class="o">=</span> <span class="x">(</span><span class="n">xend</span><span class="o">-</span><span class="mi">1</span><span class="o">+</span><span class="n">g</span><span class="x">)</span> <span class="o">%</span> <span class="n">ngrid</span> <span class="o">+</span> <span class="mi">1</span> + + <span class="k">for</span> <span class="n">j</span> <span class="k">in</span> <span class="mi">1</span><span class="o">+</span><span class="n">nguard</span><span class="o">:</span><span class="n">s</span><span class="x">[</span><span class="mi">2</span><span class="x">]</span><span class="o">+</span><span class="n">nguard</span> + <span class="n">data_plus_gc</span><span class="x">[</span><span class="n">nguard</span><span class="o">+</span><span class="mi">1</span><span class="o">-</span><span class="n">g</span><span class="x">,</span> <span class="n">j</span><span class="x">]</span> <span class="o">=</span> <span class="n">domain</span><span class="x">[</span><span class="n">xsg</span><span class="x">,</span> <span class="n">j</span><span class="o">-</span><span class="n">nguard</span><span class="o">+</span><span class="n">ystart</span><span class="o">-</span><span class="mi">1</span><span class="x">]</span> + <span class="n">data_plus_gc</span><span class="x">[</span><span class="n">s</span><span class="x">[</span><span class="mi">1</span><span class="x">]</span><span class="o">+</span><span class="n">nguard</span><span class="o">+</span><span class="n">g</span><span class="x">,</span> <span class="n">j</span><span class="x">]</span> <span class="o">=</span> <span class="n">domain</span><span class="x">[</span><span class="n">xeg</span><span class="x">,</span> <span class="n">j</span><span class="o">-</span><span class="n">nguard</span><span class="o">+</span><span class="n">ystart</span><span class="o">-</span><span class="mi">1</span><span class="x">]</span> + <span class="k">end</span> + + <span class="c">#...</span> + <span class="k">end</span> + <span class="k">end</span> + <span class="k">return</span> <span class="n">data_plus_gc</span> +<span class="k">end</span> + +<span class="nd">@everywhere</span> <span class="k">function</span><span class="nf"> advect_data</span><span class="x">(</span><span class="n">dens</span><span class="x">,</span> <span class="n">nguard</span><span class="x">,</span> <span class="n">ngrid</span><span class="x">,</span> <span class="n">velx</span><span class="x">,</span> <span class="n">vely</span><span class="x">,</span> <span class="n">dx</span><span class="x">,</span> <span 
class="n">dy</span><span class="x">,</span> <span class="n">dt</span><span class="x">)</span> + <span class="n">locdens</span> <span class="o">=</span> <span class="n">get_data_plus_gc</span><span class="x">(</span><span class="n">dens</span><span class="x">,</span> <span class="n">nguard</span><span class="x">,</span> <span class="n">ngrid</span><span class="x">)</span> + + <span class="c">#...calculate gradients on locdens</span> + + <span class="k">for</span> <span class="n">j</span> <span class="k">in</span> <span class="mi">1</span><span class="o">+</span><span class="n">nguard</span><span class="o">:</span><span class="n">ny</span><span class="o">+</span><span class="n">nguard</span> + <span class="k">for</span> <span class="n">i</span> <span class="k">in</span> <span class="mi">1</span><span class="o">+</span><span class="n">nguard</span><span class="o">:</span><span class="n">nx</span><span class="o">+</span><span class="n">nguard</span> + <span class="n">localpart</span><span class="x">(</span><span class="n">dens</span><span class="x">)[</span><span class="n">i</span><span class="o">-</span><span class="n">nguard</span><span class="x">,</span> <span class="n">j</span><span class="o">-</span><span class="n">nguard</span><span class="x">]</span> <span class="o">-=</span> <span class="n">dt</span><span class="o">*</span><span class="x">(</span><span class="n">velx</span><span class="o">*</span><span class="n">gradx</span><span class="x">[</span><span class="n">i</span><span class="x">,</span><span class="n">j</span><span class="x">]</span> <span class="o">+</span> <span class="n">vely</span><span class="o">*</span><span class="n">grady</span><span class="x">[</span><span class="n">i</span><span class="x">,</span><span class="n">j</span><span class="x">])</span> + <span class="k">end</span> + <span class="k">end</span> +<span class="k">end</span> + +<span class="c">#...</span> + +<span class="k">function</span><span class="nf"> timestep</span><span class="x">(</span><span class="n">dens</span><span class="x">,</span> <span class="n">nguard</span><span class="x">,</span> <span class="n">ngrid</span><span class="x">,</span> <span class="n">velx</span><span class="x">,</span> <span class="n">vely</span><span class="x">,</span> <span class="n">dx</span><span class="x">,</span> <span class="n">dy</span><span class="x">,</span> <span class="n">dt</span><span class="x">)</span> + <span class="n">ps</span> <span class="o">=</span> <span class="n">procs</span><span class="x">(</span><span class="n">dens</span><span class="x">)</span> + <span class="n">refs</span> <span class="o">=</span> <span class="x">[(</span><span class="nd">@spawnat</span> <span class="n">p</span> <span class="n">advect_data</span><span class="x">(</span><span class="n">dens</span><span class="x">,</span> <span class="n">nguard</span><span class="x">,</span> <span class="n">ngrid</span><span class="x">,</span> <span class="n">velx</span><span class="x">,</span> <span class="n">vely</span><span class="x">,</span> <span class="n">dx</span><span class="x">,</span> <span class="n">dy</span><span class="x">,</span> <span class="n">dt</span><span class="x">))</span> <span class="k">for</span> <span class="n">p</span> <span class="k">in</span> <span class="n">ps</span><span class="x">]</span> + <span class="n">pmap</span><span class="x">(</span><span class="n">fetch</span><span class="x">,</span> <span class="n">refs</span><span class="x">)</span> +<span class="k">end</span> + +<span class="c">#...</span></code></pre></figure> 
+ +</td></tr> +<tr><td><strong>Chapel</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-c"><span class="c1">//...</span> + + <span class="k">const</span> <span class="n">ProblemSpace</span> <span class="o">=</span> <span class="p">{</span><span class="mi">1</span><span class="p">..</span><span class="n">ngrid</span><span class="p">,</span> <span class="mi">1</span><span class="p">..</span><span class="n">ngrid</span><span class="p">},</span> + <span class="n">ProblemDomain</span> <span class="o">:</span> <span class="n">domain</span><span class="p">(</span><span class="mi">2</span><span class="p">)</span> <span class="n">dmapped</span> <span class="n">Stencil</span><span class="p">(</span><span class="n">boundingBox</span><span class="o">=</span><span class="n">ProblemSpace</span><span class="p">,</span> <span class="n">fluff</span><span class="o">=</span><span class="p">(</span><span class="n">nguard</span><span class="p">,</span><span class="n">nguard</span><span class="p">),</span> <span class="n">periodic</span><span class="o">=</span><span class="nb">true</span><span class="p">)</span> <span class="o">=</span> <span class="n">ProblemSpace</span><span class="p">;</span> + + <span class="c1">//...</span> + <span class="n">var</span> <span class="n">dens</span><span class="o">:</span> <span class="p">[</span><span class="n">ProblemDomain</span><span class="p">]</span> <span class="n">real</span> <span class="o">=</span> <span class="mi">0</span><span class="p">.</span><span class="mi">0</span><span class="p">;</span> + + <span class="c1">// density a gaussian of width sigma centred on (initialposx, initialposy)</span> + <span class="n">forall</span> <span class="n">ij</span> <span class="n">in</span> <span class="n">ProblemSpace</span> <span class="p">{</span> + <span class="n">var</span> <span class="n">x</span> <span class="o">=</span> <span class="p">(</span><span class="n">ij</span><span class="p">(</span><span class="mi">1</span><span class="p">)</span><span class="o">-</span><span class="mi">1</span><span class="p">.</span><span class="mi">0</span><span class="p">)</span><span class="o">/</span><span class="n">ngrid</span><span class="p">;</span> + <span class="n">var</span> <span class="n">y</span> <span class="o">=</span> <span class="p">(</span><span class="n">ij</span><span class="p">(</span><span class="mi">2</span><span class="p">)</span><span class="o">-</span><span class="mi">1</span><span class="p">.</span><span class="mi">0</span><span class="p">)</span><span class="o">/</span><span class="n">ngrid</span><span class="p">;</span> + <span class="n">dens</span><span class="p">(</span><span class="n">ij</span><span class="p">)</span> <span class="o">=</span> <span class="n">exp</span><span class="p">(</span><span class="o">-</span><span class="p">((</span><span class="n">x</span><span class="o">-</span><span class="n">initialposx</span><span class="p">)</span><span class="o">**</span><span class="mi">2</span> <span class="o">+</span> <span class="p">(</span><span class="n">y</span><span class="o">-</span><span class="n">initialposy</span><span class="p">)</span><span class="o">**</span><span class="mi">2</span><span class="p">)</span><span class="o">/</span><span class="p">(</span><span class="n">sigma</span><span class="o">**</span><span class="mi">2</span><span class="p">));</span> + <span class="p">}</span> + + <span class="k">for</span> <span class="n">iteration</span> <span class="n">in</span> <span class="mi">1</span><span 
class="p">..</span><span class="n">ntimesteps</span> <span class="p">{</span> + <span class="c1">// update the boundary conditions - periodic</span> + <span class="n">dens</span><span class="p">.</span><span class="n">updateFluff</span><span class="p">();</span> + + <span class="c1">// calculate the upwinded gradient</span> + <span class="c1">// ...</span> + + <span class="n">dens</span> <span class="o">=</span> <span class="n">dens</span> <span class="o">-</span> <span class="n">dt</span><span class="o">*</span><span class="p">(</span><span class="n">velx</span><span class="o">*</span><span class="n">gradx</span> <span class="o">+</span> <span class="n">vely</span><span class="o">*</span><span class="n">grady</span><span class="p">);</span> +<span class="c1">//...</span> +<span class="p">}</span></code></pre></figure> + +</td></tr> +<tr><td><strong>Python + Dask</strong></td> +<td> + +<figure class="highlight"><pre><code class="language-python"><span class="c1">#... +</span> +<span class="k">def</span> <span class="nf">dask_step</span><span class="p">(</span><span class="n">subdomain</span><span class="p">,</span> <span class="n">nguard</span><span class="p">,</span> <span class="n">dx</span><span class="p">,</span> <span class="n">dy</span><span class="p">,</span> <span class="n">dt</span><span class="p">,</span> <span class="n">u</span><span class="p">):</span> + <span class="s">""" + map_overlap applies a function to a subdomain of a dask array, + filling the guardcells in first + """</span> + <span class="k">return</span> <span class="n">subdomain</span><span class="p">.</span><span class="n">map_overlap</span><span class="p">(</span><span class="n">advect</span><span class="p">,</span> <span class="n">depth</span><span class="o">=</span><span class="n">nguard</span><span class="p">,</span> <span class="n">boundary</span><span class="o">=</span><span class="s">'periodic'</span><span class="p">,</span> + <span class="n">dx</span><span class="o">=</span><span class="n">dx</span><span class="p">,</span> <span class="n">dy</span><span class="o">=</span><span class="n">dy</span><span class="p">,</span> <span class="n">dt</span><span class="o">=</span><span class="n">dt</span><span class="p">,</span> <span class="n">u</span><span class="o">=</span><span class="n">u</span><span class="p">)</span> + + +<span class="k">def</span> <span class="nf">initial_conditions</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">,</span> <span class="n">initial_posx</span><span class="o">=</span><span class="mf">0.3</span><span class="p">,</span> <span class="n">initial_posy</span><span class="o">=</span><span class="mf">0.3</span><span class="p">,</span> <span class="n">sigma</span><span class="o">=</span><span class="mf">0.15</span><span class="p">):</span> + <span class="n">xx</span><span class="p">,</span> <span class="n">yy</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">meshgrid</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span> + <span class="n">density</span> <span class="o">=</span> <span class="n">np</span><span class="p">.</span><span class="n">exp</span><span class="p">(</span><span class="o">-</span><span class="p">((</span><span class="n">xx</span><span class="o">-</span><span class="n">initial_posx</span><span class="p">)</span><span class="o">**</span><span class="mi">2</span> <span class="o">+</span> 
<span class="p">(</span><span class="n">yy</span><span class="o">-</span><span class="n">initial_posy</span><span class="p">)</span><span class="o">**</span><span class="mi">2</span><span class="p">)</span><span class="o">/</span><span class="p">(</span><span class="n">sigma</span><span class="o">**</span><span class="mi">2</span><span class="p">))</span> + <span class="k">return</span> <span class="n">density</span> + + +<span class="k">if</span> <span class="n">__name__</span> <span class="o">==</span> <span class="s">"__main__"</span><span class="p">:</span> + <span class="c1">#... +</span> + <span class="n">dens</span> <span class="o">=</span> <span class="n">initial_conditions</span><span class="p">(</span><span class="n">x</span><span class="p">,</span> <span class="n">y</span><span class="p">)</span> + <span class="n">subdomain_init</span> <span class="o">=</span> <span class="n">da</span><span class="p">.</span><span class="n">from_array</span><span class="p">(</span><span class="n">dens</span><span class="p">,</span> <span class="n">chunks</span><span class="o">=</span><span class="p">((</span><span class="n">npts</span><span class="o">+</span><span class="mi">1</span><span class="p">)</span><span class="o">//</span><span class="mi">2</span><span class="p">,</span> <span class="p">(</span><span class="n">npts</span><span class="o">+</span><span class="mi">1</span><span class="p">)</span><span class="o">//</span><span class="mi">2</span><span class="p">))</span> + + <span class="c1"># These create the steps, but they don't actually perform the execution... +</span> <span class="n">subdomain</span> <span class="o">=</span> <span class="n">dask_step</span><span class="p">(</span><span class="n">subdomain_init</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">dx</span><span class="p">,</span> <span class="n">dy</span><span class="p">,</span> <span class="n">dt</span><span class="p">,</span> <span class="n">u</span><span class="p">)</span> + <span class="k">for</span> <span class="n">step</span> <span class="ow">in</span> <span class="nb">range</span><span class="p">(</span><span class="mi">1</span><span class="p">,</span> <span class="n">nsteps</span><span class="p">):</span> + <span class="n">subdomain</span> <span class="o">=</span> <span class="n">dask_step</span><span class="p">(</span><span class="n">subdomain</span><span class="p">,</span> <span class="mi">2</span><span class="p">,</span> <span class="n">dx</span><span class="p">,</span> <span class="n">dy</span><span class="p">,</span> <span class="n">dt</span><span class="p">,</span> <span class="n">u</span><span class="p">)</span> + + <span class="c1"># _this_ performs the execution +</span> <span class="n">start</span> <span class="o">=</span> <span class="n">time</span><span class="p">.</span><span class="n">clock</span><span class="p">()</span></code></pre></figure> + +</td></tr> +</tbody> +</table> + +<p>As with the stream benchmark, we see that the Julia DistributedArrays +require a lot of bookkeeping to use; both Chapel and Dask are much +more straightforward.</p> + +<p>The one-node timings here aren’t even close. By forcing Chapel to run +on each core separately, the performance isn’t that different than Julia. 
+But when told that there is one “locale” and left +to sort out the details itself, Chapel benefits dramatically from being +able to use multiple levels of parallelism, with no extra work; +on a single 8-processor node, running a 1000x1000 grid with all cores +takes the following amount of time:</p> + +<table style="border: 1px solid black; margin: 0 auto; border-collapse: collapse;"> +<thead> +<th>Julia -p=1</th><th>Julia -p=8</th><th>Chapel -nl=1 ParTasksPerLocale=8</th><th>Chapel -nl=8 ParTasksPerLocale=1</th><th>Python</th> +</thead> +<tbody style="border: 1px solid black;"> +<tr> +<td style="border: 1px solid black;">177 s</td> +<td style="border: 1px solid black;">264 s</td> +<td style="border: 1px solid black;"><b>0.4 s</b></td> +<td style="border: 1px solid black;">145 s</td> +<td style="border: 1px solid black;">193 s</td></tr> +</tbody> +</table> + +<p>The 0.4 s is not a typo. Threading matters. Admittedly, +this is a bit of an extreme case: 1000x1000 isn’t a big +grid to distribute over 8 processes, so communications +overhead dominates; Julia seems to suffer that overhead even +with just one process.</p> + +<p>Another interesting thing here is that Python+Numpy+Dask (numba didn’t +help here) is competitive even with Chapel <em>if</em> you force Chapel +to not use threading on-node, and both made the program much easier +to write than Julia did.</p> + +<h2 id="strengths-weaknesses-and-future-prospects">Strengths, Weaknesses, and Future Prospects</h2> + +<p>Both Julia and Chapel are perfectly useable today for problems that +fall within their current bailiwicks, at least for advanced users. +They are strong projects and interesting technologies. In addition, +both have significant potential and “room to grow” beyond their +current capabilities; but both face challenges as well.</p> + +<h3 id="julia-1">Julia</h3> + +<p>Julia’s great flexibility - the metaprogramming and the type system +in particular - gives it a very real opportunity to become a platform +on which many domain-specific languages are written for particular scientific problems. +We see some of that potential in tools like <a href="https://github.com/JuliaDiffEq/DifferentialEquations.jl">DifferentialEquations.jl</a>, +where a simple, general API can nonetheless be used to provide efficient +solutions to problems that span a wide range of regimes and structures; +the <code>solve()</code> function and the problem definition language essentially +become a DSL for a wide range of differential equation problems. +And Julia’s interactive and dynamic nature makes it a natural for +scientists noodling around on problems, performing numerical +experiments and looking at the results. While large-scale computing +— in an HPC or Spark-style Big-data sense — is not a forte of +Julia’s right now, the basic pieces are there and it certainly could +be in the future.</p>
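+ +<p>To make that concrete, here is a minimal sketch of what that <code>solve()</code> interface looks like. It follows the package’s documented usage rather than any code from this post’s benchmarks, and the exact problem-function signature has shifted between releases, so treat it as illustrative:</p> + +<figure class="highlight"><pre><code class="language-julia"># a minimal sketch, assuming a recent DifferentialEquations.jl
+using DifferentialEquations
+
+# exponential growth: du/dt = 1.01 u, with u(0) = 1/2, on t in [0, 1]
+f(u, p, t) = 1.01 * u                  # the problem definition is solver-independent
+prob = ODEProblem(f, 0.5, (0.0, 1.0))
+sol = solve(prob)                      # solve() picks an appropriate integrator
+
+println(sol(0.5))                      # the solution object interpolates in time</code></pre></figure> + +<p>The same <code>solve(prob)</code> call, given a different problem type, dispatches to entirely different solvers; that is what makes the interface feel like a DSL.</p>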
+ +<p>Many of Julia’s disadvantages are inevitable flip sides of some of +those advantages. Because of the dynamic nature of +the language and its reliance on JIT and type inference, it is +<a href="https://discourse.julialang.org/t/julia-static-compilation/296/27">still not +possible</a> +to fully compile a Julia script into a static executable, meaning +that there will be JIT pauses in initial iterations of running code; +the dynamic nature of the language also relies on garbage collection, +which can cause either GC pauses (and thus jitter at scale) or +unexpected memory pressure throughout execution. Similarly, the +fact that it’s so easy to contribute a package to the Julia package +ecosystem means that the package listing is littered with abandoned +and broken packages.</p> + +<p>But some of the disadvantages seem more self-inflicted. While the +language has been public and actively developed for <a href="https://julialang.org/blog/2012/02/why-we-created-julia">over five +years</a>, +the language is still at v0.6. While any language will evolve over +time, the Julia community has spent the past five years continually +re-litigating minor but foundational decisions of syntax and behaviour +in the interests of conceptual purity – v0.4 in late 2015 changed +the capitalization of unsigned integer types and radically changed +the dictionary syntax, while 0.5 in late 2016 dramatically (although +less dramatically than originally proposed, after community pushback) +changed the behaviour of arrays (!!) in an event termed the +Arraypocalypse. The choice of string concatenation operator alone +spans enormous, non-stop GitHub issue discussions from late 2012 +to mid-2015. At least one more round of significant breaking changes +is planned before a 1.0 release. +As a result, most non-trivial example code online simply doesn’t +work; thus also the accelerated bitrot of software in the Julia +package listing. It has been difficult to implement new functionality +on top of base Julia; it’s hard to build powerful parallel computing +tools when one can’t even depend on the behaviour of arrays. +I would have liked to use Intel’s ParallelAccelerator for Julia to +see how it worked on the advection problem above, for instance, but Julia 0.6 +breaks the ParallelAccelerator, and Julia 0.6 is needed for the <code>@simd</code> +feature with DistributedArrays.</p> + +<p>So Julia living up to its potential is not a given. If I were on +Julia’s project team, things that would concern me would include:</p> + +<dl> + <dt><strong>Peak Julia?</strong></dt> + <dd>Julia grew very quickly early on, but since then seems to have topped out; +for example, <a href="https://g.co/trends/qzmA9">flat Google Trends interest</a>, +and falling off the radar of “languages to watch” lists such as the +<a href="http://redmonk.com/sogrady/2017/03/17/language-rankings-1-17/">Redmonk language rankings</a>. +This may be unfair; these trends may say more about the large initial +surge of interest than stagnation or decline. “A hugely popular +scientific programming language” almost seems like an oxymoron, after all. +<a href="https://insights.stackoverflow.com/trends?tags=julia-lang">Declining Stack Overflow</a> +interest may simply reflect that the community has successfully moved discussion +to its <a href="https://discourse.julialang.org">discourse</a> site. +A five-year-old language for numerical computing that still hasn’t +reached 1.0 but has popularity comparable to Rust (which started +at the same time but is a more general systems-programming language) +or Fortran (which has an enormous installed base) is pretty remarkable; +further growth may inevitably be more modest simply because of the +small number of scientific programmers out there. 
Still, I think +one would want to see interest growing ahead of a 1.0 release, +rather than flat or declining.</dd> + <dt><strong>Instability driving off users, developers</strong></dt> + <dd>Very early on, community members who used Julia started building +what became <a href="http://juliastats.github.io">JuliaStats</a>, with R-like +data frames, data tables, random distributions, and a growing number +of statistics and machine-learning tools built atop them. This took +significant developer effort, as fundamental to statistical use +cases are “Not Available” or “NA” values, with semantics different +from the NaNs that we in the simulation computing community are so +frequently (if usually unintentionally) familiar with. Thus dataframes +and tables couldn’t simply be built directly on top of numerical +arrays of basic numerical types, but took some effort to build +efficient “nullable” types atop them. But partly because of instability +in the underlying language, Julia DataFrames and DataArrays have +themselves been in flux, which is show-stopping to R users +considering Julia, and demoralizing to developers. Many other similar +examples exist in other domains. If it is true that there is +declining or stagnant interest in Julia, this would certainly be a +contributing factor.</dd> + <dt><strong>The JIT often needs help, even for basic numerical computing tasks</strong></dt> + <dd>Julia is designed around its JIT compiler, which enables some +of the language’s very cool features - the metaprogramming, the +dynamic nature of the language, the interactivity. But the JIT +compiler often needs a lot of help to get reasonable performance, +such as use of the <code>@inbounds</code> macro in the stencil calculation +(a small sketch of such annotations follows this list). +Writing numerical operations in the more readable +vectorized form (like for the stream example in Chapel, <code>C = A + B</code> +rather than looping over the indices) <a href="http://www.johnmyleswhite.com/notebook/2013/12/22/the-relationship-between-vectorized-and-devectorized-code/">has long been slow in Julia</a>, +although <a href="https://julialang.org/blog/2017/01/moredots">a new feature</a> +may have fixed that. <a href="http://parallelacceleratorjl.readthedocs.io/en/latest/index.html">A third-party package</a> +exists which helps many of the common cases (speeding up stencil +operations on rectangular arrays), which on one hand indicates the +power of Julia’s metaprogramming capabilities. But on the other, one +might naturally think that fast numerical operations on arrays would +be something that the core language came with. Part of the problem here +is that while the Julia ecosystem broadly has a very large number of +contributors, the core language internals (like the JIT itself) +have only a handful, and complex issues like performance problems +can take a very long time to get solved.</dd> + <dt><strong>The 800lb pythonic gorilla</strong></dt> + <dd>Python is enormously popular in scientific and data-science type +applications, has a huge installed base and number of packages, and +with <a href="http://www.numpy.org">numpy</a> and <a href="http://numba.pydata.org">numba</a> +can be quite fast. The scientific computing community is now +grudgingly starting to move to Python 3, and with Python 3.5+ +supporting <a href="https://docs.python.org/3/library/typing.html">type annotations</a>, +I think there’d start to be a quite real concern that Python would get +Julia-fast (or close enough) before Julia got Python-big. 
The fact +that some of Julia’s nicest features like notebook support and coolest new projects +like <a href="https://github.com/JuliaParallel/Dagger.jl">Dagger</a> rely on +or are ports of work originally done for Python (ipython notebook +and <a href="http://dask.pydata.org/en/latest/">Dask</a>) indicates the danger +if Python gets fast enough.</dd> +</dl>
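+ +<p>To make the JIT point above concrete, here is a minimal sketch (not taken from this post’s benchmark code, and with speedups that will vary by Julia version and hardware) of the kind of annotations an inner loop tends to need, next to the fused “dot” broadcast form that the linked feature introduced:</p> + +<figure class="highlight"><pre><code class="language-julia"># a minimal sketch, assuming Julia-0.6-era syntax
+function triad_loop!(A, B, C, alpha)
+    @inbounds @simd for i in eachindex(A)  # skip bounds checks; hint vectorization
+        A[i] = B[i] + alpha * C[i]
+    end
+    return A
+end
+
+# the readable vectorized form; the fused broadcast avoids temporary arrays
+triad_dots!(A, B, C, alpha) = (A .= B .+ alpha .* C)
+
+A = zeros(1_000_000); B = fill(1.0, 1_000_000); C = fill(2.0, 1_000_000)
+triad_loop!(A, B, C, 0.5)
+triad_dots!(A, B, C, 0.5)</code></pre></figure>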
+ +<p>Of those four, only the middle two are completely under the Julia +team’s control; a v1.0 released soon, with solemn oaths sworn +that there will be no more significant breaking changes until v2.0, +would help developers and users, and onboarding more people into core internals +development would help the underlying technology.</p> + +<h3 id="chapel-1">Chapel</h3> + +<p>If I were on the Chapel team, my concerns would be different:</p> + +<dl> + <dt><strong>Adoption</strong></dt> + <dd>It’s hard to escape the fact that Chapel’s user base is very +small. The good news is that Chapel’s niche, unlike Julia’s, has +no serious immediate competitor — I’d consider other productive +parallel scientific programming languages to be more research +projects than products — which gives it a bit more runway. But +the niche itself is small, and Chapel’s modest adoption rate within +that niche needs to be addressed in the near future if the language +is to thrive. The Chapel team is doing many of the right things +— the package is easy to install (no small feat for a performant +parallel programming language); the compiler is getting faster and +producing faster code; there’s lots of examples, tutorials and +documentation available; and the community is extremely friendly +and welcoming — but it seems clear that users need to be given +more reason to start trying the language.</dd> + <dt><strong>Small number of external contributors</strong></dt> + <dd>Admittedly, this is related to the fact that the number of users +is small, but it’s also the case that contributing code to the main +project is nontrivial, and there’s no central +place where other people could look for your work if you wanted to have +it as an external package. A package manager would be a real help, +and it doesn’t have to be elaborate (especially in the initial version).</dd> + <dt><strong>Not enough packages</strong></dt> + <dd>In turn, this is caused by the small number of external contributors, +and helps keep the adoption low. Chapel already has the fundamentals +to start building some really nice higher-level packages and solvers +that would make it easy to start writing some types of scientific +codes. A distributed-memory n-dimensional FFT over one of its +domains; the beginnings of a Chapel-native set of solvers from +<a href="http://www.netlib.org/scalapack/">Scalapack</a> or +<a href="http://www.mcs.anl.gov/petsc/index.html">PETSc</a> (both of which are +notoriously hard to get started with, and in PETSc’s case, even +install); simple static-sized R-style dataframes with some analysis +routines; these are tools which would make it very easy to get +started writing some non-trivial scientific software in Chapel.</dd> + <dt><strong>Too few domain maps and layouts</strong></dt> + <dd>Being able to, in a few lines of code, write performant, threaded, +NUMA-aware, and distributed memory operations on statically-decomposed +rectangular multidimensional arrays, and have that code work on a +cluster or your desktop is amazing. But many scientific problems +do not map neatly onto these domains. Many require dynamically-sized +domains (block-adaptive meshes) or load balancing (tree codes, +dynamic hash tables); others may be static but not quite look like +CSR-sparse arrays. Domain maps, layouts, and the parallel iterators +which loop over them are the “secret sauce” of Chapel, and can be +written in user code if the underlying capabilities they need are +supported, so they can be contributed externally, but there is little +documentation and there are few examples available (compared to what +exists for using the existing domain maps).</dd> +</dl> + +<p>The good news is that these items are all under the Chapel community’s +control. Programs that are natural to write in Chapel currently are +easy to write and can perform quite well; the goal then is to expand +the space of those programs by leveraging early adopters into writing +packages.</p> + +<h2 id="my-conclusions">My conclusions</h2> + +<p>This is entitled “<em>My</em> conclusions” because my takeaways might reasonably be +different than yours. Here’s my take.</p> + +<h3 id="both-projects-are-strong-and-useable-right-now-at-different-things">Both projects are strong and useable, right now, at different things</h3> + +<p>I’d have no qualms about recommending Chapel to someone who wanted +to tackle computations on large distributed rectangular arrays, +dense or sparse, or Julia for someone who had a short-lived project +and wanted something interactive and requiring only single-node or +coordinator-worker computations (or patterns that were more about +concurrency than parallelism). Julia also seems like a good choice for +prototyping a DSL for specific scientific problems.</p> + +<p>Neither project is really a competitor for the other; for Julia the +nearest competitor is likely the Python ecosystem, and for Chapel +it would be the status quo (X + MPI + OpenMP/OpenACC) or that people +might try investigating a research project or start playing with +Spark (which is good at a lot of things, but not really at scientific +simulation work).</p> + +<p>Scientific computing communities are very wary of new technologies +(it took 10+ years for Python to start getting any traction), with +the usual, self-fulfilling, fear being “what if it goes away”. I +don’t think there’s any concern about dead code here for projects +that are started with either. Chapel will be actively supported +for another couple of years at least, and the underlying tools (like +GASNet) underpin many projects and aren’t going anywhere. One’s +code wouldn’t be “locked into” Chapel at any rate, as there are MPI +bindings, so there’s always a path to incrementally port your +code back to MPI if you choose to. For Julia, the immediate worry +is less about lack of support and more that the project might be +<em>too</em> actively maintained; that one would have to continually exert +effort to keep one’s code caught up with the current version. In either +case, there are clear paths to follow (porting or upgrading) to +keep your code working.</p> + +<h3 id="both-projects-have-as-yet-untapped-potential">Both projects have as-yet untapped potential</h3> + +<p>What’s exciting about both of these projects is how far they could +go. 
Chapel already makes a certain class of MPI+OpenMP-type programs +extremely simple to write with fairly good performance; if that +class of programs expands (either through packages built atop +current functionality, or expanded functionality through additional +well-supported domain maps) and as performance continues to improve, +it could make large-scale scientific computation accessible to a +much broader community of scientists (and thus science).</p> + +<p>Julia has the same potential to broaden computational science on +the desktop, and (at least in the near term) for computations +requiring only minimal communication, like coordinator-worker patterns. +But Python is already doing this, and making surprising inroads on +the distributed-memory computing front, and there will be something of a +race to see which gets there first.</p> + +<hr /> + +<div class="footnotes"> + <ol> + <li id="fn:1"> + <p>Yes, I said it. Offsets into buffers can begin at 0, sure, but indices into mathematical objects begin at 1; anything else is madness. Also: the Oxford comma, two spaces after a period, and vi are all the correct answers to their respective questions. <a class="reversefootnote" href="https://www.dursi.ca/feed.xml#fnref:1">&#8617;</a></p> + + </li> + <li id="fn:2"> + <p>“Do the right thing” isn’t free, however; as with Matlab or numpy, when combining objects of different shapes or sizes, the “right thing” can be a bit surprising unless one is very familiar with the tool’s <a href="https://docs.julialang.org/en/stable/manual/arrays/?highlight=broadcasting#broadcasting">broadcasting rules</a>. <a class="reversefootnote" href="https://www.dursi.ca/feed.xml#fnref:2">&#8617;</a></p> + + </li> + </ol> +</div> + + + + + Reviewing the state of the art of burst buffers + + 2017-03-13T01:07:00-06:00 + https://hpc.social/2017/reviewing-the-state-of-the-art-of-burst-buffers + <!-- <div style="border-left: 3px solid #32aaff; border: 1px solid #32aaff; float: right; font-size: x-small; margin-left: 6px; padding: 6px; width: 250px;">If you're interested in burst buffers and happen to be a student, please reach out and contact me! We have an <a href="https://lbl.taleo.net/careersection/2/jobdetail.ftl?job=83459">internship opportunity in performance analysis of our 1.8 PB/1.5 TB/sec burst buffer</a> for students of all levels of experience.</div> + --> +<p>Just over two years ago I attended my first DOE workshop as a guest representative of the NSF supercomputing centers, and I wrote a post that summarized my key observations of how the DOE was approaching the increase in data-intensive computing problems.  At the time, the most significant thrusts seemed to be<br />&lt;ol&gt;&lt;li&gt;understanding scientific workflows to keep pace with the need to process data in complex ways&lt;/li&gt;&lt;li&gt;deploying burst buffers to overcome the performance limitations of spinning disk relative to the increasing scale of simulation data&lt;/li&gt;&lt;li&gt;developing methods and processes to curate scientific data&lt;/li&gt;&lt;/ol&gt;Here we are now two years later, and these issues still take center stage in the discussion surrounding the future of  data-intensive computing.  The DOE has made significant progress in defining its path forward in these areas though, and in particular, both the roles of burst buffers and scientific workflows have a much clearer focus on DOE’s HPC roadmap.  
Burst buffers in particular are becoming a major area of interest since they are now becoming commercially available, so in the interests of updating some of the incorrect or incomplete thoughts I wrote about two years ago, I thought I’d write about the current state of the art in burst buffers in HPC.<br /><br />Two years ago I had observed that there were two major camps in burst buffer implementations: one that is more tightly integrated with the compute side of the platform that utilizes explicit allocation and use, and another that is more closely integrated with the storage subsystem and acts as a transparent I/O accelerator.  Shortly after I made that observation though, Oak Ridge and Lawrence Livermore announced their GPU-based leadership systems, Summit and Sierra, which would feature a new type of burst buffer design altogether that featured on-node nonvolatile memory.<br /><br />This CORAL announcement, combined with the deployment of production, large-scale burst buffers at <a href="http://www.nersc.gov/news-publications/nersc-news/nersc-center-news/2015/early-users-to-test-new-burst-buffer-on-cori/">NERSC</a>, <a href="http://permalink.lanl.gov/object/tr?what=info:lanl-repo/lareport/LA-UR-15-27819">Los Alamos</a>, and <a href="https://www.hpc.kaust.edu.sa/content/datawarp-burst-buffer-0">KAUST</a>, has led me to re-think my taxonomy of burst buffers.  Specifically, it really is important to divide burst buffers into their hardware architectures and software usage modes; different burst buffer architectures can provide the same usage modalities to users, and different modalities can be supported by the same architecture.<br />&lt;div&gt;<br />&lt;/div&gt; +For the sake of laying it all out, let’s walk through the taxonomy of <i>burst buffer hardware architectures</i> and <i>burst buffer software usage modalities</i>.<br /><br />&lt;h2&gt;Burst Buffer Hardware Architectures&lt;/h2&gt;First, consider your typical medium- or large-scale HPC system architecture <i>without</i> a burst buffer:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://4.bp.blogspot.com/-3ETIppfFZVU/WMWRxNCWvSI/AAAAAAAAo7Y/qXtIJNn2LvQf-oyMSA-t3m2zQ7M7MeAPgCLcB/s1600/architecture-baseline.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="162" src="https://4.bp.blogspot.com/-3ETIppfFZVU/WMWRxNCWvSI/AAAAAAAAo7Y/qXtIJNn2LvQf-oyMSA-t3m2zQ7M7MeAPgCLcB/s320/architecture-baseline.png" width="320" /></a>&lt;/div&gt; +<br />In this design, you have<br /><br />&lt;ul&gt;&lt;li&gt;<b>Compute Nodes (CN)</b>, which might be commodity whitebox nodes like the <a href="https://www.nextplatform.com/2015/06/24/hyperscale-systems-make-headway-into-hpc/">Dell C6320 nodes in SDSC’s Comet system</a> or Cray XC compute blades&lt;/li&gt;&lt;li&gt;<b>I/O Nodes (ION)</b>, which might be commodity Lustre LNET routers (commodity clusters), <a href="http://docs.cray.com/PDF/XC_Series_DVS_Administration_Guide_CLE60UP01.pdf">Cray DVS nodes</a> (Cray XC), or <a href="http://glennklockwood.com/data-intensive/storage/io-forwarding.html#ciod-blue-gene-s-i-o-forwarder">CIOD forwarders</a> (Blue Gene)&lt;/li&gt;&lt;li&gt;<b>Storage Nodes (SN)</b>, which might be Lustre Object Storage Servers (OSSes) or GPFS Network Shared Disk (NSD) servers&lt;/li&gt;&lt;li&gt;<b>The compute fabric</b> (blue lines), which is typically Mellanox InfiniBand, Intel OmniPath, or Cray Aries&lt;/li&gt;&lt;li&gt;<b>The storage fabric</b> (red lines), which is typically Mellanox InfiniBand or Intel 
OmniPath&lt;/li&gt;&lt;/ul&gt;<br />Given all these parts, there are a bunch of different places you can stick flash devices to create a burst buffer.  For example…<br /><br />&lt;h3&gt;ION-attached Flash&lt;/h3&gt;You can put SSDs inside IO nodes, resulting in an <b>ION-attached flash architecture</b> that looks like this:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://2.bp.blogspot.com/-jc5J5bDY5RU/WMWU6URyljI/AAAAAAAAo7k/weeYZm3yRR0VFuD1dOsGnHv8DIEWP1aMQCLcB/s1600/architecture-on-ion.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="162" src="https://2.bp.blogspot.com/-jc5J5bDY5RU/WMWU6URyljI/AAAAAAAAo7k/weeYZm3yRR0VFuD1dOsGnHv8DIEWP1aMQCLcB/s320/architecture-on-ion.png" width="320" /></a>&lt;/div&gt; +<br />Gordon, which was the <a href="https://www.slideshare.net/glennklockwood/the-protoburst-buffer-experience-with-the-flashbased-file-system-on-sdscs-gordon">first large-scale deployment of what one could call a burst buffer</a>, had this architecture.  The flash was presented to the compute nodes as block devices using iSCSI, and a compute node could have anywhere between zero and <a href="https://kb.iu.edu/d/bcua">sixteen SSDs</a> mounted to it entirely via software.  More recently, the Tianhe-2 system at NUDT also deployed this architecture and exposes the flash to user applications via <a href="https://link.springer.com/article/10.1007/s11704-014-3499-6">their H<sup>2</sup>FS middleware</a>.<br /><br />&lt;h3&gt;Fabric-attached Flash&lt;/h3&gt;A very similar architecture is to add specific burst buffer nodes on the compute fabric that <i>don’t</i> route I/O, resulting in a <b>fabric-attached flash architecture</b>:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-Q5-lIwe8-UE/WMWZ8xgzkKI/AAAAAAAAo70/9OEOYVKanBY3z8r1nOE1bKbG84d3pu63wCLcB/s1600/architecture-on-edge.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="180" src="https://1.bp.blogspot.com/-Q5-lIwe8-UE/WMWZ8xgzkKI/AAAAAAAAo70/9OEOYVKanBY3z8r1nOE1bKbG84d3pu63wCLcB/s320/architecture-on-edge.png" width="320" /></a>&lt;/div&gt; +Like the ION-attached flash design of Gordon, the flash is still embedded within the compute fabric and is logically closer to the compute nodes than the storage nodes.  <a href="https://cug.org/proceedings/cug2016_proceedings/includes/files/pap105-file2.pdf">Cray’s DataWarp solution uses this architecture</a>.<br /><br />Because the flash is still on the compute fabric, this design is very similar to ION-attached flash and the decision to chose it over the ION-attached flash design is mostly non-technical.  It can be more economical to embed flash directly in I/O nodes if those nodes have enough peripheral ports (or physical space!) to support the NICs for the compute fabric, the NICs for the storage fabric, and the flash devices.  However as flash technology moves away from being attached via SAS and towards being directly attached to PCIe, it becomes more difficult to stuff that many high-performance peripherals into a single box without imbalancing something.  As such, it is likely that fabric-attached flash architectures will replace ION-attached flash going forward.<br /><br />Fortunately, any burst buffer software designed for ION-attached flash designs will also probably work on fabric-attached flash designs just fine.  
The only difference is that the burst buffer software will no longer have to compete against the I/O routing software for on-node resources like memory or PCIe bandwidth.<br /><br />&lt;h3&gt;CN-attached Flash&lt;/h3&gt;A very different approach to building burst buffers is to attach a flash device to every single compute node in the system, resulting in a <b>CN-attached flash architecture</b>:<br /><br />&lt;div&gt;&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://2.bp.blogspot.com/-lL1iGUOJOg4/WMWjk_pBKqI/AAAAAAAAo8I/Xd_3yi3-I0Usm_wnMswE8N18ciqMBmvZgCLcB/s1600/architecture-on-cn.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="183" src="https://2.bp.blogspot.com/-lL1iGUOJOg4/WMWjk_pBKqI/AAAAAAAAo8I/Xd_3yi3-I0Usm_wnMswE8N18ciqMBmvZgCLcB/s320/architecture-on-cn.png" width="320" /></a>&lt;/div&gt; +<br />This design is neither superior nor inferior to the ION/fabric-attached flash design.  The advantages it has over ION/fabric-attached flash include<br /><br />&lt;ul&gt;&lt;li&gt;<b>Extremely high peak I/O performance</b> - The peak performance scales linearly with the number of compute nodes, so the larger your job, the more performance your job can have.&lt;/li&gt;&lt;li&gt;<b>Very low variation in I/O performance</b> - Because each compute node has direct access to its locally attached SSD, contention on the compute fabric doesn’t affect I/O performance.&lt;/li&gt;&lt;/ul&gt;&lt;div&gt;However, these advantages come at a cost:&lt;/div&gt;</p> +<div><ul><li><b>Limited support for shared-file I/O</b> - &nbsp;Because each compute node doesn't share its SSD with other compute nodes, having many compute nodes write to a single shared file is not a straightforward process. &nbsp;Solutions to this issue range from such N-1 style I/O simply being impossible (the default case), to relying on <a href="http://computation.llnl.gov/projects/scalable-checkpoint-restart-for-mpi">I/O middleware like the SCR library</a> to manage data distribution, to relying on <a href="http://sc16.supercomputing.org/sc-archive/tech_poster/poster_files/post255s2-file2.pdf">sophisticated I/O services like Intel CPPR</a> to essentially journal all I/O to the node-local flash and flush it to the parallel file system asynchronously.</li><li><b>Data movement outside of jobs becomes difficult</b> - Burst buffers allow users to stage data into the flash <i>before</i> their job starts and stage data back to the parallel file system <i>after</i> their job ends. &nbsp;However, in CN-attached flash, this staging will occur while someone else's job might be using the node. &nbsp;This can cause interference, capacity contention, or bandwidth contention. &nbsp;Furthermore, it becomes very difficult to persist data on a burst buffer allocation across multiple jobs without flushing and re-staging it.</li><li><b>Node failures become more problematic</b> - The point of writing out a checkpoint file is to allow you to restart a job in case one of its nodes fails. &nbsp;If your checkpoint file is actually stored on one of the nodes that failed, though, the whole checkpoint gets lost when a node fails. &nbsp;Thus, it becomes critical to flush checkpoint files to the parallel file system as quickly as possible so that your checkpoint file is safe if a node fails. 
&nbsp;Realistically though, most application failures are not caused by node failures; a study by LLNL found that <a href="http://ieeexplore.ieee.org/document/5645453/">85% of job interrupts do not take out the whole node</a>.</li><li><b>Performance cannot be decoupled from job size</b> - Since you get more SSDs by requesting more compute nodes, there is no way to request only a few nodes and a lot of SSDs. &nbsp;While this is less of an issue for extremely large HPC jobs whose I/O volumes typically scale linearly with the number of compute nodes, data-intensive applications often have to read and write large volumes of data but cannot effectively use a huge number of compute nodes.</li></ul><div>If you take a step back and look at what these strengths and weaknesses play to, you might be able to envision what sort of supercomputer design might be best suited for this type of architecture:</div>
</div>
<div><ul><li><b>Relatively low node count</b>, so that you aren't buying way more SSD capacity or performance than you can realistically use given the bandwidth of the parallel file system to which the SSDs must eventually flush</li><li><b>Relatively beefy compute nodes</b>, so that the low node count doesn't hurt you and so that you can tolerate running I/O services to facilitate the asynchronous staging of data and middleware to support shared-file I/O</li><li><b>Relatively beefy network injection bandwidth</b>, so that asynchronous stage in/out doesn't severely impact the MPI performance of the jobs that run before/after yours</li></ul><div>There are also specific application workloads that are better suited to this CN-attached flash design:</div>
<ul><li><b>Relatively large job sizes on average</b>, so that applications routinely use enough compute nodes to get enough I/O bandwidth. &nbsp;Small jobs may be better off using the parallel file system directly, since parallel file systems can usually deliver more I/O bandwidth to smaller compute node counts.</li><li><b>Relatively low diversity of applications</b>, so that any applications that rely on shared-file I/O (which is not well supported by CN-attached flash, as we'll discuss later) can either be converted to use the necessary I/O middleware like SCR, or be restructured to use only file-per-process I/O or to avoid strong consistency semantics.</li></ul></div>
<div>And indeed, if you look at the systems that are planning on deploying this type of CN-attached flash burst buffer in the near future, they all fit this mold. &nbsp;In particular, the CORAL Summit and Sierra systems will be deploying these burst buffers at extreme scale, and before them, <a href="https://twitter.com/ProfMatsuoka/status/837438733133754376">Tokyo Tech's Tsubame 3.0</a> will as well. 
&nbsp;All of these systems derive the majority of their performance from GPUs, leaving the CPUs with the capacity to implement more of their burst buffers' functionality in software on the CNs.</div>
<div><br /></div>
<h3>Storage Fabric-attached Flash</h3>
<div>The last notable burst buffer architecture involves attaching the flash to the storage fabric rather than the compute fabric, resulting in SF-attached flash:</div>
<div><br /></div>
<div class="separator" style="clear: both; text-align: center;"><a href="https://3.bp.blogspot.com/-Eu5ZEFFdU4Q/WMWw2M_rJqI/AAAAAAAAo8c/y8twoMUx0h4cGUCTy0LPH9rkonVlW9gMwCLcB/s1600/architecture-backend.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="155" src="https://3.bp.blogspot.com/-Eu5ZEFFdU4Q/WMWw2M_rJqI/AAAAAAAAo8c/y8twoMUx0h4cGUCTy0LPH9rkonVlW9gMwCLcB/s320/architecture-backend.png" width="320" /></a></div>
<div><br /></div>
<div>This is not a terribly popular design because</div>
<div><ol><li>it moves the flash far away from the compute node, which is counterproductive to low latency</li><li>it requires that the I/O forwarding layer (the IONs) support enough bandwidth to saturate the burst buffer, which can get expensive</li></ol><div>However, for those HPC systems with custom compute fabrics that are not amenable to adding third-party burst buffers, this may be the only possible architecture. &nbsp;For example, the Argonne Leadership Computing Facility has deployed a <a href="http://files.gpfsug.org/presentations/2016/anl-june/ESS_GPFSUG.pdf">high-performance GPFS file system as a burst buffer</a> alongside their high-capacity GPFS file system in this fashion because it is impractical to integrate flash into their Blue Gene/Q's proprietary compute fabric. &nbsp;Similarly, sites that deploy DDN's Infinite Memory Engine burst buffer solution on systems with proprietary compute fabrics (e.g., Cray Aries on Cray XC) will have to deploy their burst buffer nodes on the storage fabric.</div>
</div>
<div><br /></div>
<h2>Burst Buffer Software</h2>
<div>Ultimately, all of the different burst buffer architectures still amount to sticking a bunch of SSDs into a supercomputing system, and if that were all it took to make a burst buffer, burst buffers wouldn't be very interesting. &nbsp;Thus, there is another half of the burst buffer ecosystem: the software and middleware that transform a pile of flash into an I/O layer that applications can actually use productively.</div>
<div><br /></div>
<div>In the absolute simplest case, this software layer can just be an XFS file system atop RAIDed SSDs that is presented to user applications as node-local storage. &nbsp;And indeed, this is what SDSC's Gordon system did; for many workloads such as file-per-process I/O, it is a suitable way to get great performance. &nbsp;However, as commercial vendors have gotten into the burst buffer game, they have all started using this software layer to differentiate their burst buffer solutions from their competitors'. &nbsp;This has resulted in modern burst buffers having a lot of functionality that allows users to do interesting new things with their I/O.</div>
<div><br /></div>
<div>Because this burst buffer differentiation happens entirely in software, it should be no surprise that these burst buffer software solutions look a lot like the software-defined storage products being sold in the enterprise cloud space. 
&nbsp;The difference is that burst buffer software can be optimized specifically for HPC workloads and technologies, resulting in much nicer and more accessible ways for HPC applications to use them.</div>
<div><br /></div>
<h3>Common Software Features</h3>
<div>Before getting too far, it may be helpful to enumerate the features common to many burst buffer software solutions:</div>
<div><ul><li><b>Stage-in and stage-out</b> - Burst buffers are designed to have a job's input data already available on the burst buffer when the job starts, and to allow output data to be flushed to the parallel file system after the job ends. &nbsp;To make this happen, the burst buffer service must give users a way to indicate what files they want to be available on the burst buffer when they submit their job, and users must also have a way to indicate what files they want to flush back to the file system after the job ends.</li><li><b>Background data movement</b> - Burst buffers are also not designed to be long-term storage, so their reliability can be lower than the underlying parallel file system. &nbsp;As such, users must also have a way to tell the burst buffer to flush intermediate data back to the parallel file system while the job is still running. &nbsp;This should happen using server-to-server copying that doesn't involve the compute node at all.</li><li><b>POSIX I/O API compatibility</b> - The vast majority of HPC applications rely on the POSIX I/O API (open/close/read/write) to perform I/O, and most job scripts rely on tools developed for the POSIX I/O API (cd, ls, cp, mkdir). &nbsp;As such, all burst buffers provide the ability to interact with data through the POSIX I/O API so that they look like regular old file systems to user applications. &nbsp;That said, the POSIX I/O <i>semantics</i> might not be fully supported; as will be described below, you may get an I/O error if you try to perform I/O in a fashion that is not supported by the burst buffer.</li></ul><div>With all this being said, there are still a variety of ways in which these core features can be implemented into a complete burst buffer software solution. &nbsp;Specifically, burst buffers can be accessed through one of several different modes, and each mode provides a different balance of peak performance and usability.</div>
</div>
<div><br /></div>
<h3>Transparent Caching Mode</h3>
<div>The most user-friendly burst buffer mode, which I call <b>transparent caching mode</b>, uses flash to simply act as a giant cache for the parallel file system. &nbsp;Applications see the burst buffer as a mount point on their compute nodes; this mount point mirrors the contents of the parallel file system, and any changes made to one will appear on the other. 
&nbsp;For example,<br /><br /></div>
<div><pre>$ ls /mnt/lustre/glock<br />bin project1 project2 public_html src<br /><br />### Burst buffer mount point contains the same stuff as Lustre<br />$ ls /mnt/burstbuffer/glock<br />bin project1 project2 public_html src<br /><br />### Create a file on Lustre...<br />$ touch /mnt/lustre/glock/hello.txt<br /><br />$ ls /mnt/lustre/glock<br />bin hello.txt project1 project2 public_html src<br /><br />### ...and it automatically appears on the burst buffer.<br />$ ls /mnt/burstbuffer/glock<br />bin hello.txt project1 project2 public_html src<br /><br />### However its contents are probably not on the burst buffer's flash<br />### yet since we haven't read its contents through the burst buffer<br />### mount point, which is what would cause it to be cached<br /></pre><div><br />However, if I access a file through the burst buffer mount (<code>/mnt/burstbuffer/glock</code>) rather than the parallel file system mount (<code>/mnt/lustre/glock</code>),<br /><ol><li>if hello.txt is already cached on the burst buffer's SSDs, it will be read directly from flash</li><li>if hello.txt is not already cached on the SSDs, the burst buffer will read it from the parallel file system, cache its contents on the SSDs, and return its contents to me</li></ol><div>Similarly, if I write to hello.txt via the burst buffer mount, my data will be cached to the SSDs and <i>will not</i> immediately appear on the parallel file system. &nbsp;It will eventually flush out to the parallel file system, or I could tell the burst buffer service to explicitly flush it myself.</div>
<div><br /></div>
<div>This transparent caching mode is by far the easiest, since it looks exactly like the parallel file system for all intents and purposes. &nbsp;However, if your application will never read any data more than once, this fully transparent mode is far less useful. &nbsp;As such, burst buffers that implement this mode provide proprietary APIs that allow you to stage in data, control the caching heuristics, and explicitly flush data from the flash to the parallel file system.</div>
<div><br /></div>
<div>DDN's Infinite Memory Engine and Cray's DataWarp both implement this transparent caching mode, and, in principle, it can be implemented on any of the burst buffer architectures outlined above.</div>
</div>
<div><br /></div>
<div><h3>Private PFS Mode</h3>Although the transparent caching mode is the easiest to use, it doesn't give users a lot of control over what data does or doesn't need to be staged into the burst buffer. &nbsp;Another access mode involves creating a private parallel file system on-demand for jobs, which I will call <b>private PFS mode</b>. 
&nbsp;It provides a new parallel file system that is only mounted on your job's compute nodes, and this mount point contains only the data you explicitly copy to it:</div>
<div><br /><pre>### Burst buffer mount point is empty; we haven't put anything there,<br />### and this file system is private to my job<br />$ ls /mnt/burstbuffer<br /><br />### Create a file on the burst buffer file system...<br />$ dd if=/dev/urandom of=/mnt/burstbuffer/mydata.bin bs=1M count=10<br />10+0 records in<br />10+0 records out<br />10485760 bytes (10 MB) copied, 0.776115 s, 13.5 MB/s<br /><br />### ...it appears on the burst buffer file system...<br />$ ls -l /mnt/burstbuffer<br />-rw-r----- 1 glock glock 10485760 Jan 1 00:00 mydata.bin<br /><br />### ...and Lustre remains entirely unaffected<br />$ ls /mnt/lustre/glock<br />bin project1 project2 public_html src</pre><br /></div>
This is a little more complicated than transparent caching mode because you must now manage two file system namespaces: the parallel file system and your private burst buffer file system. &nbsp;However, this gives you the option to target your I/O to one or the other, so that a tiny input deck can stay on Lustre while your checkpoints are written out to the burst buffer file system.<br /><br />In addition, the burst buffer private file system is strongly consistent; as soon as you write data out to it, you can read that data back from any other node in your compute job. &nbsp;While this is true of transparent caching mode <i>if you always access your data through the burst buffer mount point</i>, you can run into trouble if you accidentally try to read a file from the original parallel file system mount point after writing out to the burst buffer mount. &nbsp;Since private PFS mode provides a completely different file system and namespace, it's a bit harder to make this mistake.<br /><br />Cray's DataWarp implements private PFS mode, and the <a href="https://twitter.com/ProfMatsuoka/status/837440717836414976">Tsubame 3.0 burst buffer will be implementing private PFS mode using on-demand BeeGFS</a>. &nbsp;This mode is most easily implemented on fabric/ION-attached flash architectures, but Tsubame 3.0 is demonstrating that it can also be done on CN-attached flash.<br /><br /><h3>Log-structured/Journaling Mode</h3>As probably the least user-friendly but highest-performing use mode, <b>log-structured (or journaling) mode</b> burst buffers present themselves to users like a file system, but they do not support the full extent of file system features. &nbsp;Under the hood, writes are saved to the flash not as files, but as records that contain a timestamp, the data to be written, and the location in the file to which the data should be written. &nbsp;These logs are continually appended as the application performs its writes, and when it comes time to flush the data to the parallel file system, the logs are replayed to effectively reconstruct the file that the application was trying to write.<br /><br />This can perform extremely well since even random I/O winds up being restructured as sequentially appended I/O. &nbsp;Furthermore, there can be as many logs as there are writers; this allows writes to happen with zero lock contention, since contended writes are resolved when the data is replayed and flushed.<br /><br />Unfortunately, log-structured writes make reading very difficult, since a read can no longer seek directly to a file offset to find the data it needs. &nbsp;Instead, the log needs to be replayed to some degree, effectively forcing a flush to occur. &nbsp;Furthermore, if the logs are spread out across different logical flash domains (as would happen in CN-attached flash architectures), read-back may require the logs to be centrally collected before the replay can happen, or it may require inter-node communication to coordinate who owns the different bytes that the application needs to read.
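<br /><br />To make the log-structured idea concrete, here is a minimal sketch in C of what a single write record and its append path might look like.  The record layout and file name are purely illustrative assumptions on my part--not any particular vendor's on-flash format--and all error handling is omitted:<br /><br />
<pre>#include &lt;stdint.h&gt;
#include &lt;stdio.h&gt;
#include &lt;string.h&gt;

/* One log record: where the payload belongs in the target file, plus the
 * payload itself.  Writes only ever append records; nothing is updated
 * in place, which is why random writes become sequential I/O. */
struct log_record {
    uint64_t timestamp;   /* when the write happened, for replay ordering */
    uint64_t file_offset; /* where the payload goes in the logical file */
    uint64_t length;      /* number of payload bytes that follow */
};

/* Append one write to the node-local log instead of seeking in a file. */
static void log_write(FILE *log, uint64_t ts, uint64_t offset,
                      const void *data, uint64_t len)
{
    struct log_record rec = { ts, offset, len };
    fwrite(&amp;rec, sizeof(rec), 1, log); /* header... */
    fwrite(data, 1, len, log);         /* ...then payload, appended sequentially */
}

int main(void)
{
    FILE *log = fopen("bb-node0.log", "wb"); /* hypothetical per-node log */
    char buf[4096];
    memset(buf, 'A', sizeof(buf));
    /* A "random" write at offset 1 GiB still lands as a sequential append. */
    log_write(log, 1, 1ULL &lt;&lt; 30, buf, sizeof(buf));
    fclose(log);
    return 0;
}</pre>
<br />At flush time, the records are read back in timestamp order and each payload is written to its file_offset in the real file on the parallel file system--that is the "replay" step described above.<br /><br />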
What this amounts to is functionality that may present itself like a private parallel file system burst buffer, but behaves very differently on reads and writes. &nbsp;For example, attempting to read data that exists in a log belonging to another writer might generate an I/O error, so applications (or I/O middleware) probably need to have very well-behaved I/O to get the full performance benefits of this mode. &nbsp;Most extreme-scale HPC applications already do this, so log-structured/journaling mode is a very attractive approach for very large applications that rely on extreme write performance to checkpoint their progress.<br /><br />Log-structured/journaling mode is well suited for CN-attached flash since logs do not need to live on a file system that presents a single shared namespace across all compute nodes. &nbsp;In practice, the IBM CORAL systems will probably provide log-structured/journaling mode through IBM's burst buffer software. &nbsp;Oak Ridge National Laboratory has also demonstrated <a href="http://ieeexplore.ieee.org/document/7004215/">a log-structured burst buffer system called BurstMem</a> on a fabric-attached flash architecture. &nbsp;Intel's CPPR library, to be deployed with the Argonne Aurora system, <a href="file://C:/Users/Glenn/Downloads/MS19_CORAL_NRE_CPPR_HLD_v1.1_Final.pdf">may also implement this functionality</a>&nbsp;atop the 3D XPoint to be embedded in each compute node.<br /><br /><h3>Other Modes</h3>The above three modes are not the only ones that burst buffers may implement, and some burst buffers support more than one of the above modes. &nbsp;For example, Cray's DataWarp, in addition to supporting private PFS and transparent caching modes, also has a swap mode that allows compute nodes to use the flash as swap space to prevent hard failures for data analysis applications that consume non-deterministic amounts of memory. &nbsp;In addition, <a href="file://C:/Users/Glenn/Downloads/MS18_CORAL_NRE_CPPR_ScopeStatement_V1.1_final%20(3).pdf">Intel's CPPR library is targeting byte-addressable nonvolatile memory</a>, which would expose a load/store interface, rather than the typical POSIX open/write/read/close interface, to applications.<br /><br /><h2>Outlook</h2></div>
<div>Burst buffers, practically speaking, remain in their infancy, and there is a lot of room for the landscape I've outlined here to change. &nbsp;For example, the common software features I highlighted (staging, background data movement, and POSIX API support) are still largely implemented via proprietary, non-standard APIs at present. &nbsp;There is an effort to get burst buffer vendors to agree on a common API, and as this process proceeds, features may appear or disappear as customers define what is and isn't a worthwhile differentiating feature.</div>
<div><br /></div>
<div>On the hardware front, the burst buffer ecosystem is also in flux. &nbsp;ION-attached flash is where burst buffers began, but as discussed above, it is likely to be replaced by dedicated fabric-attached flash servers. 
&nbsp;In addition, the emergence of storage-class memory (that is, byte-addressable nonvolatile memory) will also add a new dimension to burst buffers that may make one architecture the clear winner over the others. &nbsp;At present though, both fabric-attached and CN-attached burst buffers have their strengths and weaknesses, and neither is at risk of disappearing in the next five years.</div>
<div><br /></div>
<div>As more extreme-scale systems begin to hit the floor and users figure out what does and doesn't work across the diversity of burst buffer hardware and software features, the picture is certain to become clearer. &nbsp;Once that happens, I'll be sure to post another update.</div>


Beyond Single Core R - Parallel Data Analysis

2017-02-15T00:00:00-07:00
https://hpc.social/2017/beyond-single-core-r-parallel-data-analysis
<p>I was asked recently to do a short presentation for the <a href="https://www.meetup.com/Greater-Toronto-Area-GTA-R-Users-Group">Greater Toronto R Users Group</a> on parallel computing in R; my slides can be seen below or on <a href="https://ljdursi.github.io/beyond-single-core-R">github</a>, where <a href="https://github.com/ljdursi/beyond-single-core-R">the complete materials can be found</a>.</p>

<p>I covered much of the same material as in a half-day workshop a couple of years earlier (though, obviously, without the hands-on component):</p>

<ul>
  <li>How to think about parallelism and scalability in data analysis</li>
  <li>The standard parallel package, including what were formerly the snow and multicore facilities, using airline data as an example</li>
  <li>The foreach package, using airline data and simple stock data</li>
  <li>A summary of best practices</li>
</ul>

<p>with some bonus material tacked on the end touching on a couple of advanced topics.</p>

<p>I was quite surprised at how little had changed since late 2014, other than further development of <a href="http://spark.apache.org/docs/latest/sparkr.html">SparkR</a> (which I didn’t cover), and the interesting but seemingly not very much used <a href="https://cran.r-project.org/web/packages/future/index.html">future</a> package. I was also struck by how hard it is to find similar materials online, covering a range of parallel computing topics in R - it’s rare enough that even this simple effort made it to the <a href="https://cran.r-project.org/web/views/HighPerformanceComputing.html">HPC project view on CRAN</a> (under “related links”). R <a href="http://spectrum.ieee.org/computing/software/the-2016-top-programming-languages">continues to grow in popularity</a> for data analysis; is this all desktop computing? Is Spark siphoning off the clustered-dataframe usage?</p>

<p>(This was also my first time with <a href="https://support.rstudio.com/hc/en-us/articles/200486468-Authoring-R-Presentations">RPres</a> in RStudio; wow, not a fan, RPres was <em>not</em> ready for general release. And I’m a big fan of RMarkdown.)</p>


Oh Woe is 2016 - NOT!

2017-01-09T21:17:58-07:00
https://hpc.social/2017/oh-woe-is-2016-not-
<p>As we enter a new year, 2016 seems to have been tarnished in its closing month by events around the world. Far be it from me to talk about world events here; I’d like to focus on the good - at least from my perspective. 2016 was a great year for me. 
It was the year in which I managed to:</p>

<ul>
<li>Moved house</li>
<li>Upgraded from a late-1980s to a late-1990s German station wagon (“estate” for those who speak real English)</li>
<li>Moved from Blackberry 10 to Android - <em>blech</em> - but I’ll admit my HTC 10 is a fantastic piece of hardware</li>
<li>Decided that I no longer revere Apple products as I once did - before any harsh words, I am writing this on a Macbook Pro Retina…and I have a veritable museum of Apple kit at home</li>
<li>Took my first steps to learn about machine learning frameworks like Caffe and Tensorflow - yes, I’ve been tinkering with Caffe on one of my ARM developer boards</li>
<li>Stuck with Linux for my work laptop even with the tantalizing choice of a shiny new Macbook with OS X</li>
<li>Entrusted the security of my home internet to the <a href="https://www.turris.cz/en/">Turris Omnia</a> - because using a router that hasn’t been patched in years is, well, silly, to put it politely</li>
<li>Finally got myself an OpenPOWER t-shirt at ISC High-Performance - which I wear proudly because OpenPOWER rocks!</li>
<li>Understood that getting future generations interested in technology is key - and did my part by giving an intro to High-Performance Computing talk at a local school</li>
<li>Successfully launched IBM Spectrum LSF 10.1 with the help of my many great peers. And yes, it does run on Linux on Arm v7&amp;v8 and Linux on POWER8 Little Endian :)</li>
</ul>
<p>And that’s just what I can think of as I write this blog…so for me, 2016 has an aura rather than a tarnish to it.</p>

<p>So as we enter the year of Canada’s 150th birthday with a full head of steam, I’m looking forward to hitchin' my wagon to some of the cool things coming up, including:</p>

<ul>
<li>Exploring the wonderful national parks of Canada at no charge with my Parks Canada pass</li>
<li>OpenPOWER and IBM POWER9</li>
<li>Building up my home ARMy with a pre-ordered Armada 8040 Community Board, which should help to speed up the machine learning I’ve been tinkering with</li>
</ul>
<p>And that’s just for starters. What’s your plan?</p>


SC16 - Stir it up!

2016-11-11T00:54:36-07:00
https://hpc.social/2016/sc16-stir-it-up-
<p>It&rsquo;s been ages since I&rsquo;ve posted to this blog. I&rsquo;ve not forgotten about it - I&rsquo;ve been figuratively stirring the technical computing goulash pot over on the IBM Systems In the Making <a href="https://www.ibm.com/blogs/systems/author/gabor-samu/">blog</a>.</p>

<p>Having recently moved house, all of the old classic and newer Arm based systems that I&rsquo;ve written about previously are still mostly packed away. My hands have been more focused on home improvement rather than tinkering. As those in HPC circles will know, the annual Supercomputing <a href="http://sc16.supercomputing.org/">SC16</a> event starts this coming Sunday in Salt Lake City, UT. Interestingly, if my memory serves me well, the last time we were in Salt Lake City for <a href="http://sc12.supercomputing.org/">SC12</a>, I was a newbie with IBM, having joined as a result of the acquisition of Platform Computing.</p>

<p>The HPC landscape has changed quite a bit since then, including the divestiture of the IBM x86 server business to Lenovo and the birth of the OpenPOWER Foundation. 
The OpenPOWER Foundation has gone from baby steps to sprinting with a huge and diverse group of members from accelerators, interconnects, research organizations and more - all united on a common goal: to drive innovation and change in enterprise computing and HPC via the OpenPOWER platform. It&rsquo;s like somebody has taken a big wooden spoon and stirred the goulash in the pot - because we all know that if things stand still for too long in the pot, it&rsquo;s going to burn.</p>

<p>As I&rsquo;ve banged on about in previous blogs, I&rsquo;m more pleased than ever to see this explosion of diversity in HPC from A(RM), P(OWER) to X(86). When you throw accelerators such as FPGAs and GPUs into the mix, what is needed more than ever is a software defined approach - one which hides the complexity of these diverse environments from users and allows them to leverage the power of today&rsquo;s systems.</p>

<p><a href="https://www.ibm.com/us-en/marketplace/hpc-workload-management">IBM Spectrum LSF</a> (formerly Platform LSF) has been making this possible for over 20 years. A glance at the OS and platform support <a href="https://www.ibm.com/developerworks/community/wikis/home?lang=en#!/wiki/New%20IBM%20Platform%20LSF%20Wiki/page/LSF%20system%20requirements">list</a> illustrates the breadth and depth of OS and processor support. Not only does IBM Spectrum LSF make tying together heterogeneous resources easy, its proven technology allows organizations to share resources on a global scale. In fact, the latest IBM Spectrum LSF V10 release from June 2016 contained numerous enhancements focused on improving the productivity of HPC users and controlling costs. Read more in this top 10 cool things about IBM Spectrum LSF blog. And looking beyond HPC, the IBM Spectrum Computing family of products helps provide advanced resource management capabilities for diverse workloads including Hadoop and Spark.</p>

<p>Yours truly will be in Salt Lake City for SC16. Drop by booth 1018 to talk about how IBM software defined computing can help your organization. IBM will be holding a number of user groups and seminars covering the broad spectrum of IBM solutions for HPC. And for IBM Spectrum LSF users, we&rsquo;ll be holding our annual user group, where you can hear how your peers are using IBM Spectrum LSF to get an advantage, and learn about the latest developments in IBM Spectrum LSF from our experts.</p>

<p>Come on and stir it up! You&rsquo;ll like it!</p>


MPI's Place in Big Computing

2016-10-14T01:00:00-06:00
https://hpc.social/2016/mpi-s-place-in-big-computing
<p>The organizers of <a href="http://www.eurompi2016.ed.ac.uk">EuroMPI 2016</a> were kind enough to invite me to give a keynote and participate in a panel at their meeting, which was held at the end of September in beautiful Edinburgh. The event was terrific, with lots of very interesting work going on in MPI implementations and with MPI.</p>

<p>The topic of my talk was “MPI’s Place in Big Computing”; the materials from the talk can be found <a href="http://github.com/ljdursi/EuroMPI2016">on github</a>. 
The talk, as you might expect, included discussion of high-productivity big data frameworks, but also — and missing from the discussion in my “HPC is dying” blog post — the “data layer” frameworks that underpin them.</p>

<p>I think a lot of people have, quite reasonably, taken that blog post to suggest that <a href="http://spark.apache.org">Spark</a>, for example, is a competitor to MPI; the point I wanted to make is a little more nuanced than that.</p>

<p>I’m actually skeptical of Spark’s utility for (<em>e.g.</em>) large-scale simulations. However attractive the model is from a variety of points of view, absent some huge breakthrough I don’t think that functional models with immutable data can support the performance, memory requirements, or performance predictability we require. (But who knows; maybe that’ll be one of the compromises we find we have to make on the road to exascale).</p>

<p>But whatever you might think of Spark’s efficacy for your particular use case,</p>

<ul>
  <li>A lot of people manifestly find it to be extremely useful for <em>their</em> use case; and</li>
  <li>Performance is quite important to those communities.</li>
</ul>

<p>So given that, why isn’t Spark built atop MPI for network communications? And why isn’t <a href="http://tensorflow.org">TensorFlow</a>, or <a href="http://dask.pydata.org">Dask</a>, or <a href="http://www.seastar-project.org">SeaStar</a>?</p>

<p>The past five years have seen a huge number of high-productivity tools for large-scale number crunching gain extremely rapid adoption. Even if you don’t like those particular tools for your problems, surely you’d like for there to exist some tools like that for the traditional HPC community; why do other communications frameworks support this flourishing ecosystem of platforms while MPI doesn’t?</p>

<p>There’s another argument there, too - simply from a self-preservation point of view, it would be in MPI’s interest to be adopted by a high-profile big data platform to ensure continued success and support. But none are; why? It’s not because the developers of Spark or at Google are just too dumb to figure out MPI’s syntax.</p>

<p>Going through what does get used for these packages and what doesn’t — which is what I do in this talk — I think the issues become fairly clear. MPI wants to be both a low-level communications framework and a higher-level programming model, and ends up tripping over its own feet trying to dance both dances. As a communications “data plane” it imposes too many high-level decisions on applications — no fault tolerance, restrictive communications semantics (in-order and arrival guarantees) — and provides too few services (<em>e.g.</em>, a performant active message/RPC layer). And as a high-level programming model it is too low-level and is missing different services (communications-aware scheduling came up in several guises at the meeting).</p>

<p>I don’t think that’s insurmountable; I think inside MPI implementations there is a performant, network-agnostic low-level communications layer trying to get out. Exposing more MPI runtime services is a move in the right direction. 
I was surprised at how open the meeting participants were to making judicious changes — even perhaps breaking some backwards compatibility — in the right directions.</p>

<p>Thanks again to the organizers for extending the opportunity to participate; it was great.</p>

<p>My slides can be seen below or on <a href="http://ljdursi.github.io/EuroMPI2016/#1">github</a>, where <a href="http://github.com/ljdursi/EuroMPI2016">the complete materials can be found</a>.</p>


Jupyter Notebooks for Performing and Sharing Bioinformatics Analyses

2016-09-09T01:00:00-06:00
https://hpc.social/2016/jupyter-notebooks-for-performing-and-sharing-bioinformatics-analyses
<p>I was asked to do a half-day tutorial at the <a href="https://www.iscb.org/glbioccbc2016-program/workshops">Great Lakes Bioinformatics conference Workshop session</a>. The focus was mainly on R, with some python as well. We covered:</p>

<ul>
  <li>The basics of Jupyter notebooks - what they are and how they work</li>
  <li>How to install and run Jupyter notebooks on their laptop, in R and Python</li>
  <li>How to perform interactive analyses in a web browser using Jupyter</li>
  <li>Using markdown and LaTeX</li>
  <li>How to “Port” an R bioinformatics workflow from some scripts into a Jupyter notebook</li>
  <li>How to share a Jupyter notebook online, using three different approaches
    <ul>
      <li>SageMathCloud</li>
      <li>GitHub and</li>
      <li>mybinder.org</li>
    </ul>
  </li>
</ul>

<p>I think it went pretty well; the materials are available <a href="https://github.com/ljdursi/glbio-jupyter-workshop">On GitHub</a>. It was largely hands-on, so apart from some <a href="https://github.com/ljdursi/glbio-jupyter-workshop/blob/master/Slides/Jupyter.pdf">introductory slides</a>, it was mainly about giving a tour of the notebook and how to use Jupyter to share analyses; the “scripts” that I went through in presenting the material were aimed at having the students produce the notebooks <a href="https://github.com/ljdursi/glbio-jupyter-workshop/tree/master/Notebooks">here</a>.</p>


Basics of I/O Benchmarking

2016-07-22T07:07:00-06:00
https://hpc.social/2016/basics-of-i-o-benchmarking
<p>Most people in the supercomputing business are familiar with using FLOPS as a proxy for how fast or capable a supercomputer is.  This measurement, as observed using the <a href="http://www.netlib.org/benchmark/hpl/">High-Performance Linpack (HPL)</a> benchmark, is the basis for the Top500 list.  However, I/O performance is becoming increasingly important as data-intensive computing becomes a driving force in the HPC community, and even though there is no Top500 list for I/O subsystems, the <a href="http://www.nersc.gov/research-and-development/apex/apex-benchmarks/ior/">IOR</a> benchmark has become the <i>de facto</i> standard way to measure the I/O capability of clusters and supercomputers.<br /><br />Unfortunately, I/O performance tends to be trickier to measure using synthetic benchmarks because of the complexity of the I/O stack that lies between where data is generated (the CPU) and where it'll ultimately be stored (a spinning disk or SSD on a network file system).  
In the interests of clarifying some of the confusion that can arise when trying to determine how capable an I/O subsystem really is, let's take a look at some of the specifics of running IOR.<br /><br />&lt;h2&gt;Getting Started with IOR&lt;/h2&gt;IOR writes data sequentially with the following parameters:<br />&lt;ul&gt;&lt;li&gt;<span style="font-family: monospace;">blockSize</span> (<span style="font-family: monospace;">-b</span>)&lt;/li&gt;&lt;li&gt;<span style="font-family: monospace;">transferSize</span> (<span style="font-family: monospace;">-t</span>)&lt;/li&gt;&lt;li&gt;<span style="font-family: monospace;">segmentCount</span> (<span style="font-family: monospace;">-s</span>)&lt;/li&gt;&lt;li&gt;<span style="font-family: monospace;">numTasks</span> (<span style="font-family: monospace;">-n</span>)&lt;/li&gt;&lt;/ul&gt;&lt;div&gt;which are best illustrated with a diagram:&lt;/div&gt;
<br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://2.bp.blogspot.com/-fok4ue8yCiw/V2B-5BCjIlI/AAAAAAAASw0/do7YfsfV8I00b35WAWTeZdiPeWOau_oxwCLcB/s1600/ior-io-pattern.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="212" src="https://2.bp.blogspot.com/-fok4ue8yCiw/V2B-5BCjIlI/AAAAAAAASw0/do7YfsfV8I00b35WAWTeZdiPeWOau_oxwCLcB/s400/ior-io-pattern.png" width="400" /></a>&lt;/div&gt;
<br />These four parameters are all you need to get started with IOR.  However, naively running IOR usually gives disappointing results.  For example, if we run a four-node IOR test that writes a total of 16 GiB:<br /><br />&lt;pre&gt;$ mpirun -n 64 ./ior -t 1m -b 16m -s 16<br />...<br />access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter<br />------ --------- ---------- --------- -------- -------- -------- -------- ----<br /><span style="background-color: #ffff7f; border-radius: 4px; color: #c7254e;">write 427.36 </span> 16384 1024.00 0.107961 38.34 32.48 38.34 2<br /><span style="background-color: #ffff7f; border-radius: 4px; color: #c7254e;">read 239.08 </span> 16384 1024.00 0.005789 68.53 65.53 68.53 2<br />remove - - - - - - 0.534400 2<br />&lt;/pre&gt;&lt;div&gt;<br />we can only get a couple hundred megabytes per second out of a Lustre file system that should be capable of a lot more.<br /><br />Switching from writing to a single-shared file to one file per process using the <code>-F</code> (<code>filePerProcess=1</code>) option changes the performance dramatically:&lt;/div&gt;
<div><br /></div>
<pre>$ mpirun -n 64 ./ior -t 1m -b 16m -s 16 -F<br />...<br />access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter<br />------ --------- ---------- --------- -------- -------- -------- -------- ----<br /><span style="background-color: #ffff7f; border-radius: 4px; color: #c7254e;">write 33645 </span> 16384 1024.00 0.007693 0.486249 0.195494 0.486972 1<br /><span style="background-color: #ffff7f; border-radius: 4px; color: #c7254e;">read 149473 </span> 16384 1024.00 0.004936 0.108627 0.016479 0.109612 1<br />remove - - - - - - 6.08 1</pre>
<div><div><br />This is in large part because letting each MPI process work on its own file cuts out any contention that would arise because of file locking. &nbsp;</div>
<div><br /></div>
<div>However, the performance difference between our naive test and the file-per-process test is a bit extreme. &nbsp;In fact, the only way that 146 GB/sec read rate could be achievable on Lustre is if each of the four compute nodes had over 45 GB/sec of network bandwidth to Lustre--that is, a 400 Gbit link on every compute and storage node.
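<br /><br />As an aside, the way these four parameters combine into a total data volume is simple multiplication; here is a quick sketch (my own illustrative arithmetic, using the flags from the runs above) that reproduces the 16 GiB figure:<br /><br />
<pre>#include &lt;stdio.h&gt;

int main(void)
{
    /* Parameters from the runs above: -t 1m -b 16m -s 16, 64 MPI tasks */
    unsigned long long transfer_size = 1ULL &lt;&lt; 20;  /* -t 1m  */
    unsigned long long block_size    = 16ULL &lt;&lt; 20; /* -b 16m */
    unsigned long long segment_count = 16;          /* -s 16  */
    unsigned long long num_tasks     = 64;          /* mpirun -n 64 */

    /* Each task writes segmentCount segments of blockSize bytes,
     * issued in transferSize chunks. */
    printf("transfers per block: %llu\n", block_size / transfer_size);
    printf("total volume: %llu GiB\n",
           (num_tasks * block_size * segment_count) &gt;&gt; 30);
    return 0;
}</pre>
<br />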
<br /><br /></div>
<div><h2>Effect of Page Cache on Benchmarking</h2>What's really happening is that the data being read by IOR isn't actually coming from Lustre; rather, files' contents are already cached, and IOR is able to read them directly out of each compute node's DRAM. &nbsp;The data wound up getting cached during the write phase of IOR as a result of Linux (and Lustre) using a write-back cache to buffer I/O, so that instead of IOR writing and reading data directly to Lustre, it's actually mostly talking to the memory on each compute node.</div>
<div><br /></div>
<div>To be more specific, although each IOR process thinks it is writing to a file on Lustre and then reading back the contents of that file from Lustre, it is actually</div>
<div></div>
<ol><li>writing data to a copy of the file that is cached in memory. &nbsp;If there is no copy of the file cached in memory before this write, the parts being modified are loaded into memory first.</li><li>those parts of the file in memory (called "pages") that are now different from what's on Lustre are marked as being "dirty"</li><li>the write() call completes and IOR continues on, even though the written data still hasn't been committed to Lustre</li><li>independent of IOR, the OS kernel continually scans the file cache for files that have been updated in memory but not on Lustre ("dirty pages"), and then commits the cached modifications to Lustre</li><li>dirty pages are declared non-dirty since they are now in sync with what's on disk, but they remain in memory</li></ol>Then when the read phase of IOR follows the write phase, IOR is able to just retrieve the file's contents from memory instead of having to communicate with Lustre over the network.</div>
<div><br /></div>
<div>There are a couple of ways to measure the read performance of the underlying Lustre file system. The crudest way is to simply write more data than will fit into the total page cache so that by the time the write phase has completed, the beginning of the file has already been evicted from cache. For example, increasing the number of segments (<span style="font-family: monospace;">-s</span>) to write more data reveals the point at which the nodes' page cache on my test system runs over very clearly:<br /><div><br /></div>
<div class="separator" style="clear: both; text-align: center;"><a href="http://3.bp.blogspot.com/-7M2BLomSgNA/VyZ8L-G_HpI/AAAAAAAALyU/SSQXrYOqJ94V4W61S9-g-UMs90EJ4waewCK4B/s1600/ior-overflowing-cache.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="271" src="https://3.bp.blogspot.com/-7M2BLomSgNA/VyZ8L-G_HpI/AAAAAAAALyU/SSQXrYOqJ94V4W61S9-g-UMs90EJ4waewCK4B/s400/ior-overflowing-cache.png" width="400" /></a></div>
<br />However, this can make running IOR on systems with a lot of on-node memory take forever.<br /><br /></div>
<div>A better option would be to get the MPI processes on each node to only read data that they didn't write.
&nbsp;For example, on a four-process-per-node test, shifting the mapping of MPI processes to blocks by four makes each node N read the data written by node N-1.<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://1.bp.blogspot.com/-AhRMQWdDOxg/VyZ6lH2wl-I/AAAAAAAALyA/nv-EM4OlhX8BHCNX_Bx173Mr7miyBXx-ACK4B/s1600/IOR%2BreorderTasks.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="131" src="https://1.bp.blogspot.com/-AhRMQWdDOxg/VyZ6lH2wl-I/AAAAAAAALyA/nv-EM4OlhX8BHCNX_Bx173Mr7miyBXx-ACK4B/s400/IOR%2BreorderTasks.png" width="400" /></a></div> +<br /></div> +<div></div> +<div>Since page cache is not shared between compute nodes, shifting tasks this way ensures that each MPI process is reading data it did not write.</div> +<div><br />IOR provides the <span style="font-family: monospace;">-C</span> option (reorderTasks) to do this, and it forces each MPI process to read the data written by its neighboring node. &nbsp;Running IOR with this option gives much more credible read performance:</div> +<div><br /></div> +<pre>$ mpirun -n 64 ./ior -t 1m -b 16m -s 16 -F -C<br />...<br />access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter<br />------ --------- ---------- --------- -------- -------- -------- -------- ----<br /><span style="background-color: #ffff7f; border-radius: 4px; color: #c7254e;">write 41326 </span> 16384 1024.00 0.005756 0.395859 0.095360 0.396453 0<br /><span style="background-color: #ffff7f; border-radius: 4px; color: #c7254e;">read 3310.00 </span> 16384 1024.00 0.011786 4.95 4.20 4.95 1<br />remove - - - - - - 0.237291 1<br /></pre><br />But now it should seem obvious that the write performance is also ridiculously high. And again, this is due to the page cache, which signals to IOR that writes are complete when they have been committed to memory rather than the underlying Lustre file system.<br /><br />To work around the effects of the page cache on write performance, we can issue an <span style="font-family: monospace;">fsync()</span> call immediately after all of the <span style="font-family: monospace;">write()</span>s return to force the dirty pages we just wrote to flush out to Lustre. Including the time it takes for <span style="font-family: monospace;">fsync()</span> to finish gives us a measure of how long it takes for our data to write to the page cache and for the page cache to write back to Lustre.<br /><br />IOR provides another convenient option, <span style="font-family: monospace;">-e</span> (<span style="font-family: monospace;">fsync</span>), to do just this. And, once again, using this option changes our performance measurement quite a bit:<br /><br /></div> +<pre>$ mpirun -n 64 ./ior -t 1m -b 16m -s 16 -F -C -e<br />...<br />access bw(MiB/s) block(KiB) xfer(KiB) open(s) wr/rd(s) close(s) total(s) iter<br />------ --------- ---------- --------- -------- -------- -------- -------- ----<br /><span style="background-color: #ffff7f; border-radius: 4px; color: #c7254e;">write 2937.89 </span> 16384 1024.00 0.011841 5.56 4.93 5.58 0<br /><span style="background-color: #ffff7f; border-radius: 4px; color: #c7254e;">read 2712.55 </span> 16384 1024.00 0.005214 6.04 5.08 6.04 3<br />remove - - - - - - 0.037706 0</pre> +<p><br />and we finally have a believable bandwidth measurement for our file system. 
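<br /><br />If you want to mimic in your own code what the <span style="font-family: monospace;">-e</span> option measures, the essential pattern is to keep <span style="font-family: monospace;">fsync()</span> inside the timed region. Here is a minimal sketch in C (my own illustration, with a made-up file name and no error handling):<br /><br />
<pre>#include &lt;fcntl.h&gt;
#include &lt;stdio.h&gt;
#include &lt;stdlib.h&gt;
#include &lt;string.h&gt;
#include &lt;sys/time.h&gt;
#include &lt;unistd.h&gt;

int main(void)
{
    const size_t xfer = 1 &lt;&lt; 20;  /* 1 MiB transfers, like -t 1m */
    const int nxfers = 256;       /* 256 MiB total */
    char *buf = malloc(xfer);
    memset(buf, 'x', xfer);

    int fd = open("testfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);

    struct timeval t0, t1;
    gettimeofday(&amp;t0, NULL);
    for (int i = 0; i &lt; nxfers; i++)
        write(fd, buf, xfer);     /* may "complete" into page cache... */
    fsync(fd);                    /* ...so flush dirty pages before stopping the clock */
    gettimeofday(&amp;t1, NULL);

    double secs = (t1.tv_sec - t0.tv_sec) + (t1.tv_usec - t0.tv_usec) / 1.0e6;
    /* each transfer is 1 MiB, so nxfers / secs is MiB per second */
    printf("effective write bandwidth: %.1f MiB/s\n", nxfers / secs);

    close(fd);
    free(buf);
    return 0;
}</pre>
<br />Timing only the write() loop would reproduce the inflated numbers shown earlier; including the fsync() reflects what the file system actually absorbed.<br /><br />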
<br /><br />&lt;h2&gt;Defeating Page Cache&lt;/h2&gt;Since IOR is specifically designed to benchmark I/O, it provides these options that make it as easy as possible to ensure that you are actually measuring the performance of your file system and not your compute nodes’ memory.  That being said, the I/O patterns it generates are designed to demonstrate peak performance, not reflect what a real application might be trying to do, and as a result, there are plenty of cases where measuring I/O performance with IOR is not always the best choice.  There are several ways in which we can get clever and defeat page cache in a more general sense to get meaningful performance numbers.<br /><br />When measuring <b>write performance</b>, bypassing page cache is actually quite simple; opening a file with the <span style="font-family: monospace;">O_DIRECT</span> flag going directly to disk.  In addition, the <span style="font-family: monospace;">fsync()</span> call can be inserted into applications, as is done with IOR’s <span style="font-family: monospace;">-e</span> option.<br /><br />Measuring <b>read performance</b> is a lot trickier.  If you are fortunate enough to have root access on a test system, you can force the Linux kernel to empty out its page cache by doing<br />&lt;blockquote class="tr_bq"&gt;<span style="font-family: monospace;"># echo 1 &gt; /proc/sys/vm/drop_caches</span>&lt;/blockquote&gt;and in fact, this is often good practice before running any benchmark (e.g., Linpack) because it ensures that you aren’t losing performance to the kernel trying to evict pages as your benchmark application starts allocating memory for its own use.<br /><br />Unfortunately, many of us do not have root on our systems, so we have to get even more clever.  As it turns out, there is a way to pass a hint to the kernel that a file is no longer needed in page cache:<br /><br /><br />The effect of passing <span style="font-family: monospace;">POSIX_FADV_DONTNEED</span> using <span style="font-family: monospace;">posix_fadvise()</span> is usually that all pages belonging to that file are evicted from page cache in Linux.  However, this is just a hint–not a guarantee–and the kernel evicts these pages asynchronously, so it may take a second or two for pages to actually leave page cache.  Fortunately, Linux also provides a way to <a href="https://github.com/glennklockwood/atgtools/blob/master/is_file_in_page_cache.c">probe pages in a file to see if they are resident in memory</a>.<br /><br />Finally, it’s often easiest to just limit the amount of memory available for page cache.  Because application memory always takes precedence over cache memory, simply allocating most of the memory on a node will force most of the cached pages to be evicted.  Newer versions of IOR provide the <span style="font-family: monospace;">memoryPerNode</span> option that do just that, and the effects are what one would expect:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://1.bp.blogspot.com/-xiC1K4absXU/V5GVEAfe5dI/AAAAAAAAgwY/HyO4J_ORd2gnJLF7aD3JpNu9p9MqjOc-ACLcB/s1600/ior-memPerNode-test.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="271" src="https://1.bp.blogspot.com/-xiC1K4absXU/V5GVEAfe5dI/AAAAAAAAgwY/HyO4J_ORd2gnJLF7aD3JpNu9p9MqjOc-ACLcB/s400/ior-memPerNode-test.png" width="400" /></a>&lt;/div&gt; +<br />The above diagram shows the measured bandwidth from a single node with 128 GiB of total DRAM.  
The first percent on each x-label is the amount of this 128 GiB that was reserved by the benchmark as application memory, and the second percent is the total write volume.  For example, the “50%/150%” data points correspond to 50% of the node memory (64 GiB) being allocated for the application, and a total of 192 GiB of data being read.<br /><br />This benchmark was run on a single spinning disk which is not capable of more than 130 MB/sec, so the conditions that showed performance higher than this were benefiting from some pages being served from cache.  And this makes perfect sense given that the anomalously high performance measurements were obtained when there was plenty of memory to cache relative to the amount of data being read.<br /><br />&lt;h2&gt;Corollary &lt;/h2&gt;Measuring I/O performance is a bit trickier than CPU performance in large part due to the effects of page caching.  That being said, page cache exists for a reason, and there are many cases where an application’s I/O performance really is best represented by a benchmark that heavily utilizes cache.<br /><br />For example, the BLAST bioinformatics application re-reads all of its input data twice; the first time initializes data structures, and the second time fills them up.  Because the first read caches each page and allows the second read to come out of cache rather than the file system, running this I/O pattern with page cache disabled causes it to be about 2x slower:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://4.bp.blogspot.com/-KBZ0TDtNz5w/V5Gc8XLAS3I/AAAAAAAAgwo/GWH6i3xp98oSHilPgPAipG75cClgDhkuACLcB/s1600/cache-vs-nocache.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="290" src="https://4.bp.blogspot.com/-KBZ0TDtNz5w/V5Gc8XLAS3I/AAAAAAAAgwo/GWH6i3xp98oSHilPgPAipG75cClgDhkuACLcB/s400/cache-vs-nocache.png" width="400" /></a>&lt;/div&gt; +<br />Thus, letting the page cache do its thing is often the most realistic way to benchmark with realistic application I/O patterns.  Once you know <i>how </i>page cache might be affecting your measurements, you stand a good chance of being able to reason about what the most meaningful performance metrics are.</p> + + + + + An uninformed perspective on TaihuLight's design + + 2016-06-21T07:36:00-06:00 + https://hpc.social/2016/an-uninformed-perspective-on-taihulight-s-design + <div style="line-height: 100%; text-align: center;"><span style="font-size: xx-small;">Note: What follows are my own personal thoughts, opinions, and analyses. &nbsp;I am not a computer scientist and I don't really know anything about processor design or application performance, so it is safe to assume I don't know what I'm talking about. &nbsp;None of this represents the views of my employer, the U.S. government, or anyone except me.</span></div> +<p><br /><a href="http://top500.org/news/china-tops-supercomputer-rankings-with-new-93-petaflop-machine/">China’s new 93 PF TaihuLight system</a> is impressive given the indigenous processor design and its substantial increase in its HPL score over the #2 system, Tianhe-2.  The <a href="http://www.nytimes.com/2016/06/21/technology/china-tops-list-of-fastest-computers-again.html?_r=0">popular media has started covering this new system and the increasing presence of Chinese systems on Top500</a>, suggesting that China’s string of #1 systems may be a sign of shifting tides.  And maybe it is.  
China is undeniably committed to investing in supercomputing and positioning itself as a leader in extreme-scale computing.<br /><br />That being said, the TaihuLight system isn't quite the technological marvel and threat to the HPC hegemony that it may seem at first glance.  The system features some critically limiting design choices that make the system smell like a supercomputer that was <a href="http://www.scmp.com/tech/science-research/article/1773421/chinese-supercomputer-too-slow-compete-race-hypersonic-weapons">designed to be #1 on Top500</a>, not to solve scientific problems.  This probably sounds like sour grapes at this point, so let's take a look at some of the details.<br /><br />&lt;h2&gt;Back-of-the-envelope math&lt;/h2&gt;Consider the fact that each TaihuLight node turns in 3,062 GFLOPS (that's 3 TFLOPS) and has 136.51 GB/sec of memory bandwidth. This means that in the time it takes to load two 64-bit floats into the processor from memory, the processor could theoretically perform over 350 floating point operations. But it won't, because it can only load the two operands for one single FLOP.<br /><br />Of course, this is an oversimplification of how CPUs work.  Caches exist to feed the extremely high operation rate of modern processors, and where there are so many cores that their caches can't be fed fast enough, we see technologies like GDDR DRAM and <a href="http://www.extremetech.com/gaming/179159-gtc-2014-nvidia-reveals-dual-gpu-titan-z-new-pascal-gpu-offers-colossal-memory-bandwidth">HBM</a> (on accelerators) and on-package <a href="https://software.intel.com/en-us/blogs/2016/01/20/an-intro-to-mcdram-high-bandwidth-memory-on-knights-landing">MCDRAM</a> (on KNL) appearing so that dozens or hundreds of cores can all retrieve enough floating-point operands from memory to sustain high rates of floating point calculations.<br /><br />However, the ShenWei SW26010 chips in the TaihuLight machine have neither GDDR nor MCDRAM; they rely on four DDR3 controllers running at 136 GB/sec to keep all 256 compute elements fed with data.  <a href="http://www.netlib.org/utk/people/JackDongarra/PAPERS/sunway-report-2016.pdf">Dongarra's report on the TaihuLight design</a> briefly mentions this high skew:<br /><br />&lt;blockquote class="tr_bq"&gt;"The ratio of floating point operations per byte of data from memory on the SW26010 is 22.4 Flops(DP)/Byte transfer, which shows an imbalance or an overcapacity of floating point operations per data transfer from memory. By comparison the Intel Knights Landing processor with 7.2 Flops(DP)/Byte transfer."&lt;/blockquote&gt;<br />This measure of "Flops(DP)/Byte transfer" is called arithmetic intensity, and it is a critical optimization parameter when writing applications for manycore architectures.  Highly optimized GPU codes can show <a href="http://people.eecs.berkeley.edu/~kubitron/cs258/lectures/lec12-Merrimac.pdf">arithmetic intensities of around 10 FLOPS/byte</a>, but such applications are often the exception; there are classes of problems that simply do not have high arithmetic intensities.  
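<br /><br />The back-of-the-envelope numbers above are easy to verify; here is a quick sketch of the arithmetic (my own illustration, with the peak FLOPS and memory bandwidth figures quoted above as inputs):<br /><br />
<pre>#include &lt;stdio.h&gt;

int main(void)
{
    double sw_gflops = 3062.0;  /* SW26010 peak, GFLOP/s per node */
    double sw_bw     = 136.51;  /* SW26010 memory bandwidth, GB/s */

    /* FLOPS per byte loaded from memory: the 22.4 quoted by Dongarra */
    double ai = sw_gflops / sw_bw;
    printf("SW26010 arithmetic intensity: %.1f FLOPS/byte\n", ai);

    /* Two 64-bit operands are 16 bytes, hence "over 350" FLOPs per pair */
    printf("FLOPs possible while loading one operand pair: %.0f\n", ai * 16.0);
    return 0;
}</pre>
<br />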
This diagram, which I stole from the <a href="https://crd.lbl.gov/departments/computer-science/PAR/research/roofline/">Performance and Algorithms Research group at Berkeley Lab</a>, illustrates the spectrum:<br /><br />&lt;div class="separator" style="clear: both; text-align: center;"&gt;<a href="https://3.bp.blogspot.com/-E_1Yi-g0qws/V2jCeZo0dUI/AAAAAAAATSA/2WCXZkchvuUclAXdyIUhv2ODQI7bv4AuwCLcB/s1600/ResizedImage600300-rooflineai.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="200" src="https://3.bp.blogspot.com/-E_1Yi-g0qws/V2jCeZo0dUI/AAAAAAAATSA/2WCXZkchvuUclAXdyIUhv2ODQI7bv4AuwCLcB/s400/ResizedImage600300-rooflineai.png" width="400" /></a>&lt;/div&gt; +<br />To put this into perspective in the context of hardware, let’s look at the #3 supercomputer, <a href="https://www.olcf.ornl.gov/titan/">the Titan system at Oak Ridge National Lab</a>.  The GPUs on which it is built (<a href="http://www.nvidia.com/content/tesla/pdf/nvidia-tesla-kepler-family-datasheet.pdf">NVIDIA’s K20X</a>) each have a GDDR5-based memory subsystem that can feed the 1.3 TFLOP GPUs at 250 GB/sec.  This means that Titan’s FLOPS/byte ratio is around 5.3, or over 4x lower (more balanced) than the 22 FLOPS/byte of TaihuLight’s SW26010 chips.<br /><br />This huge gap means that an application that is perfectly balanced to run on a Titan GPU–that is, an application with an arithmetic intensity of 5.3–will run 4x slower on one of TaihuLight’s SW26010 processors than a Titan GPU.  Put simply, despite being theoretically capable of doing 3 TFLOPS of computing, TaihuLight’s processors would only be able to deliver performance to 1/4th of that, or 0.75 TFLOPS, to this application.  Because of the severely limited per-node memory bandwidth, <b>this 93 PFLOP system would perform like a 23 PFLOP system</b> on an application that, given an arithmetic intensity of 5.3, would be considered highly optimized by most standards.<br /><br />Of course, the indigenous architecture also means that application developers will have to rely on indigenous implementations or ports of performance runtimes like OpenMP and OpenACC, libraries like BLAS, and ISA-specific vector intrinsics.  
Of course, the indigenous architecture also means that application developers will have to rely on indigenous implementations or ports of performance runtimes like OpenMP and OpenACC, libraries like BLAS, and ISA-specific vector intrinsics.  The maturity of this software stack for the ShenWei-64 architecture remains unknown.<br /><br /><h2>What <i>is</i> interesting</h2>This all isn’t to say that the TaihuLight system isn’t a notable achievement; it is the first massive-scale deployment of a CPU-based manycore processor, it is the first massive-scale deployment of EDR InfiniBand, and its CPU design is extremely interesting in a number of ways.<br /><br />The CPU block diagrams included in Dongarra’s report are a bit like a Rorschach test; my esteemed colleagues at <a href="http://www.nextplatform.com/2016/06/20/look-inside-chinas-chart-topping-new-supercomputer/">The Next Platform astutely pointed out its similarities to KNL</a>, but my first reaction was to compare it with IBM’s Cell processor:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://2.bp.blogspot.com/-rCGxhO2fVGw/V2jMRV379wI/AAAAAAAATSQ/l20liolD4jcU8ZxZbkejw5asAeZIOKvZQCLcB/s1600/Cell%2BBE%2Bvs%2BShenWei%2BSW26010.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="181" src="https://2.bp.blogspot.com/-rCGxhO2fVGw/V2jMRV379wI/AAAAAAAATSQ/l20liolD4jcU8ZxZbkejw5asAeZIOKvZQCLcB/s400/Cell%2BBE%2Bvs%2BShenWei%2BSW26010.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">IBM Cell BE vs. ShenWei SW26010.  <a href="http://www.hec.nasa.gov/news/features/2008/cell.074208.html">Cell diagram stolen from NAS</a>; <a href="http://www.netlib.org/utk/people/JackDongarra/PAPERS/sunway-report-2016.pdf">SW26010 diagram stolen from the Dongarra report</a>.</td></tr></tbody></table><br />The Cell processor was ahead of its time in many ways and arguably the first manycore chip targeted at HPC.
It had<br /><ul><li>a single controller core (the PPE) with L1 and L2 caches</li><li>eight simpler cores (the SPEs) on an on-chip network with no L2 cache, but an embedded SRAM scratchpad</li></ul><div>and by comparison, the SW26010 has</div><div><ul><li>a single controller core (the MPE) with L1 and L2 caches</li><li>sixty-four simpler cores (the CPEs) on an on-chip network with no L2 cache, but an embedded SRAM scratchpad</li></ul></div><p>Of course, the similarities are largely superficial and there are vast differences between the two architectures, but the incorporation of heterogeneous (albeit very similar) cores on a single package is quite bold and is a design point that may play a role in exascale processor designs:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://3.bp.blogspot.com/--seFJF-UhLw/V2jPfRKPXoI/AAAAAAAATSc/MXVgxyovM4YF0xo9k4XMlpbWY0TUJi80QCLcB/s1600/CQP3qklUsAAjcNT.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="300" src="https://3.bp.blogspot.com/--seFJF-UhLw/V2jPfRKPXoI/AAAAAAAATSc/MXVgxyovM4YF0xo9k4XMlpbWY0TUJi80QCLcB/s400/CQP3qklUsAAjcNT.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">What an exascale processor might look like, as <a href="https://twitter.com/hpc_guru/status/649645068995792896">stolen from Kathy Yelick</a></td></tr></tbody></table><br />which may feature a combination of many lightweight cores (not unlike the CPE arrays on the TaihuLight processor) accompanied by a few capable cores (not unlike the MPE cores).<br /><br />The scratchpad SRAM present on all of the CPE cores is also quite intriguing, as it is a marked departure from the cache-oriented design of on-package SRAM that has dominated CPU architectures for decades.  The Dongarra report doesn’t detail how the scratchpad SRAM is used by applications, but it may offer a unique new way to perform byte-granular loads and stores that do not necessarily waste a full cache line’s worth of memory bandwidth if the application knows that memory access is to be unaligned.<br /><br />This is a rather forward-looking design decision that makes the CPU look a little more like a GPU.  Some experimental processor designs targeting exascale have proposed eschewing deep cache hierarchies in favor of similar scratchpads:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="https://3.bp.blogspot.com/-swXDcTMnt4Q/V2jTH1YUpBI/AAAAAAAATSo/NDvIZdI53NMNIsP6ATzeIevJX4yPIQCBACLcB/s1600/Traleika%2BGlacier%2Bblock.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="187" src="https://3.bp.blogspot.com/-swXDcTMnt4Q/V2jTH1YUpBI/AAAAAAAATSo/NDvIZdI53NMNIsP6ATzeIevJX4yPIQCBACLcB/s400/Traleika%2BGlacier%2Bblock.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">The Traleika Glacier processor design, featuring separate control and execution blocks and scratchpad SRAM.
Adapted from the <a href="https://xstackwiki.modelado.org/Traleika_Glacier#Architecture">Traleika Glacier wiki page</a>.</td></tr></tbody></table><br />Whether we ever hear how successful or unsuccessful these processor features are remains to be seen, but there may be valuable lessons to be learned ahead of the first generation of exascale processors from architectures like those in the TaihuLight system.<br /><br /><h2>Outlook</h2>At a glance, it is easy to call out the irony in the U.S. government’s decision to ban the sale of Intel’s KNL processors to the Chinese now that the TaihuLight system is public.  It is clear that China is in a position to begin building extreme-scale supercomputers without the help of Intel, and it is very likely that the U.S. embargo accelerated this effort.  As pondered by a notable pundit in the HPC community,<br /><br /><blockquote class="twitter-tweet"><div dir="ltr" lang="en">If US gov hadn’t barred US <a href="https://twitter.com/hashtag/HPC?src=hash">#HPC</a> tech to China, new No.1 <a href="https://twitter.com/hashtag/supercomputer?src=hash">#supercomputer</a> could’ve been <a href="https://twitter.com/hashtag/KNL?src=hash">#KNL</a>-powered instead of Chinese CPUs? <a href="https://twitter.com/hashtag/ISC16?src=hash">#ISC16</a> <a href="https://twitter.com/hashtag/backfired?src=hash">#backfired</a></div>— Andrew Jones (@hpcnotes) <a href="https://twitter.com/hpcnotes/status/744976851567779841">June 20, 2016</a></blockquote><br />And this may have been the case.  However, despite the TaihuLight system’s #1 position and very noteworthy Linpack performance and efficiency, it is not the massive disruptor that puts the U.S. in the back seat.  Underneath TaihuLight’s shiny, 93-petaflop veneer are some cut corners that substantially lower its ability to reliably deliver the performance and scientific impact commensurate with its Linpack score.  As <a href="https://twitter.com/hpcprogrammer/status/744982095127248901">pointed out by a colleague wiser than me</a>, Intel’s impending KNL chip is the product of years of effort, and it is likely that it will be years before ShenWei’s chip designs and fabs are able to really deliver a fully balanced, competitive, HPC-oriented microarchitecture.<br /><br />With that being said, TaihuLight is still a massive system, and even if its peak Linpack score is not representative of its actual achievable performance in solving real scientific problems, it is undeniably a leadership system.  Even if applications can only realize a small fraction of its Linpack performance, there is a lot of discovery to be made in petascale computing.<br /><br />Further, the SW26010 processor itself features some bold design points, and being able to test a heterogeneous processor with scratchpad SRAM at extreme scale may give China a leg up in the exascale architecture design space.
Only time will tell if these opportunities are pursued, or if TaihuLight follows its predecessors into an existence of disuse in a <a href="http://www.marketwatch.com/story/chinas-bevy-of-supercomputers-goes-unused-2014-07-15">moldy datacenter</a> caused by a <a href="http://www.scmp.com/news/china/article/1543226/chinas-world-beating-supercomputer-fails-impress-some-potential-clients">high electric bill</a>, <a href="http://www.scmp.com/tech/science-research/article/1773421/chinese-supercomputer-too-slow-compete-race-hypersonic-weapons">poor system design, and lack of software</a>.</p>


Spark, Chapel, TensorFlow- Workshop at UMich

2016-05-10T01:00:00-06:00
https://hpc.social/2016/spark-chapel-tensorflow-workshop-at-umich

<p>The kind folks at the University of Michigan’s <a href="http://micde.umich.edu">Center for Computational Discovery and Engineering (MICDE)</a>, which is just part of the very impressive <a href="http://arc.umich.edu">Advanced Research Computing</a> division, invited me to give a workshop there a couple of months ago about the rapidly-evolving large-scale numerical computing ecosystem.</p>

<p>There’s lots that I want to do to extend this to a half-day length, but the workshop materials — including a VM that can be used to play with <a href="http://spark.apache.org">Spark</a>, <a href="http://chapel.cray.com">Chapel</a> and <a href="https://www.tensorflow.org">TensorFlow</a>, along with Jupyter notebooks for each — can be found <a href="https://github.com/ljdursi/Spark-Chapel-TF-UMich-2016">on GitHub</a> and may be of some use to others as they stand.</p>

<p>The title and abstract follow.</p>

<blockquote>
  <h4 id="next-generation-hpc--what-spark-tensorflow-and-chapel-are-teaching-us-about-large-scale-numerical-computing">Next Generation HPC? What Spark, TensorFlow, and Chapel are teaching us about large-scale numerical computing</h4>
</blockquote>

<blockquote>
  <p>For years, the academic science and engineering community was almost alone in pursuing very large-scale numerical computing, and MPI - the 1990s-era message passing library - was the lingua franca for such work. But starting in the mid-2000s, others became interested in large-scale computing on data. First internet-scale companies like Google and Yahoo! started performing fairly basic analytics tasks at enormous scale, and now many others are tackling increasingly complex and data-heavy machine-learning computations, which involve very familiar scientific computing tasks such as linear algebra, unstructured mesh decomposition, and numerical optimization. But these new communities have created programming environments which emphasize what we’ve learned about computer science and programmability since 1994 - with greater levels of abstraction and encapsulation, separating high-level computation from the low-level implementation details, and some in HPC are starting to notice. This talk will give a brief introduction to the Apache Spark environment and Google’s TensorFlow machine-learning package for high-level numerical computation, as well as the HPC-focused Chapel language from Cray, to show where each can be used today and how they might be used in the future.
The slides for this talk, and examples for each package along with a virtual machine which can be used for running them, will be available at https://github.com/ljdursi/Spark-Chapel-TF-UMich-2016 .</p>
</blockquote>


Reminiscing and the computing renaissance

2016-04-02T02:44:49-06:00
https://hpc.social/2016/reminiscing-and-the-computing-renaissance

<p>Sifting through boxes of 3.5 inch floppy diskettes - some of questionable
provenance in a dusty basement. Gingerly packing up what I consider to be the
holy trinity of <a href="http://www.amiga.org/">Commodore Amiga</a> computers - A1000, A2000, A3000 - all in some
state of working condition. Of course, back in the day, only Amiga made it all
possible - awesome graphic demos, games, word processing, and ray tracing to
<a href="https://en.wikipedia.org/wiki/Amiga_Unix">Amiga Unix - AMIX</a>, which was one of the first ports of SVR4 to the <a href="https://en.wikipedia.org/wiki/Motorola_68000">MC68000</a>
series processor (yes I do have AMIX installed also).</p>

<p>The frustration watching the <a href="https://www.youtube.com/watch?v=BaTjwo1ywcI">Death Bed Vigil</a> movie in which Dave Haynie of
Commodore Amiga fame gives us a tour through the Commodore engineering
headquarters and of course the fire (bankruptcy) sale which happened at
Commodore Canada on Pharmacy Avenue in Toronto.</p>

<p>Once upon a time, we all carried the respective flags of our favorite platforms - which were varied. It was this rivalry which I think led to these user
communities squeezing tremendous performance out of these systems in the
race to show which platform was best.</p>

<p>Then it all seemed to change. Suddenly we were all forced to march to the same
clock rhythm - and boredom set in. With this course seemingly set in stone,
how are we to escape this computing Sturm und Drang?</p>

<p><strong>GAME ON!</strong></p>

<p>Well, for me this hope appeared in 2013 with the announcement of the OpenPOWER Consortium - an open technical community built around the IBM POWER architecture to grow solutions to serve the evolving computing needs of today and the future.
Next week the second annual OpenPOWER Summit takes place in San Jose, United States, and if the first event was any indication, this should be a very exciting
event. So Power Up and strap on your accelerators as we’re in for a very
interesting ride!</p>


Approximate Mapping of Nanopore Squiggle Data with Spatial Indexing

2015-12-18T00:00:00-07:00
https://hpc.social/2015/approximate-mapping-of-nanopore-squiggle-data-with-spatial-indexing

<p>Over at the <a href="http://simpsonlab.github.io/blog/">Simpson Lab blog</a>,
I have a post describing a novel method for <a href="http://simpsonlab.github.io/2015/12/18/kdtree-mapping/">Directly Mapping
Squiggle Data</a>,
using k-d trees to map segmented kmers; a simple <a href="https://github.com/ljdursi/simple-squiggle-pseudomapper">proof of
concept</a>
is available on github.</p>


Looking back at ISC High-Performance 2015

2015-07-29T03:08:23-06:00
https://hpc.social/2015/looking-back-at-isc-high-performance-2015

<p>I’ve always enjoyed a good road trip. There’s just something fun about jumping
in the car, and heading to a far off location. As they say, half of the fun is
just getting to your destination.
My latest road trip brought me to
Frankfurt for ISC High-Performance 2015.</p>

<p>Crossing all of Austria as well as the southern part of Germany, this trip
proved to be no less exciting than the rest. Breaking down about 50 km from
Frankfurt due to a dead battery, I was fortunate enough to meet a local family
who helped to boost my car so that I could make it in time for the show.
Luckily I had some craft beer to reward them for their troubles. Of course,
part of the excitement this time was the fabled Autobahns of Germany. Here I
could get up to some decent speeds - legally :)</p>

<p>Refreshments are always needed on long trips…</p>

<figure><img src="https://www.gaborsamu.com/images/ISC15_w124.jpg" />
</figure>

<p>Frankfurt too had some interesting surprises in store - including the
interesting culinary treat <a href="https://en.wikipedia.org/wiki/Handk%C3%A4se">Handkäse mit Musik</a>, which is a sour milk cheese
served with onions. I’ll let you read what the Musik part is all about. There
too is the infamous <a href="https://en.wikipedia.org/wiki/Apfelschorle">Apfelsaftschorle</a> which I constantly mistook for beer at
the ISC High-Performance venue. Such is life :)</p>

<p>For me, where the rubber hit the road was the ISC High-Performance event. The
IBM booth (928) featured a refreshing bright yellow colour scheme, like the
dawning of a new era of High-Performance Computing built on Data Centric Systems and OpenPOWER. In terms of demos, the IBM booth featured a number of live and
static demos including:</p>

<ul>
<li>OpenPOWER HPC Server and Cirrascale GPU Developer System</li>
<li>IBM High Performance Services for HPC</li>
<li>IBM Data Engine for Analytics</li>
<li>IBM Watson tranSMART Translational Medicine Solution</li>
<li>Pluto (astrophysics hydrodynamics/magneto-hydrodynamics) running live on Power8 + GPU</li>
<li>OpenFOAM (CFD)</li>
<li>High Performance Storage System (HPSS)</li>
</ul>
<p>The <a href="http://openpowerfoundation.org/">OpenPOWER</a> hardware that was on the show floor attracted a lot of attention. Many people were impressed to behold the two Power8 systems which included
technology from OpenPOWER members Mellanox and NVIDIA. You may have read about
my interest in Power and ARM based systems in some of my earlier blogs.</p>

<figure><img src="https://www.gaborsamu.com/images/ISC15_openpower.jpg" />
</figure>

<p>Being part of the IBM Platform Computing marketing team, I could frequently be
found at the IBM High Performance Services for HPC demo point. Here we
demonstrated our turnkey cloud solution for HPC workloads built on top of
the IBM SoftLayer cloud and featuring both IBM Platform LSF &amp; Platform Symphony workload management options. The demo leveraged the work done by MINES
ParisTech and Transvalor to provide CFD services to French industry. You can
read more about how MINES ParisTech and Transvalor leverage the
IBM solutions for HPC <a href="https://www.ibm.com/case-studies/mines-paristech-and-transvalor">here</a>.</p>

<figure><img src="https://www.gaborsamu.com/images/ISC15_clouddemo.jpg" />
</figure>

<p>ISC also offered us the opportunity to showcase the IBM Platform LSF family’s
interactive conceptual demo to passersby. Here users could learn that
the Platform LSF family is not simply about workload management.
For example,
Platform Process Manager and Platform Application Center, two add-on products
for Platform LSF, help to boost user productivity through ease of use and
simplification.</p>

<figure><img src="https://www.gaborsamu.com/images/ISC15_PMdemo.jpg" />
</figure>

<p>So what’s next? Toronto to Austin road trip for SC15? Yeah, that doesn’t
sound like a bad idea.</p>

<p>See y’all in Texas!</p>


IBM Platform Cluster Manager - how do you like your eggs?

2015-06-07T03:42:03-06:00
https://hpc.social/2015/ibm-platform-cluster-manager-how-do-you-like-your-eggs-

<p>Whether your HPC center is in Lilliput or Blefuscu, you’ll appreciate the
importance of a flexible and easy-to-use cluster management solution to empower your populations. Administrators need software that will allow them to easily
set up, manage, monitor and maintain their infrastructure and ensure consistency for repeatable performance. With the varied workloads we see in modern HPC
centers, ranging from traditional HPC to Big Data and Analytics, organizations
may also consider building out heterogeneous environments, where different
hardware types are used for different workloads. As the OpenPOWER Foundation
grows, it stresses the overall importance of workflows across the HPC Data
Life Cycle - it’s clear that when it comes to solutions for technical computing, it’s no longer a one-horse race.</p>

<p>IBM Platform Cluster Manager is powerful, easy-to-use infrastructure management for today’s scale out computing needs. The latest release of Platform Cluster
Manager V4.2.1 now provides the ability to manage mixed computing environments -
so whether you’re running Linux on POWER Big-Endian or Little-Endian, the choice is yours. In fact, you can even deploy and seamlessly manage a mixed
infrastructure taking advantage of the latest IBM POWER8 and x86 systems.</p>

<p>Leveraging <a href="http://xcat.org/">xCAT</a> technology, Platform Cluster Manager can manage clusters ranging from ‘Lilliputian’ in size all the way up to 2500 nodes. Platform Cluster
Manager Advanced Edition supports the automated creation of multiple clusters
on a shared infrastructure - allowing you to easily satisfy the business
requirements of Lilliputians and Blefuscans. For organizations with a single
HPC cluster, Platform Cluster Manager Standard Edition provides the ability to
quickly provision, run, manage and monitor a technical computing infrastructure with unprecedented ease.</p>

<p>For users taking advantage of IBM POWER8 systems, Platform Cluster Manager can
now provision PowerNV nodes as well as PowerKVM hypervisors, which provides
greater flexibility in infrastructure management and optimization. Further
enhancements in this release geared towards administrator productivity include
IBM POWER8 energy, PowerKVM, and enhanced switch monitoring.</p>

<p>So go ahead. With Platform Cluster Manager you can crack your eggs any way you
like.</p>


IBM Platform LSF and Docker- A Whale of a time!

2015-05-29T03:49:58-06:00
https://hpc.social/2015/ibm-platform-lsf-and-docker-a-whale-of-a-time-

<p>Containers are useful. Whether you’re shipping things across the blue seas or
encapsulating applications on a computer system, they provide numerous benefits. HPC Administrators will know that applications today can depend upon multiple
packages, libraries and environments.
<a href="https://www.docker.com/">Docker</a>, a container technology for Linux, based on well proven technologies brings together ease of setup, use and +efficiency to application management. Leveraging Docker in High-Performance +Computing is one approach to address application &ldquo;dependency hell&rdquo;, as well +as easing transition to the cloud.</p> + +<p>Workload managers are commonly used in High Performance Computing environments +to drive effective use of compute resources and ensure alignment of resources +with business priorities. IBM Platform LSF, a leading workload management +family of products provides support for workloads to run within user-specified +Docker containers by way of an integration package available as an open beta +on Service Management Connect.</p> + +<p>By leveraging the rich Platform LSF plugin framework, the Docker integration +works seamlessly and allows users to specify a defined Docker image as a +submission option. All resource constraints, environment variables are +automatically passed to the container thanks to the integration and Platform +LSF job lifecycle management functions including monitoring resource usage as +well as control actions (i.e. suspend, resume and terminate) are also supported +for Docker containers.</p> + +<p>Ease the burden of administration and ensure consistency with IBM Platform LSF +and Docker! - and have a whale of a time!</p> + + + + + On Random vs. Streaming I/O Performance; Or seek(), and You Shall Find --- Eventually. + + 2015-05-19T01:00:00-06:00 + https://hpc.social/2015/on-random-vs-streaming-i-o-performance-or-seek-and-you-shall-find-eventually- + <p>At the <a href="http://simpsonlab.github.io/blog/">Simpson Lab blog</a>, I’ve written a post +<a href="http://simpsonlab.github.io/2015/05/19/io-performance/">on streaming vs random access I/O performance</a>, +an important topic in bioinformatics. Using a very simple problem (randomly choosing lines in a +non-indexed text file) I give a quick overview of the file system stack and what it means for +streaming performance, and reservoir sampling for uniform random online sampling.</p> + + + + + IBM Software Defined Infrastructure- Put the POWER down and jump the chasm! + + 2015-05-13T15:24:34-06:00 + https://hpc.social/2015/ibm-software-defined-infrastructure-put-the-power-down-and-jump-the-chasm- + <p>OpenPOWER continues to put the power down and accelerate strongly in 2015.<br /> +Earlier this year, the First Annual OpenPOWER Summit took place and more +recently Cabot Partners published the paper <a href="https://openpowerfoundation.org/crossing-the-performance-chasm-with-openpower/">Crossing the Performance Chasm with +OpenPOWER</a>, outlining the benefits of OpenPOWER for HPC. Reading through that +paper, one important point which stuck out were the considerations when +choosing a HPC system. It suggests that rather than using point benchmarks, +one must consider the performance of workflows across the HPC Data Life Cycle. This seems a very sensible approach actually. Would you choose a car strictly +on it&rsquo;s 0-100km/h time? Well, when I was 16 years old probably yes. What +about braking, cornering, economy, safety? You need strong performance in all +categories. 
IBM Software Defined Infrastructure- Put the POWER down and jump the chasm!

2015-05-13T15:24:34-06:00
https://hpc.social/2015/ibm-software-defined-infrastructure-put-the-power-down-and-jump-the-chasm-

<p>OpenPOWER continues to put the power down and accelerate strongly in 2015.<br />
Earlier this year, the First Annual OpenPOWER Summit took place and more
recently Cabot Partners published the paper <a href="https://openpowerfoundation.org/crossing-the-performance-chasm-with-openpower/">Crossing the Performance Chasm with
OpenPOWER</a>, outlining the benefits of OpenPOWER for HPC. Reading through that
paper, one important point that stuck out was the set of considerations when
choosing an HPC system. It suggests that rather than using point benchmarks,
one must consider the performance of workflows across the HPC Data Life Cycle. This seems a very sensible approach actually. Would you choose a car strictly
on its 0-100 km/h time? Well, when I was 16 years old probably yes. What
about braking, cornering, economy, safety? You need strong performance in all
categories. The OpenPOWER Foundation achieves just this by bringing together
organizations with broad expertise, from accelerators to interconnects, around
IBM POWER server technology.</p>

<p><a href="https://www.ibm.com/it-infrastructure/spectrum-computing">IBM Software Defined Infrastructure</a> helps to wield the sword of OpenPOWER for high performance
computing workloads. Featuring broad OS/platform support including Linux on
POWER (Little Endian), IBM Platform Computing software products provide broad
capabilities including application management, infrastructure management, job
scheduling as well as monitoring and reporting.</p>

<p>Learn more about the IBM Software Defined Infrastructure for high performance
computing on OpenPOWER in this presentation from the OpenPOWER Summit. Put
the POWER down and jump the chasm!</p>


HPC- Seeing is believing

2015-05-11T15:34:40-06:00
https://hpc.social/2015/hpc-seeing-is-believing

<p>People who know me know that I like to tinker. Whether it’s with cars,
computers or other mechanical gizmos, I’ve always enjoyed dismantling and
reassembling things - understanding what makes them tick. Maintaining classic
computers is a passion of mine and as you’ve seen in my previous blogs on that
topic, I’ve always tried to add an element of high performance computing when
tinkering with computers. Whether on a classic SPARC based laptop, MIPS
smartbook or a modern ARM developer board, there is a sense of achievement in
getting such systems installed in 2015 and successfully running a benchmark,
for example. Even when running a simple home network, in this case with a wild
mix of machines, the importance of monitoring is apparent.</p>

<p>For organizations that make a considerable investment in high performance
computing infrastructure, monitoring this infrastructure and understanding
how it’s being used is of paramount importance. IBM Platform RTM is
comprehensive monitoring, reporting and alerting software for HPC environments
running IBM Platform LSF.
It takes the guesswork out of HPC infrastructure
monitoring by aggregating system, workload and license consumption
information, all in a single tool.</p>

<figure><img src="https://www.gaborsamu.com/images/pandora.png" />
</figure>

<p>Whether you’re a system admin or a line of business manager, this <a href="https://www.ibm.com/downloads/cas/Z757K5YW">Technical
Brief</a> provides an in-depth look at the importance of comprehensive HPC
infrastructure monitoring - which allows organizations to correlate
workload, system and license consumption metrics in a single tool.</p>


Understanding Partial Order Alignment for Multiple Sequence Alignment

2015-05-01T01:00:00-06:00
https://hpc.social/2015/understanding-partial-order-alignment-for-multiple-sequence-alignment

<p>Over at the <a href="http://simpsonlab.github.io/blog/">Simpson Lab blog</a>, I have an explainer
on <a href="http://simpsonlab.github.io/2015/05/01/understanding-poa/">Understanding Partial Order Alignment</a>,
an under-appreciated method for multiple sequence alignment; I hope the explanation there
(and <a href="https://github.com/ljdursi/poapy">explanatory implementation</a>) is useful to those
exploring graph-based approaches to alignment.</p>


HPC+MPI on RCE Podcast

2015-05-01T01:00:00-06:00
https://hpc.social/2015/hpc-mpi-on-rce-podcast

<p>In <a href="http://www.rce-cast.com/Podcast/rce-97-jonathan-dursi.html">the latest episode</a> of the <a href="http://www.rce-cast.com">RCE podcast</a>, Jeff Squyres, Brock Palen, and I spoke about the HPC and MPI series of blogposts and the community reaction.</p>

<p>It was a really interesting discussion; Brock has worked closely with an enormous variety of researchers and helps run an HPC centre, while Jeff deeply understands HPC networking, from getting ones and zeros onto the wires at the lowest level of hardware up to being an extremely active member of the MPI forum.</p>

<p>I was really pleased that they asked me to join them; I’ve been listening to their show since at least the VisIt episode in 2009 (I had just missed the Hadoop episode, it turns out) and for some years they were the only big computing podcast around.</p>

<p>If you were interested in the MPI discussion, you might want to listen to this most recent episode; if you’re interested in big computing software projects more broadly, you should definitely consider subscribing to the podcast.</p>


More Conjecture on KNL's Near Memory

2015-04-29T17:33:00-06:00
https://hpc.social/2015/more-conjecture-on-knl-s-near-memory

<p>The Platform ran <a href="http://www.theplatform.net/2015/04/28/thoughts-and-conjecture-on-knights-landing-near-memory/">an interesting collection of conjectures on how KNL’s on-package MCDRAM might be used</a> this morning, and I recommend reading through it if you’re following the race to exascale.  I was originally going to write this commentary as a <a href="https://plus.google.com/+glennklockwood/posts">Google+ post</a>, but it got a little long, so pardon the lack of a proper lead-in here.<br /><br />I appreciated Mr. Funk’s detailed description of how processor caches interact with DRAM, and how this might translate into KNL’s caching mode.  However, he underplays exactly why MCDRAM (and the GDDR on KNC) exists on these manycore architectures in his discussion on how MCDRAM may act as an L3 cache.
On-package memory is not simply another way to get better performance out of the manycore processor; rather, it is a hard requirement for keeping all 60+ cores (and their 120+ 512-bit vector registers, 1.8+ MB of L1 data cache, etc) loaded.  Without MCDRAM, it would be physically impossible for these KNL processors to achieve their peak performance due to memory starvation.  By extension, Mr. Funk’s assumption that this MCDRAM will come with substantially lower latency than DRAM might not be true.<br /><br />As a matter of fact, the massive parallelism game is not about latency at all; it came about as a result of latencies hitting a physical floor.  So, rather than drive clocks up to lower latency and increase performance, the industry has been throwing more but slower clocks at a given problem to mask the latencies of data access for any given worker.  While one thread may be stalled due to a cache miss on a Xeon Phi core, the other three threads are keeping the FPU busy to achieve the high efficiency required for performance.  This is at the core of the Xeon Phi architecture (as well as every other massively parallel architecture including GPUs and Blue Gene), so it is unlikely that Intel has sacrificed their power envelope to actually give MCDRAM lower latency than the off-package DRAM on KNL nodes.<br /><br />At an architectural level, accesses to MCDRAM still need to go through memory controllers like off-package DRAM.  Intel hasn’t been marketing the MCDRAM controllers as “cache controllers,” so it is likely that the latencies of memory access are on par with the off-package memory controllers.  There are simply more of these parallel MCDRAM controllers (eight) operating relative to off-package DRAM controllers (two), again suggesting that bandwidth is the primary capability.<br /><br />Judging by current trends in GPGPU and KNC programming, I think it is far more likely that this caching mode acts at a much higher level, and Intel is providing it as a convenience for (1) algorithmically simple workloads with highly predictable memory access patterns, and (2) problems that will fit entirely within MCDRAM.  Like with OpenACC, I’m sure there will be some problems where explicit on/off-package memory management (analogous to OpenACC’s copyin, copyout, etc) isn’t necessary and cache mode will be fine.  Intel will also likely provide all of the necessary optimizations in their compiler collection and MKL to make many common operations (BLAS, FFTs, etc) work well in cache mode as they did for KNC’s offload mode.<br /><br />However, to answer Mr. Funk’s question of “Can pre-knowledge of our application’s data use–and, perhaps, even reorganization of that data–allow our application to run still faster if we instead use Flat Model mode,” the answer is almost unequivocally “YES!”  Programming massively parallel architectures has never been easy, and magically transparent caches rarely deliver reliable, high performance.  Even the L1 and L2 caches do not work well without very deliberate application design to accommodate wide vectors; cache alignment and access patterns are at the core of why, in practice, it’s difficult to get OpenMP codes working with high efficiency on current KNC processors.
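<p>To see why on-package memory is a hard requirement rather than a nicety, consider a rough back-of-the-envelope model.  The numbers below are my own ballpark assumptions for a KNL-class part, not Intel’s specifications:</p>

<pre><code class="language-python"># Minimum arithmetic intensity needed to keep a manycore part busy:
# below peak_flops / bandwidth, the cores starve waiting on memory.
# Ballpark figures assumed for a KNL-class processor; not official specs.

peak_tflops = 3.0    # assumed double-precision peak, TFLOPS
ddr_gbs = 90.0       # assumed off-package DDR4 bandwidth, GB/s
mcdram_gbs = 400.0   # assumed on-package MCDRAM bandwidth, GB/s

for name, bw in [("DDR4  ", ddr_gbs), ("MCDRAM", mcdram_gbs)]:
    ai_needed = peak_tflops * 1000.0 / bw
    print("%s: need %4.1f flops/byte to reach peak" % (name, ai_needed))
# DDR4:   ~33 flops/byte -- far beyond most real codes
# MCDRAM:  ~8 flops/byte -- within reach of well-optimized kernels
</code></pre>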
As much as I’d like to believe otherwise, the caching mode on KNL will likely be even harder to effectively utilize, and explicitly managing the MCDRAM will be an absolute requirement for the majority of applications.</p>


Coarray Fortran Goes Mainstream- GCC 5.1

2015-04-26T01:00:00-06:00
https://hpc.social/2015/coarray-fortran-goes-mainstream-gcc-5-1

<p>This past week’s release of <a href="https://gcc.gnu.org/gcc-5/">GCC 5.1</a> contains at least <a href="https://gcc.gnu.org/gcc-5/changes.html">two new features</a> that are important to the big technical computing community: <a href="https://gcc.gnu.org/wiki/Offloading">OpenMP4/OpenACC offloading</a> to Intel Phi/NVIDIA accelerators, and compiler support for <a href="https://gcc.gnu.org/wiki/Coarray">Coarray Fortran</a>, with the communications layer provided by the <a href="http://opencoarrays.org">OpenCoarrays Project</a>.</p>

<p>While I don’t want to downplay the importance or technical accomplishment of the OpenMP 4 offloading now being available, I think it’s important to highlight the widespread availability for the first time of a tried-and-tested post-MPI programming model for HPC; and one that, since it is now part of the Fortran standard, is largely immune to fears that it might go away due to lack of interest. Here I’ll give a quick history of Coarray Fortran (CAF), some examples, and the pros and cons of CAF versus other approaches.</p>

<h2 id="a-quick-history-of-coarray-fortran">A quick history of Coarray Fortran</h2>

<p>Coarray Fortran first became widely known as Co-array Fortran, introduced in a <a href="https://scholar.google.ca/scholar?cluster=8719640223898917361&amp;hl=en&amp;as_sdt=0,5">1998 paper</a> which described an implementation on Cray systems (T3Es and X1s) of a minimal extension to Fortran 95 which included distributed memory computing of enough complexity to allow real applications.</p>

<p>The basic idea is simple enough from a developer’s point of view. As with most MPI programs, a single program is launched across many processors. Each “image” has its own local variables, as usual. However, variables can also be defined to have a “co-dimension”; that is, a dimension which indexes that variable across all images.</p>

<pre><code class="language-fortran">program coarray1
    implicit none
    integer :: me, right, i
    integer, dimension(3), codimension[*] :: a

    me = this_image()

    right = me + 1
    if (right > num_images()) right = 1

    a(:) = [ (me**i, i=1, 3) ]

    sync all

    print *, "Image ", me, " has a(2) = ", a(2)[me], "; neighbour has ", a(2)[right]
end program coarray1
</code></pre>
<p>where square brackets refer to the co-index across images; recall that Fortran, somewhat unfortunately, uses parentheses both for array indexing and for function arguments. Note also that, in Fortran fashion, image numbers begin at 1.</p>

<p>Running this on 4 images gives:</p>

<pre><code class="language-bash">$ ./coarray1
Image 2 has a(2) = 4 ; neighbour has 9
Image 3 has a(2) = 9 ; neighbour has 16
Image 4 has a(2) = 16 ; neighbour has 1
Image 1 has a(2) = 1 ; neighbour has 4
</code></pre>

<p>While it’s often the case that coarrays are also arrays – as is the case here with <code>a</code> – that needn’t be true. Scalar variables - variables without array dimensions - can nonetheless have codimensions and thus be coarrays.</p>

<p>Co-indexes needn’t be linear; one can also define co-dimensions of co-rank 2 or higher, to impose a grid-like pattern over the ranks.</p>
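<p>A minimal sketch of both cases follows; these declarations are my own illustration, not taken from the standard documents:</p>

<pre><code class="language-fortran">program coarray_decls
    implicit none
    ! a scalar coarray: no array dimensions, but a codimension
    real :: t[*]
    ! co-rank 2: images arranged in a 4 x (num_images()/4) logical grid
    integer :: grid(8,8)[4,*]

    grid = 0
    t = real(this_image())
    sync all
    ! run on at least two images so that t[2] exists
    if (this_image() == 1) print *, "scalar coarray on image 2: ", t[2]
end program coarray_decls
</code></pre>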
<p>Co-array Fortran continued to be used on Cray systems, and was submitted as a proposal for inclusion into Fortran 2008. A stripped-down version of the original proposal (losing such things as image “teams”, and the hyphen in Co-array) made it through, with some minor syntax changes. The Cray Fortran compiler quickly adopted the standard, and <a href="https://software.intel.com/en-us/articles/distributed-memory-coarray-fortran-with-the-intel-fortran-compiler-for-linux-essential">Intel’s Fortran compiler</a> has since version 12 supported SMP coarrays, and distributed-memory coarrays as part of the “Cluster suite” that includes Intel MPI. IBM and PGI are said to be working on Coarray support. In less widely-used compilers, <a href="http://web.cs.uh.edu/~openuh/">OpenUH</a> supported Coarrays quite early on, as did the now-defunct <a href="http://www.g95.org">G95</a>.</p>

<p>A <a href="http://isotc.iso.org/livelink/livelink?func=ll&amp;objId=17064344&amp;objAction=Open">technical specification</a> which is expected to make it into a future Fortran standard largely unscathed re-instates support for teams (giving overlapping functionality with MPI communicators for coordinating subsets of processes), and adds some collective operations, some atomic operations, and Events, which are something like <a href="http://en.wikipedia.org/wiki/Monitor_(synchronization)">condition variables</a>. GCC 5.1 supports many of these features already.</p>

<h2 id="examples">Examples</h2>

<p>Let’s take a look at a couple of simple examples to see how Coarray Fortran works in some familiar cases, and how the code complexity compares to MPI.</p>

<p>We’ll see in part that, unlike with (say) Spark or Chapel examples from earlier in the month, in Coarray Fortran the developer is still responsible for explicitly decomposing the problem. That means that a lot of the boilerplate of the MPI versions of the code remains. However, as communication patterns become more complex, the code can still simplify quite a bit.</p>

<p>However, having the communications built into the language has another completely different advantage, one we’ve gotten used to not thinking about as we’re more used to using external libraries. Communication being part of the language means that the compiler itself can perform high-level optimization on communications, just as it would with memory access.</p>

<h3 id="1d-diffusion-equation">1D diffusion equation</h3>

<p>Let’s take a look at a simple example I’ve used before, <a href="https://github.com/ljdursi/coarray-examples/tree/master/diffusion">1d diffusion</a>. Here, we have a 1D domain broken up across images, or MPI ranks, exchanging data just with nearest neighbours.</p>

<p>Taking a look at the <a href="https://github.com/ljdursi/coarray-examples/blob/bc356ec1dce3493c59800f1845c93bf18a6e7403/diffusion/diffusion-coarray.f90#L108">CAF code</a>, we have the data exchange part:</p>

<pre><code class="language-fortran">!
! exchange boundary information
!

    sync images(neighbours(1:nneighbours))
    if (this_image() /= 1) then
        temperature(1,old) = temperature(locnpoints+1,old)[left]
    endif
    if (this_image() /= num_images()) then
        temperature(locnpoints+2,old) = temperature(2,old)[right]
    endif

!
! update solution
!
    forall (i=2:locnpoints+1)
        temperature(i,new) = temperature(i,old) + &
              dt*kappa/(dx**2) * ( &
                   temperature(i+1,old) - &
                   2*temperature(i, old) + &
                   temperature(i-1,old) )
    end forall
</code></pre>

<p>There’s a synchronize statement at the beginning, to make sure we don’t get ahead of any of our neighbours (or vice versa), and then we pluck the necessary data for our guardcells out of the coarray of temperature.</p>

<p>This seems familiar, and indeed it’s not that different from the obvious <a href="https://github.com/ljdursi/coarray-examples/blob/bc356ec1dce3493c59800f1845c93bf18a6e7403/diffusion/diffusion-mpi.f90#L107">MPI implementation</a>:</p>

<pre><code class="language-fortran">  !...

    call MPI_Sendrecv(temperature(locnpoints+1,old), 1, MPI_REAL, right, righttag, &
                      temperature(1,old), 1, MPI_REAL, left, righttag, MPI_COMM_WORLD, rstatus, ierr)

    call MPI_Sendrecv(temperature(2,old), 1, MPI_REAL, left, lefttag, &
                      temperature(locnpoints+2,old), 1, MPI_REAL, right, lefttag, MPI_COMM_WORLD, rstatus, ierr)

  !...
</code></pre>

<p>(and the update is exactly the same).</p>

<p>But having the exchange done in facilities built into the language has another benefit. Let’s look back to the coarray version. There’s a synchronization point, communications, computation, and (although we don’t see it here), a loop back to the synchronization point, as part of the iteration.</p>

<p>The compiler will, as it does, perform reorderings that it can prove to itself don’t change the meaning of the code but will likely improve performance. With memory increasingly a bottleneck, compilers frequently perform some sort of prefetch optimization to move requests for data from slow main memory forward, perform computations on data already in cache for the ~200 cycles that access will take, and only then work on the data that hopefully has loaded.</p>

<p>This optimization is familiar in the MPI world, of course; it’s overlapping communication with computation, and is performed using non-blocking Sends and Receives. But because the communication is explicit to the compiler, it’s a difference of degree, not of kind, that the data is coming from over the network rather than from main memory. Thus, this optimization is straightforwardly performed automatically by the compiler.</p>

<p>On the other hand, it is much less automatic for a developer to rewrite <a href="https://github.com/ljdursi/coarray-examples/blob/1acda1378398f3973a0066e09d89498a36769839/diffusion/diffusion-mpi-nonblocking.f90#L105">the MPI code</a>:</p>

<pre><code class="language-fortran">!
! begin exchange of boundary information
!

    call MPI_Isend(temperature(locnpoints+1,old), 1, MPI_REAL, &
                   right, righttag, MPI_COMM_WORLD, requests(1), ierr)
    call MPI_Isend(temperature(2,old), 1, MPI_REAL, &
                   left, lefttag, MPI_COMM_WORLD, requests(2), ierr)
    call MPI_Irecv(temperature(1,old), 1, MPI_REAL, &
                   left, righttag, MPI_COMM_WORLD, requests(3), ierr)
    call MPI_Irecv(temperature(locnpoints+2,old), 1, MPI_REAL, &
                   right, lefttag, MPI_COMM_WORLD, requests(4), ierr)

!
! update solution
!
    forall (i=3:locnpoints)
        temperature(i,new) = temperature(i,old) + &
              dt*kappa/(dx**2) * ( &
                   temperature(i+1,old) - &
                   2*temperature(i, old) + &
                   temperature(i-1,old) &
              )
    end forall
    time = time + dt

!
! wait for communications to complete
!
    call MPI_Waitall(4, requests, statuses, ierr)
!
! update solution
!
    temperature(2,new) = temperature(2,old) + dt*kappa/(dx**2) * &
           ( temperature(1,old) - 2*temperature(2, old) + temperature(3,old) )
    temperature(locnpoints+1,new) = temperature(locnpoints+1,old) + dt*kappa/(dx**2) * &
           ( temperature(locnpoints,old) - 2*temperature(locnpoints+1, old) + &
             temperature(locnpoints+2,old) )
</code></pre>

<h3 id="block-matrix-multiplication">Block matrix multiplication</h3>

<p>Let’s take a look at another example, a simple <a href="https://github.com/ljdursi/coarray-examples/tree/master/blockmatrixmult">block matrix multiplication</a> where each image/task has one block of the A and B matrices, and we’re calculating \(C = A \times B\).</p>

<p>In the <a href="https://github.com/ljdursi/coarray-examples/blob/bc356ec1dce3493c59800f1845c93bf18a6e7403/blockmatrixmult/blockmatrix-coarray.f90#L38">CAF version</a>, this is almost embarrasingly easy:</p>

<pre><code class="language-fortran">  sync all
  c = 0.
  do k=1,ncols
      c = c + matmul(a[myrow,k],b[k,mycol])
  enddo
  sync all
</code></pre>

<p>and the exchange not that bad in <a href="https://github.com/ljdursi/coarray-examples/blob/bc356ec1dce3493c59800f1845c93bf18a6e7403/blockmatrixmult/blockmatrix-mpi.f90#L53">the MPI version, either</a>, using the SUMMA algorithm (Cannon’s, which can be better for small \(P\), would have been messier):</p>

<pre><code class="language-fortran">  do k=0,ncols-1
      aremote = a
      bremote = b
      call MPI_Bcast(aremote, blockrows*blockcols, MPI_INTEGER, k, rowcomm, ierr)
      call MPI_Bcast(bremote, blockrows*blockcols, MPI_INTEGER, k, colcomm, ierr)
      c = c + matmul(aremote, bremote)
  enddo
</code></pre>

<p>although it did take us a lot more boilerplate to get there; three communicators, explicit temporary arrays, etc:</p>

<pre><code class="language-fortran">  call MPI_Init(ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, comsize, ierr)

  !...

  allocate(aremote(blockrows,blockcols))
  allocate(bremote(blockcols,blockrows))

  !...

  call MPI_Cart_create(MPI_COMM_WORLD, 2, dims, [1,1], 1, cartcomm, ierr)
  call MPI_Comm_rank(cartcomm, rank, ierr)
  call MPI_Cart_coords(cartcomm, rank, 2, coords, ierr)

  ! create row, column communicators
  call MPI_Comm_split( cartcomm, myrow, mycol, rowcomm, ierr )
  call MPI_Comm_split( cartcomm, mycol, myrow, colcomm, ierr )
</code></pre>

<p>and this is still a fairly straightforward communications pattern. As communications become more complex, the advantage of it being performed implicitly becomes more clear.</p>

<h3 id="coarray-pros">Coarray Pros</h3>

<p>We’ve only looked at two examples, but that’s enough to get a feeling for the strengths and weaknesses of CAF vs other options:</p>

<h4 id="part-of-the-language">Part of the Language</h4>

<p>Compilers are enormously more sophisticated than they were twenty+ years ago, and using those optimization engines to our advantage in generating fast communications code is an enormous advantage.
Having the communications be explicit in the language enables the compiler to perform entire suites of automatic optimizations (prefetching, batching, memory/time tradeoffs) that can’t easily be done with library-based approaches.</p>

<h4 id="stable">Stable</h4>

<p>One concern in the HPC community about trying new approaches is lingering doubt about whether a given new tool or language will be around five or ten years later; a concern that can become self-fulfilling.</p>

<p>As part of the Fortran standard, Coarray Fortran is quite definitely here to stay; there are now several competing implementations, and competition will only improve them.</p>

<h4 id="incremental">Incremental</h4>

<p>Because Coarray Fortran uses a familiar model — Single Program, Multiple Data, with data manually decomposed — and only changes how the communications are expressed, there is a very modest learning curve for developers already familiar with MPI, and very modest porting effort required.</p>

<p>The familiarity extends in another dimension, as well; Coarray Fortran is about as “authentically HPC” as it’s possible to get (Cray! T3Es! Fortran!) for a community that is sometimes skeptical of ideas from the outside.</p>

<p>In addition, this incremental approach also makes interoperability with MPI relatively straightforward, for those requiring MPI-based library support.</p>

<h4 id="already-quite-fast">Already Quite Fast</h4>

<p>OpenCoarrays, which provides the communications support for gfortran’s coarray implementation, is <a href="http://opencoarrays.org/yahoo_site_admin/assets/docs/pgas14_submission_7.30712505.pdf">already comparable to and sometimes faster than</a> typical MPI code, and in some cases even faster than the very well-tested Cray coarray implementation(!). While this is still the first major release of gfortran coarrays, and performance improvements and doubtless bug fixes remain to be made, this is already a fairly solid and fast piece of software.</p>

<h3 id="coarray-cons">Coarray Cons</h3>

<p>On the other side of the ledger are primarily points we’ve already considered as Pros, but viewed from the glass-half-empty side:</p>

<h4 id="part-of-a-language">Part of <em>A</em> Language</h4>

<p>Being built into a language means that it necessarily isn’t available to users of other languages. I think this is largely inevitable for next-gen HPC approaches, to take full advantage of the compilers and runtimes that are now available, but it certainly will affect adoption; I can’t imagine too many C++ programmers will migrate to Fortran for their next project. (Although it does start looking intriguing for Matlab or Python/Numpy users).</p>

<h4 id="stable-1">Stable</h4>

<p>As I’ve mentioned in the context of MPI, too much stability can be a bad thing, and the Fortran committee makes the MPI Forum look like a squirrel on cocaine. I’m less concerned about that here in the short term, since the Coarrays that went into the standard were based on a model that had been used for years successfully, and new features are already in the works; but any additional new features that are seen to be needed may well be a long time coming.</p>

<h4 id="incremental-1">Incremental</h4>

<p>That Coarrays are incremental certainly makes it easier to port existing code, but it means that many of my concerns about MPI as a development environment remain unaddressed. A researcher or application developer still has to perform the manual decomposition of a problem.
This requires an enormous amount of eminently automatable boilerplate and zillions of opportunities for meaningless bugs like off-by-one errors. (That sort of bookkeeping is precisely what computers are better at than developers!) That burden also means that substantial amounts of code must be rewritten if the decomposition changes.</p> + +<h4 id="already-quite-fast-1">Already Quite Fast</h4> + +<p>…Ok, it’s hard to see much of a downside here.</p> + +<h3 id="conclusion">Conclusion</h3> + +<p>The release of gcc-5.1 with coarray support is going to be the first time a huge number of HPC developers have ready access to coarrays. From my point of view, it’s notably less ambitious than a large number of projects out there, but that may well make it easier to adopt for a sizable community. Certainly anyone planning to start a new project in Fortran should give it very serious consideration.</p> + +<p>My own hope is that Coarray Fortran will have a large number of delighted users, some of whose appetite then becomes whetted for other still more productive languages and environments for large-scale technical computing. In the next few posts, I’ll take a closer look at some of those.</p> + + + + + In Praise of MPI Collectives and MPI-IO + + 2015-04-19T01:00:00-06:00 + https://hpc.social/2015/in-praise-of-mpi-collectives-and-mpi-io + <p>While I have a number of posts I want to write on other topics and technologies, there is one last followup I want to make to <a href="http://www.dursi.ca/hpc-is-dying-and-mpi-is-killing-it/">my MPI post</a>.</p> + +<p>Having said what I think is wrong about MPI (the standard, not the implementations, which are of very high quality), it’s only fair to say something about what I think is very good about it. And <em>why</em> I like these parts gives lie to one of the most common pro-MPI arguments I’ve been hearing for years; that application programmers coding at low levels is somehow essential - or even just a good idea - for performance.</p> + +<h2 id="two-great-things-about-mpi">Two great things about MPI</h2> + +<h3 id="collective-operations">Collective Operations</h3> +<p>Since the very beginning, MPI has defined a suite of <a href="https://computing.llnl.gov/tutorials/mpi/#Collective_Communication_Routines">collective communications</a> that include operations like scatter, gather, <a href="http://en.wikipedia.org/wiki/Prefix_sum">prefix scan</a>, and reduce. While these weren’t invented by MPI – many were already implemented as “global communications” routines in the <a href="http://en.wikipedia.org/wiki/Connection_Machine">CM-2’s</a> <a href="http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=365582">Connection Machine Scientific Software Library</a>, for instance, and there is lots of literature on implementing those operations on other architectures like the iPSC/860-based hypercube systems – it’s certainly fair to say that it was MPI that popularized them to the point that they’ve started getting <a href="http://www.mellanox.com/page/products_dyn?product_family=104&amp;menu_section=73">hardware support in network cards</a>. 
The popularization stems partly from how widely taught MPI is, but also from useful generalizations that the MPI Forum made, like user-defined reduction operations, or being able to perform these operations on user-defined subsets of tasks.</p> + +<p>A classic use of MPI collective operations would be using a reduce to find a global sum (or max, or min, or a user defined operation) of local values:</p> + +<pre><code class="language-python">from mpi4py import MPI +import random + +comm = MPI.COMM_WORLD +rank = comm.Get_rank() +nprocs = comm.Get_size() + +local = random.random() + +globalsum = comm.reduce(local, op=MPI.SUM, root=0) +globalmin = comm.reduce(local, op=MPI.MIN, root=0) +globalmax = comm.reduce(local, op=MPI.MAX, root=0) + +if rank == 0: + print "Min, mean, max = ", globalmin, globalsum/nprocs, globalmax +</code></pre> + +<h3 id="mpi-io">MPI-IO</h3> + +<p><a href="http://beige.ucs.indiana.edu/I590/node86.html">MPI-IO</a> is the foundational middleware for HPC parallel I/O. <a href="https://hdfgroup.org/HDF5/PHDF5/">Parallel HDF5</a> (and thus <a href="http://www.unidata.ucar.edu/software/netcdf/docs_rc/parallel_io.html">Parallel NetCDF4</a>), <a href="https://www.olcf.ornl.gov/center-projects/adios/">ADIOS</a>, and others are built on top of it. As a result, even application software that doesn’t explicitly use MPI sometimes relies on MPI-IO for reading and writing large files in parallel.</p> + +<p>The key concept in MPI-IO is a “file view”, which describes (in terms of MPI data layouts) where in the file a process will be writing. Once that’s done, writing data to the file just looks like sending a message to the file. A trivial example follows below; more complex data layouts like (as often happens in scientific computing) non-contiguous slices of large multidimensional arrays being read and written would look exactly the same:</p> + +<pre><code class="language-python">from mpi4py import MPI + +comm = MPI.COMM_WORLD +rank = comm.Get_rank() +nprocs = comm.Get_size() + +myString = 'Hello ' if rank % 2 == 0 else 'World!' +stringSize = 6 + +subarray = MPI.CHAR.Create_subarray( (stringSize*nprocs,), (stringSize,), (stringSize*rank,)) +subarray.Commit() + +filehandle = MPI.File.Open(comm, 'ioexample.txt', MPI.MODE_CREATE | MPI.MODE_WRONLY) +filehandle.Set_view(0, MPI.CHAR, subarray) +filehandle.Write_all(myString) + +filehandle.Close() +</code></pre> + +<h2 id="why-theyre-great">Why they’re great</h2> + +<p>These two very different parts of the MPI standard have three important features in common for this discussion.</p> + +<ul> + <li>They’re at much higher levels of abstraction than most of the API</li> + <li>Application programmers would get worse performance, not better, if they tried to implement their own at lower levels.</li> + <li>Original implementations of these APIs didn’t perform nearly as well as current implementations. 
But algorithmic and implementation work done by software engineers greatly sped up the low-level implementations without application programmers needing to rewrite their code.</li> +</ul> + +<h3 id="collectives-and-mpi-io-are-higher-levels-of-abstraction">Collectives and MPI-IO are higher levels of abstraction</h3> + +<p>Calls to MPI collective operations or MPI-IO describe what should be done, not how to do it, and at a much higher level than <code>MPI_Send()/MPI_Put()</code>.</p> + +<p>Operations like “All processes sum their results and distribute the result to all processes”, or “Each process writes to their slice of the file” are enormously broader than “Send this message to process X”. There’s a large number of ways they could be implemented, and in fact there’s a huge literature on both <a href="https://scholar.google.ca/scholar?q=mpi+collectives">collectives</a> and <a href="https://scholar.google.ca/scholar?q=mpi-io">MPI-IO</a> on various approaches to doing so.</p> + +<h3 id="application-programmers-reimplementing-them-would-be-worse-for-performance">Application programmers reimplementing them would be worse for performance</h3> + +<p>If the “low-level application programming is essential for high performance” argument were true, then of course we would be actively dissuading researchers from using these high-level tools. But we don’t, and we’re right not to.</p> + +<p>Most of us who have worked with enough researchers writing their own HPC codes have had the experience of someone coming into our office who was broadcasting data with a loop over <code>MPI_Send()</code>s, or trying to write to a shared file using <code>fseek()</code> or the like, and we’ve directed them to collective operations or MPI-IO instead. We do the same, of course, when someone is trying to type in some Gaussian Elimination code from Numerical Recipes (no link; that book has done enough damage) and we guide them to our local <a href="http://en.wikipedia.org/wiki/LAPACK">LAPACK</a> implementation instead.</p> + +<p>And we do this because even we don’t believe that scientists implementing these things at low level will give better performance. It’s not about it being “too hard”; it’s something else entirely. We know that it would be a huge amount of wasted effort for a <em>worse</em>, <em>slower</em>, result.</p> + +<p>MPI collective operation implementations make run-time decisions behind the researcher’s back, based on the size of the data and the structure of the communicator being used, to decide whether to use k-ary trees, or hyper-cubes, or split-ring approaches, and in one, two, or multiple phases of communications, to perform the operation. MPI-IO implementations use approaches like data-sieving or two-phase I/O to trade off network communication for disk I/O, and use close integration with the filesystem to inform that tradeoff.</p> + +<p>Somebody had to do all that challenging low-level work, yes. But the idea that those optimizations and that algorithmic work are properly the job of the researcher/application programmer is absurd.</p> + +<h3 id="implementations-got-faster-and-faster">Implementations got faster and faster</h3> + +<p>These highly optimized implementations of these high-level abstractions did not, of course, spring fully formed from somewhere, any more than the <a href="http://www.netlib.org/lapack/">reference implementation of LAPACK/BLAS</a> was blazingly fast. 
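</p> + +<p>(To make the earlier office-visit example concrete, here is a rough mpi4py sketch of the loop-over-sends pattern we see walking in the door, and the one-line collective we point people to instead. The collective is free to use, say, a k-ary tree, giving logarithmic depth rather than P-1 serialized sends from rank 0.)</p> + +<pre><code class="language-python">from mpi4py import MPI +import numpy as np + +comm = MPI.COMM_WORLD +rank = comm.Get_rank() +nprocs = comm.Get_size() + +data = np.arange(1000.0) if rank == 0 else np.empty(1000) + +# the hand-rolled version: rank 0 sends to everyone, one message at a time +if rank == 0: +    for dest in range(1, nprocs): +        comm.Send(data, dest=dest) +else: +    comm.Recv(data, source=0) + +# the collective version: one call, implementation free to optimize +comm.Bcast(data, root=0) +</code></pre> + +<p>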
The abstractions were created with an understanding of both what application programmers needed and what was implementable, and then years and years of work went into developing the algorithms and implementations that we make use of today.</p> + +<p>Initial implementations of MPI-1 collectives were (naturally!) not super optimized, and there were certainly developers who scoffed at the performance and who pointed out they could do better writing low-level network code on their own. They were, in that snapshot in time, narrowly correct; but more broadly and in the longer term, they were flat-out wrong. The most useful and productive approach to a researcher finding out that early versions of those collective operations (say) were slow in some situations was not to break down and re-implement them themselves at low level; it was to file an issue with the library provider, and help them fix it so that it would be faster for everyone.</p> + +<h2 id="these-points-generalize">These points generalize</h2> + +<p>I don’t think anything I’ve said above is particularly controversial. Performance, as well as productivity, for researchers and applications programmers has clearly improved as a result of MPI’s collectives and MPI-IO.</p> + +<p>But for some reason, the idea that this generalizes — that performance as well as productivity of scientific software development would improve if applications developers spent their time using other, newer higher-level constructs while more tool-builders implemented those constructs in efficient ways — is anathema to a section of our HPC community.</p> + +<p>I’ve yet to hear compelling reasons why operations on distributed multidimensional arrays, or hash tables, or trees, are completely different from collectives or IO; why application programmers have to implement them directly or indirectly in a low-level tool like MPI sends and receives or gets and puts rather than having them implemented by experts in higher-level environments like Chapel, or Spark, or Ignite, or any of a zillion other projects from within or outside of the HPC community.</p> + + + + + Objections, Continued + + 2015-04-09T01:00:00-06:00 + https://hpc.social/2015/objections-continued + <p>Thanks for all of the comments about <a href="http://dursi.ca/hpc-is-dying-and-mpi-is-killing-it/">my HPC and MPI post</a>, on the post itself, or on twitter, or via email. While many of the comments and discussions were positive, it won’t surprise you to learn that there were objections, too; so I thought I’d keep updating the Objections section in a new post. I’ve also posted <a href="http://www.dursi.ca/in-praise-of-mpi-collectives-and-mpi-io/">one (hopefully last) followup</a>.</p> + +<p>But do keep sending in your objections!</p> + +<h2 id="further-objections">Further Objections</h2> +<h3 id="youre-saying-wed-have-to-rewrite-all-our-code">You’re saying we’d have to rewrite all our code!</h3> + +<p>If someone had suggested I add this objection to the original list before publishing, I would have rejected it as too straw-man to use; I’d be transparently putting this objection up just to demolish it. 
Clearly, no one would actually claim that “the HPC community should urgently start engaging with and using new technical computing technologies” means “you have to burn all your old stuff to the ground”.</p> + +<p>But sure enough, it came up <em>frequently</em>, in private email, and most dramatically, <a href="https://twitter.com/KpDooty/status/585582597746622464">on twitter</a>.</p> + +<p>Even though this is by far the most common reaction I got, I hope it’s clear to most readers these aren’t the same things. Learning (say) C++ and using it in development of new codes doesn’t mean your old C and Fortran stuff stops working. Or that you’re under an obligation to take the working code in other languages and re-write it all in the new language before ever using it again, to maintain some kind of computational moral consistency.</p> + +<p>Your MPI code won’t stop working for you in a fit of rage because you’re seeing other frameworks. MPI will continue to work and be maintained, exactly because there is 20+ years’ worth of stuff using it.</p> + +<p>But <strong>new</strong> software projects are being started every day, in every field, in every region. This argument is about what we should use for those codes. “Because we’ve always done it that way” isn’t a great reason for a community that’s supposed to be on the cutting edge of computing to keep doing things in one particular framework.</p> + +<h3 id="big-data-and-hpc-are-completely-different-and-its-ridiculous-to-compare-them">Big data and HPC are completely different, and it’s ridiculous to compare them</h3> + +<p>This was a close second in popularity. And this one worries me quite a bit, because it means that there are a lot of people in our community who are disturbingly unaware of what’s going on in computing and data analysis outside the confines of their office.</p> + +<p>It’s absolutely true that there are Big-Data-y things that are mainly just I/O with a little bit of processing. But by and large people want to <em>analyze</em> that large amount of data. And then you end up with absolutely classic big numerical computing problems. To take an early example, Page Rank is, after all, <a href="http://en.wikipedia.org/wiki/PageRank#History">an eigenvalue problem</a>. The drive for next-generation big data platforms like Spark is in no small part about making machine learning algorithms that would be very familiar to us run as efficiently as possible. Let’s take some example machine learning approaches:</p> + +<ul> + <li><a href="http://en.wikipedia.org/wiki/Spectral_clustering">Spectral clustering</a> solves an <a href="https://charlesmartin14.wordpress.com/2012/10/09/spectral-clustering/">equation for the graph Laplacian</a> - which looks exactly like any other <a href="https://www.dursi.ca/spectral clustering heat equation">parabolic PDE on an unstructured mesh</a>. 
(Thanks to Lorena Barba for <a href="https://twitter.com/LorenaABarba/status/586515529973764096">pointing out an embarrassing mistake</a> in an earlier version of that point.)</li> + <li><a href="http://en.wikipedia.org/wiki/Support_vector_machine">Support Vector Machines</a> are kernel-based methods which involve Green’s functions and first-order integral equations.</li> + <li>Much of machine learning involves fitting a model, which means that there are entire <a href="http://mitpress.mit.edu/books/optimization-machine-learning">books</a> written about large-scale efficient optimization solvers for machine learning, including physical science chestnuts like <a href="http://en.wikipedia.org/wiki/Stochastic_gradient_descent">gradient descent</a>.</li> + <li>A common first step in data analysis is dimensional reduction involving (say) <a href="http://en.wikipedia.org/wiki/Principal_component_analysis">PCA</a>, requiring the SVD (or similar factorizations) of huge matrices.</li> + <li>In fact, Linear Algebra is omnipresent in machine learning (as it has to be, with so much, e.g., model fitting), to the point that there are entire <a href="http://stanford.edu/~rezab/nips2013workshop/">conferences</a> on large-scale linear algebra for machine learning.</li> + <li>A lot of the data analyses involve statistical Bayesian inference, requiring <a href="http://link.springer.com/article/10.1023%2FA%3A1020281327116">MCMC</a> calculations.</li> + <li>k-Nearest-Neighbour problems in clustering, kernel density methods, and many other techniques relying on something like a distance or similarity metric require classic N-body solutions like <a href="https://books.google.ca/books?id=6GvSBQAAQBAJ&amp;pg=PA162&amp;lpg=PA162&amp;dq=k-d+trees+machine+learning&amp;source=bl&amp;ots=GdA2RtbSvY&amp;sig=JStlVpNy5CB8cJewtFYPIb53QCI&amp;hl=en&amp;sa=X&amp;ei=cRknVeH9OIG5sAWMkoCgAQ&amp;ved=0CE4Q6AEwCDgK#v=onepage&amp;q=k-d%20trees%20machine%20learning&amp;f=false">k-D trees</a>; and if positions are being updated, they essentially become <a href="http://www.cs.cmu.edu/~agray/nips-final.pdf">N-body problems</a>. And of course, an entire class of high-dimensional optimization problems often used in machine learning are essentially <a href="http://en.wikipedia.org/wiki/Particle_swarm_optimization">tracer particle methods</a>.</li> + <li>As a result of all this high mathematical intensity, machine learning is of course becoming a rapidly growing user of <a href="https://registration.gputechconf.com/form/session-listing&amp;doSearch=true&amp;additional_parameter_selector=none&amp;queryInput=&amp;topic_selector=Machine+Learning+%26+Deep+Learning&amp;type_selector=none">GPUs for their numerical algorithms</a>.</li> +</ul> + +<p>So let’s see; PDEs on unstructured meshes, optimization, gradient descent, large-scale linear algebra, particle methods, GPUs. And of course, time series data of any sort means FFTs. So sure, I don’t know what is running on <em>your</em> HPC cluster, but is it really that different from the above?</p> + +<h3 id="mpi-is-great-for-physics-even-if-less-great-for-the-other-stuff">MPI is great for physics, even if less great for the other stuff</h3> + +<p>I got this by email and on twitter several times.</p> + +<p>Great compared to what? And based on what evidence?</p> + +<p>Say a physics grad student walks into your office who’s going to develop a small bespoke particle code for their dissertation. 
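</p> + +<p>(As an aside, to make the previous section’s point concrete: PageRank really is just a power-method eigensolve. A toy dense sketch follows; a real graph would of course be sparse and distributed, but the numerical kernel is the same.)</p> + +<pre><code class="language-python">import numpy + +# toy 4-page web: links[i,j] = 1 if page j links to page i +links = numpy.array([[0, 1, 1, 0], +                     [1, 0, 0, 1], +                     [1, 1, 0, 1], +                     [0, 0, 1, 0]], dtype=float) + +M = links / links.sum(axis=0)   # column-stochastic transition matrix +n = M.shape[0] +d = 0.85                        # the usual damping factor +G = d*M + (1.0 - d)/n           # "Google matrix": damped + uniform teleport + +pr = numpy.ones(n)/n +for _ in range(50):             # power iteration: converges on the +    pr = G.dot(pr)              # dominant eigenvector (eigenvalue 1) +print(pr) +</code></pre> + +<p>Back to that grad student. 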
Pointing them to MPI, rather than other technologies with unimpeachable HPC bona fides like UPC, Chapel, Co-array Fortran, or (for a particle simulation especially) Charm++, seems like it’s the lazy, easy way for <em>us</em>, and less about what’s actually best for <em>them</em>.</p> + +<p>In what sense is it “great for physics” to have the student increase the amount of code they have to write and debug by a factor of three? In what sense is it great for them to have to re-invent all of the low-level communications algorithms which have been implemented better in other packages? Maybe you could make an argument about stability or performance against UPC/Chapel (although I’d counter-argue you’d get immediate and helpful support from the developers) - what’s the argument against pointing the student to Charm++? Or Intel’s CAF?</p> + +<p>And this doesn’t even begin to cover things like Spark, Flink, or <a href="http://ignite.incubator.apache.org/index.html">Ignite</a> - for simulation, or experimental physics work (which is physics too, right?), which is necessarily heavy on data analysis.</p> + +<h3 id="youre-just-saying-mpi-is-too-hard">You’re just saying MPI is too hard</h3> + +<p>I’m really not. As a community, we don’t mind hard. Solving complex equations is hard, that’s just how it is. We eat hard for breakfast. (And the genomics and big-data communities are the same way, because they’re also filled with top-notch people with big computational problems.)</p> + +<p>I’m saying something different: MPI is needlessly, pointlessly, and uselessly a huge sink of researcher and toolbuilder effort for little if any reward.</p> + +<p>How many grad students have had to tediously decompose a 2d or 3d grid by hand, write halo exchange code, get it debugged and running, run in that crude fashion for a while, then tried moving to overlapped communication and computation, and spent days or weeks trying to get that to work efficiently - and then had to re-write chunks as they needed a new variable laid out differently (or just implemented a really bad transposition), and still gotten performance that an expert would consider poor?</p> + +<p>And regular grid codes are the easy stuff; how many scientist-decades’ worth of effort have gone into implementing and re-implementing tree codes or unstructured meshes, by and large resulting in efficiencies ranging from “meh” to “ugh”?</p> + +<p>Wouldn’t it be better to have experts working on the common lower-level stuff, tuning it and optimizing it, so that the scientists can actually focus on the numerics and not the communications?</p> + +<p>The stuff about levels of abstraction isn’t some aesthetic philosophical preference. And I’m not complaining about MPI because it’s hard; I’m complaining about it because it’s resulted in an enormous waste of researcher time and compute resources. Let the scientists focus on the hard stuff that matters to their research, not the stuff that can be effectively outsourced to toolbuilders.</p> + +<p>Now, we at centres could at least improve <em>this</em> dreadful state of affairs even with MPI just by doing a better job pointing researchers embarking on a code project to libraries and packages like Trilinos or what have you, and stop counseling them to write raw MPI code themselves. 
But of course, we normally don’t, because we keep telling ourselves and the incoming grad students “MPI is great for physics”…</p> + +<h3 id="its-important-for-students-to-know-whats-going-on-under-the-hood-even-if-theyre-using-other-frameworks">It’s important for students to know what’s going on under the hood, even if they’re using other frameworks</h3> + +<p>I do have some sympathy for this point, I will admit.</p> + +<p>But anyone who thinks teaching generation after generation of grad students how to manually decompose a 2d mesh and do halo exchange on it using <code>MPI_Sendrecv()</code> is a productive and rewarding use of time is someone who doesn’t spend enough time doing it.</p> + +<p>As with other pro-low-level arguments: why is MPI automatically the right level to stop at? If we want to teach students how things really work under the covers, why aren’t we going all the way down to Infiniband or TCP/IP, user mode and kernel mode, and the network stack? Or, why don’t we stop a level or two above, draw some diagrams on a whiteboard, and move on to actually solving equations? Why is MPI in particular the right “under the hood” thing to teach, as opposed to GASNet, Charm++, or just pseudo-network-code?</p> + +<p>If the answer to the questions above is “because MPI is what we know and have slides for”, then we need to think about what that implies, and how well it’s serving the research community.</p> + +<h3 id="but-my-new-code-will-need-libraries-based-on-mpi-that-arent-supported-by-chapelupcsparkother-stuff-yet">But my new code will need libraries based on MPI that aren’t supported by Chapel/UPC/Spark/other stuff yet!</h3> + +<p>Fair enough. When you choose what you are going to use to write a program, library and tool support really matter. It’s absolutely true that there are great packages that use MPI, and if your project is going to rely on them, then this isn’t an example of a good project to start experimenting with a new platform on. This is why such a large fraction of numerical code was in FORTRAN77 for so long.</p> + +<p>Co-array Fortran, Chapel, and others do have various degrees of MPI interoperability, so do check that out; but yes, you need what you need.</p> + +<h3 id="but-people-are-starting-to-build-things-based-on-mpi-3-rma">But people <em>are</em> starting to build things based on MPI-3 RMA!</h3> + +<p>This <a href="http://dursi.ca/hpc-is-dying-and-mpi-is-killing-it/#comment-1952126251">comment by Jeff on the original post</a> is by some measure the most interesting objection I’ve heard so far.</p> + +<p>People are legitimately starting to use MPI-3 RMA in the underlying implementations of higher-level tools. If that really took off, then my arguments about MPI not being the right level of abstraction for toolbuilders would clearly be wrong, and a huge part of my post would be rendered irrelevant.</p> + +<p>In that case, I would be completely wrong – and it would be awesome! A higher-level toolset for researchers could finally flourish, the lower-level stuff could be handled by a completely separate group of experts, and MPI would have found its place.</p> + +<p>I want to be clear that I think it would be fantastic - really, the best of all possible worlds - to be wrong in this way.</p> + +<p>I’m going to describe why I really don’t think I am, and what the stumbling blocks are. 
Then I’ll discuss an alternate future which sidesteps the worst of those problems, and how it really could be a path to a very productive and growing HPC future - but it will of course never, ever, happen.</p> + +<p>So MPI-3 - useful RMA, being used. Perfect! To see the problem that concerns me here, consider two questions: (1) What are the benefits of using MPI for this, and (2) what are the downsides?</p> + +<p>On the upside, it’s great that MPI is sufficient to implement these tools. But is it necessary? What is the advantage of using something like MPI over something else, and in particular something lower level? Maybe it would be a little easier or a little harder, but would it make a big difference? Particularly to the end-user of the tool being built?</p> + +<p>I doubt it makes much difference either way; the reason I ask is the downside.</p> + +<p>MPI-3 RMA doesn’t come on its own; it’s part of MPI. And in this context, I’m concerned with two real downsides with using even great parts of MPI for low-level toolbuilding. They’re related: the heavy-weight forum process, and the enormous baggage of backwards compatibility.</p> + +<p>Let’s take the forum process first. Let’s say there are two competing tools you could use to build your next lower-layer tool: MPI-3 RMA, or some other low-level network abstraction layer. (What I’m picturing is something like <a href="https://github.com/ofiwg/libfabric">OFWG Libfabric</a>, which you can probably tell I’m quite taken with, but that’s not really right here. But something at roughly that level or a little higher.)</p> + +<p>You’re starting to build your new tool, which contains a number of really innovative ideas; but now you’ve discovered you need one additional feature in either package.</p> + +<p>Which will get you there first?</p> + +<p>The MPI forum was really able to innovate with MPI-3 RMA, because they were nearly starting afresh - or at least complementary with what had gone before. But now that MPI-3 is out, and a number of projects have used it, the spec is essentially encased in carbonite; the API in its every last detail will outlive us all. None of the existing APIs will change.</p> + +<p>That’s ok, because the Forum has shown its willingness to add new functions to the spec when justified. Your case sounds interesting; you should get your answer in a couple of years or so.</p> + +<p>And that’s kind of crazy for a low-level network abstraction layer. The other package - whatever it is - won’t have that sort of friction.</p> + +<p>There’s another issue in terms of new features; that’s the backwards compatibility legacy.</p> + +<p>Let’s take something like fault tolerance, which is important at extreme scale - but will eventually become important for more moderate scales, as well.</p> + +<p>For a really low-level network abstraction, dealing with fault tolerance isn’t an enormous difficulty. For something higher level like MPI-3 RMA, it’s more challenging, but it’s still something where one could imagine how it might go.</p> + +<p>But for MPI-3+ to develop a feature like fault tolerance, it will have to be created in such a way that it integrates seamlessly with every single MPI feature that has ever existed, without altering any of the semantics of a single one of those calls. The backwards compatibility requirements are crushing.</p> + +<p>So this is sort of the tragedy of MPI-3 RMA. 
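</p> + +<p>For readers who haven’t used it, the interface itself is genuinely attractive. Here is a minimal mpi4py sketch (fence synchronization, with details like window info and assertions elided) of each rank depositing a value directly into its neighbour’s memory, with no matching receive posted anywhere:</p> + +<pre><code class="language-python">from mpi4py import MPI +import numpy as np + +comm = MPI.COMM_WORLD +rank = comm.Get_rank() +nprocs = comm.Get_size() + +# each rank exposes one double through an RMA window +win = MPI.Win.Allocate(MPI.DOUBLE.Get_size(), comm=comm) +mem = np.frombuffer(win.tomemory(), dtype='d') +mem[0] = -1.0 + +value = np.array([1.0*rank]) + +win.Fence()                        # open an access epoch +win.Put(value, (rank+1) % nprocs)  # one-sided: the target does nothing +win.Fence()                        # close the epoch; data now visible + +print("rank %d now holds %g" % (rank, mem[0])) +win.Free() +</code></pre> + +<p>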
It’s a great thing that may have just come too late in the lifecycle of a project to be able to have its full impact.</p> + +<p>Let’s imagine a world where we could just shrug this stuff off. Let’s imagine a new framework – MPING, MPI++, whatever – which is a substantially pared-down version of MPI. It’s an MPI that has decided what it wants to be: a low-level layer for toolbuilders, never to be taught to grad students who are planning to write application software.</p> + +<p>It contains only pared-to-the-bone versions of MPI-3 RMA, which are demonstrably being found useful; MPI collectives, which are fantastic; MPI-IO, which is also fantastic; and auxiliary stuff like the datatype creation routines, etc. The communications semantics for everything are greatly relaxed, which would confuse the heck out of newbie end users, but toolbuilders can deal with it. And there are no decades of backwards compatibility to fight with.</p> + +<p>This vision actually discourages me a bit, because it would be terrific; there’d be an active, vendor-supported, high-performance, productive network abstraction layer for toolbuilders; and no confusion about who it was for. We could build high-productivity tools for scientific application writing atop a stable, high-performance foundation.</p> + +<p>And of course, it will never, ever, happen.</p> + + + + + HPC is dying, and MPI is killing it + + 2015-04-03T01:00:00-06:00 + https://hpc.social/2015/hpc-is-dying-and-mpi-is-killing-it + <p><img alt="King Canute" src="http://news.bbcimg.co.uk/media/images/53009000/jpg/_53009665_canutewaves.jpg" /></p> + +<p><em>Pictured: The HPC community bravely holds off the incoming tide of new technologies and applications. Via <a href="http://www.bbc.com/news/magazine-13524677">the BBC</a></em>.</p> + +<p>This should be a golden age for High Performance Computing.</p> + +<p>For decades, the work of developing algorithms and implementations for tackling simulation and data analysis problems at the largest possible scales was obscure, if important, work. Then, suddenly, in the mid-2000s, two problems — analyzing internet-scale data, and interpreting an incoming flood of genomics data — arrived on the scene with data volumes and performance requirements which seemed quite familiar to HPCers, but with a size of audience unlike anything that had come before.</p> + +<p>Suddenly, discussions of scalability, accuracy, large-scale data storage, and distributed matrix arithmetic all became mainstream and urgent. The number of projects and workshops addressing these topics exploded, and new energy went into implementing solutions to problems faced in these domains.</p> + +<p>In that environment, one might expect that programmers with HPC experience – who have dealt routinely with terabytes and now petabytes of data, and have years or decades of experience with designing and optimizing distributed memory algorithms – would be in high demand.</p> + +<p>They are not.</p> + +<p><img alt="Job Trends" src="https://www.dursi.ca/assets/imgs/hpc_jobgraph.png" /></p> + +<p><em><a href="http://www.indeed.com/jobtrends?q=hadoop%2Cspark%2Chpc%2Cmpi">Indeed.com job trends data</a>. 
Note that as many MPI jobs plotted above require certifications with “Master Patient Index” or “Meetings Professionals International” as are seeking someone who knows how to call MPI_Send</em>.</p> + +<p><img alt="Google Trends" src="https://www.dursi.ca/assets/imgs/google_trends.png" /></p> + +<p><em><a href="https://trends.google.com/trends/explore/TIMESERIES/1564090800?hl=en-US&amp;tz=240&amp;cat=5&amp;date=today+5-y&amp;q=MPI,hadoop,spark&amp;sni=3">Google trends data for MPI, Hadoop, and Spark</a></em></p> + +<p>Instead of relying on those with experience in existing HPC technology stacks or problems, people tackling these internet-scale machine learning problems and genomic data analysis tasks have been creating their own parallel computing stacks. New and rediscovered old ideas are flourishing in new ecosystems, and demand for scalable and accurate computation with these new tools is exploding — while the HPC community resolutely stays on the sidelines, occasionally cheering itself with hopeful assertions of relevance like <a href="http://sc14.supercomputing.org">SC14</a>’s rather plaintive tagline, <a href="http://sc14.supercomputing.org">“HPC Matters”</a>.</p> + +<p>Because within the HPC community, the reaction to these new entrants is mostly not excitement at novel technologies and interesting new problems to solve, but scoffing at solutions which were <a href="http://en.wikipedia.org/wiki/Not_invented_here">Not Invented Here</a>, and suggestions that those who use other platforms simply aren’t doing “real” high performance computing – and maybe don’t know what they’re doing at all. You can see this attitude even in <a href="http://www.theplatform.net/2015/03/03/dna-sequencing-not-quite-hpc-yet/">otherwise well-researched and thought-out pieces</a>, where the suggestion is that it is genomics researchers’ responsibility to alter what they are doing to better fit existing HPC toolsets. This thinking misses the rather important fact that it is HPC’s job to support researchers’ computing needs, rather than vice versa.</p> + +<p>The idea that the people at Google doing large-scale machine learning problems (which involve huge sparse matrices) are oblivious to scale and numerical performance is just delusional. The suggestion that the genomics community is a helpless lot who just don’t know any better and need to be guided back to the one true path is no less so. The reality is simpler; HPC is wedded to a nearly 25-year-old technology stack which doesn’t meet the needs of those communities, and, if we were being honest with ourselves, is meeting fewer and fewer of the needs of even our traditional user base.</p> + +<p>If HPCers don’t start engaging with these other big-computing communities, both exporting our expertise to new platforms and starting to make use of new tools and technologies from within HPC and beyond, we risk serving an ever-narrowing sliver of big research computing. And eventually that last niche will vanish once other technologies can serve even their needs better.</p> + +<h2 id="why-mpi-was-so-successful">Why MPI was so successful</h2> + +<p><a href="http://en.wikipedia.org/wiki/Message_Passing_Interface">MPI</a>, long the lingua franca of HPC, has nothing to apologize for. It was inarguably one of the “killer apps” which supported the <a href="http://en.wikipedia.org/wiki/Beowulf_cluster">initial growth</a> of cluster computing, helping shape what the computing world has become today. 
It has supported a substantial majority of all the supercomputing work scientists and engineers have relied upon for the past two-plus decades. Heroic work has gone into MPI implementations, and development of algorithms for such MPI features as <a href="https://www.cac.cornell.edu/VW/MPIcc/default.aspx?id=xup_guest">collective operations</a>. All of this work could be carried over to new platforms by a hypothetical HPC community that actively sought to engage with and help improve these new stacks.</p> + +<p>MPI, the Message Passing Interface, began as a needed standardization above a dizzying array of high-performance network layers and often-proprietary libraries for communicating over these networks. It started with routines for explicitly sending and receiving messages, very useful collective operations (broadcast, reduce, etc.), and routines for describing the layout of data in memory to more efficiently communicate that data. It eventually added sets of routines for implicit message passing (one-sided communications) and parallel I/O, but remained essentially at the <a href="http://en.wikipedia.org/wiki/OSI_model#Layer_4:_transport_layer">transport layer</a>, with sends and receives and gets and puts operating on strings of data of uniform types.</p> + +<h2 id="why-mpi-is-the-wrong-tool-for-today">Why MPI is the wrong tool for today</h2> + +<p>But nothing lasts forever, and at the cutting edge of computing, a quarter-century is coming quite close to an eternity. Not only has MPI stayed largely the same in those 25 years, the idea that “everyone uses MPI” has made it nearly impossible for even made-in-HPC-land tools like <a href="http://chapel.cray.com">Chapel</a> or <a href="http://upc.lbl.gov">UPC</a> to make any headway, much less quite different systems like <a href="https://spark.apache.org">Spark</a> or <a href="https://flink.apache.org">Flink</a>, meaning that HPC users are largely stuck with using an API which was a big improvement over anything else available 25 years ago, but now clearly shows its age. Today, MPI’s approach is hardly ever the best choice for anyone.</p> + +<h3 id="mpi-is-at-the-wrong-level-of-abstraction-for-application-writers">MPI is at the wrong level of abstraction for application writers</h3> + +<p>Programming at the transport layer, where every exchange of data has to be implemented with lovingly hand-crafted sends and receives or gets and puts, is an incredibly awkward fit for numerical application developers, who want to think in terms of distributed arrays, data frames, trees, or hash tables. Instead, with MPI, the researcher/developer needs to manually decompose these common data structures across processors, and every update of the data structure needs to be recast into a flurry of messages, synchronizations, and data exchange. And heaven forbid the developer thinks of a new, better way of decomposing the data in parallel once the program is already written. Because in that case, since a new decomposition changes which processors have to communicate and what data they have to send, every relevant line of MPI code needs to be completely rewritten. This does more than simply slow down development; the huge cost of restructuring parallel software puts up a formidable barrier to improvement once a code is mostly working.</p> + +<p>How much extra burden does working at this level of abstraction impose? 
Let’s take a look at a trivial example that’s pretty much a best-case scenario for MPI, an explicit solver for a 1D <a href="http://en.wikipedia.org/wiki/Heat_equation">diffusion equation</a>. Regular communication on a regular grid is just the sort of pattern that is most natural for MPI, and so you will find this example in just about <a href="https://computing.llnl.gov/tutorials/mpi/">every</a> <a href="http://beige.ucs.indiana.edu/I590/node71.html">MPI</a> <a href="https://github.com/ljdursi/mpi-tutorial/blob/master/presentation/presentation.md">tutorial</a> <a href="https://www.hpc.ntnu.no/display/hpc/Diffusion">out</a> <a href="https://www.cs.princeton.edu/picasso/seminarsS04/MPI_Day2.pdf">there</a>.</p> + +<p>At <a href="https://www.dursi.ca/feed.xml#appendix">the end</a> of this post are sample programs, written as similarly as possible, solving the problem in MPI, Spark, and Chapel. I’d encourage you to scroll down and take a look. The lines-of-code count follows:</p> + +<center> +<table><thead> +<tr> +<th>Framework&nbsp;&nbsp;</th> +<th>Lines&nbsp;&nbsp;</th> +<th>Lines of Boilerplate</th> +</tr> +</thead><tbody> +<tr> +<td><a href="https://www.dursi.ca/feed.xml#mpi">MPI+Python</a></td> +<td>52</td> +<td>20+</td> +</tr> +<tr> +<td><a href="https://www.dursi.ca/feed.xml#spark">Spark+Python</a>&nbsp;&nbsp;&nbsp;</td> +<td>28</td> +<td>2</td> +</tr> +<tr> +<td><a href="https://www.dursi.ca/feed.xml#chapel">Chapel</a></td> +<td>20</td> +<td>1</td> +</tr> +</tbody></table> +</center> + +<p>Now, this isn’t an entirely fair comparison. It should be mentioned that in addition to the functionality of the MPI program, the Spark version is automatically fault-tolerant, and the Chapel version has features like automatically reading parameters from the command line. In addition, changing the data layout across processors in the Chapel version would only involve changing the variable declaration for the global arrays, and maybe writing some code to implement the decomposition in the unlikely event that your distributed array layout wasn’t already supported; similarly, in Spark, it would mean just changing the hash function used to assign partitions to items.</p> + +<p>But even lacking those important additional functionalities, the MPI version is over twice as long as the others, with an amount of boilerplate that is itself the entire length of the Chapel program. The reason is quite simple. In Chapel, the basic abstraction is of a domain – a dense array, sparse array, graph, or what have you – that is distributed across processors. In Spark, it is a <a href="http://spark.apache.org/docs/1.2.1/quick-start.html">resilient distributed dataset</a>, a table distributed in one dimension. Either of those can map quite nicely onto various sorts of numerical applications. In MPI, the “abstraction” is of a message. And thus the huge overhead in lines of code.</p> + +<p>And this is by far the simplest case; introducing asynchronous communications, or multiple variables with differing layouts, or allowing processors to get out of sync, or requiring load balancing, causes the level of complexity to explode. Even just moving to 2D, the amount of MPI boilerplate almost exactly doubles, whereas the only lines that change in the Chapel program are the array declaration and the line that actually executes the stencil computation.</p> + +<p>On the one hand, this increase in complexity is perfectly reasonable; those are more challenging cases of networked computation. 
But on the other hand, of all available models, MPI is the only one where the researcher is required to reinvent from scratch the solutions to these problems inside the heart of their own application software. This requires them to focus on network programming instead of (say) differential-equation solving; and to completely re-architect the entire thing if their application needs change.</p> + +<p>Now, none of this is necessarily a problem. Just because MPI is hugely and unnecessarily burdensome for individual scientists to use directly for complex applications doesn’t mean that it’s bad, any more than (say) sockets or IB verbs programming is; it could be a useful network-hardware-agnostic platform for higher-level tools to be built upon. Except…</p> + +<h3 id="mpi-is-at-the-wrong-level-of-abstraction-for-tool-builders">MPI is at the wrong level of abstraction for tool builders</h3> + +<p>The original book on MPI, <a href="http://www.mcs.anl.gov/research/projects/mpi/usingmpi/">Using MPI</a>, dedicated one of its ten chapters (“Parallel libraries”) to explicitly describing features intended to make it easier for tool builders to build libraries and tools based on MPI, and two others describing implementations and comparing to other models with relevance to tool-builders.</p> + +<p>This was quite prescient; message-passing based frameworks would indeed soon become very important platforms for building complex parallel and distributed software in different communities. <a href="http://www.erlang.org">Erlang</a>, released to the public just five years later, is a functional language with message-passing built in that has played a very large role in many communications and control environments. Rather more recently, <a href="http://akka.io">Akka</a> is a Scala-based message passing framework that, for instance, Spark is built on.</p> + +<p>However, all these years later, while there are several specific numerical libraries built on MPI that MPI programs can use, there are no major general-purpose parallel programming frameworks that primarily use MPI as an underlying layer. Both <a href="http://gasnet.lbl.gov">GASNet</a> (that UPC and Chapel implementations make use of) and <a href="http://charm.cs.illinois.edu/research/charm">Charm++</a> (a parallel computing framework often used for particle simulation methods, amongst other things) <em>have</em> MPI back ends, grudgingly, but they are specifically not recommended for use unless nothing else works; indeed, they have both chosen to re-architect the network-agnostic layer, at significant effort, themselves. (Of the two, GASNet is the more diplomatic about this, <a href="http://gasnet.lbl.gov/dist/README">“…bypassing the MPI layer in order to provide the best possible performance”</a>, whereas the Charm++ group finds MPI problematic enough that, if you must use MPI for “legacy” applications, they recommend using <a href="http://charm.cs.uiuc.edu/research/ampi/">an MPI-like layer built on top of Charm++</a>, rather than building Charm++ on top of MPI). 
Similarly, the group implementing Global Arrays – an example returned to time and again in the MPI books – eventually implemented its own low-level library, <a href="http://hpc.pnl.gov/armci/">ARMCI</a>.</p> + +<p>Probably the closest to a truly MPI-based parallel scientific programming framework is <a href="http://trilinos.org">Trilinos</a>, which is a well-integrated set of libraries for meshing and numerics rather than a parallel programming model.</p> + +<p>The reason for this disconnect is fairly straightforward. MPI was aimed at two sets of users – the researchers writing applications, and the toolmakers building higher-level tools. But compromises that were made to the semantics of MPI to make it easier to use and reason about for the scientists, such as the <a href="http://www.mpi-forum.org/docs/mpi-1.1/mpi-11-html/node41.html">in-order guarantee</a> and reliability of messages, made it very difficult to write efficient higher-level tools on top of.</p> + +<p>A particularly strong case study of this dynamic is MPI-2’s one-sided communications, which were aimed squarely at tool developers (certainly a very small fraction of applications written directly in MPI ever used these features). This set of routines had extremely strict semantics, and as a result, they were <a href="http://www.cs.berkeley.edu/~bonachea/upc/mpi2.html">soundly</a> <a href="http://dl.acm.org/citation.cfm?id=1359705">panned</a> as being unfit for purpose, and more or less studiously ignored. MPI-3’s <a href="https://www.cac.cornell.edu/VW/MPIoneSided/default.aspx?id=xup_guest">new one-sided communications</a> routines, introduced 14 years later, <a href="http://blogs.cisco.com/performance/the-new-mpi-3-remote-memory-access-one-sided-interface">largely fix this</a>; but by this point, with GASNet and ARMCI amongst others available and supporting multiple transports, and coming complete with attractive optional higher-level programming models, there’s little compelling reason to use MPI for this functionality.</p> + +<h3 id="mpi-is-more-than-you-need-for-modest-levels-of-parallelism">MPI is more than you need for modest levels of parallelism</h3> + +<p>At HPC centres around the world, the large majority of HPC use is composed of jobs requiring 128 cores or fewer. At that point, most of the parallelism heavy lifting is best done by threading libraries. For the very modest level of inter-node IPC needed for these 2-4 node jobs, the bare-metal performance of MPI simply isn’t worth the bare-metal complexity. At that level of parallelism, for most applications almost any sensible framework, whether GASNet-based, or Charm++, or Spark, or down to Python multiprocessing or iPython cluster will give decent performance.</p> + +<h3 id="mpi-is-less-than-you-need-at-extreme-levels-of-parallelism">MPI is less than you need at extreme levels of parallelism</h3> + +<p>On the other hand, at the emerging extreme high end of supercomputing – the million-core level and up – the bare-metal aspect of MPI causes different sorts of problems.</p> + +<p>The <a href="http://en.wikipedia.org/wiki/Mean_time_between_failures">MTBF</a> of modern motherboards is on the order of a few hundred thousand hours. If you’re running on a million cores (say 32,000 nodes or so) for a 24-hour day, failure of some node or another during the run becomes all but certain. 
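</p> + +<p>The arithmetic is worth spelling out; a quick back-of-the-envelope sketch, assuming independent node failures and a round-number 300,000-hour MTBF:</p> + +<pre><code class="language-python">import math + +# a day-long run on 32,000 nodes, each with an assumed (round-number) +# MTBF of 300,000 hours, failing independently +nodes, hours, mtbf = 32000, 24.0, 300.0e3 + +expected_failures = nodes*hours/mtbf    # ~2.6 node failures expected +p_clean = math.exp(-expected_failures)  # Poisson: chance of zero failures + +print("expected failures: %.1f" % expected_failures)        # 2.6 +print("P(no failures in the run): %.1f%%" % (100*p_clean))  # ~7.7% +</code></pre> + +<p>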
At that point, fault-tolerance, and an ability to re-balance the computation on the altered set of resources, becomes essential.</p> + +<p>Today, MPI’s error handling model is what it has always been; you can assign an <a href="http://www.mpich.org/static/docs/v3.1/www3/MPI_Errhandler_set.html">errorhandler</a> to be called when an error occurs in an MPI program, and when that happens you can… well, you can print a nice message before you crash, instead of crashing <em>without</em> the nice message.</p> + +<p>This isn’t due to anyone’s lack of trying; the <a href="https://svn.mpi-forum.org/trac/mpi-forum-web/wiki/FaultToleranceWikiPage">MPI Fault Tolerance Working Group</a> has been doing yeomanlike work attempting to bring some level of real fault tolerance to MPI. But it’s a truly difficult problem, due in large part to the very strict semantics imposed by MPI. And after building up 25 years of legacy codes that use MPI, there is absolutely no chance that the pull of the future will exceed the drag of the past in the minds of the MPI Forum - none of those semantics will ever change, for backwards compatibility reasons.</p> + +<p>Balancing and adapting to changing resources are similarly weak spots for the MPI approach; there’s no way that MPI can possibly be of any use in redistributing your computation for you, any more than you could expect TCP or Infiniband Verbs to automatically do that for you. If the highest-level abstraction a library supports is the message, there is no way that the library can know anything about what your data structures are or how they must be migrated.</p> + +<p>Fault-tolerance and adaptation are of course genuinely challenging problems; but (for instance) Charm++ (and AMPI atop it) can do adaptation, and Spark can do fault tolerance. But that’s because they were architected differently.</p> + +<h2 id="our-users-deserve-the-tools-best-for-them">Our users deserve the tools best for them</h2> + +<p>None of this is to say that MPI is bad. But after 25 years of successes, it’s become clear what the limitations are of having the communications layer written within the researchers’ application. And today those limitations are holding us and our users back, especially compared to what can be done with other alternatives that are already out there on the market.</p> + +<p>And none of this is to say that we should uninstall MPI libraries from our clusters. For the near term, MPI will remain the best choice for codes that have to run on tens of thousands of cores and have relatively simple communications patterns.</p> + +<p>But it’s always been true that different sorts of computational problems have required different sorts of parallel tools, and it’s time to start aggressively exploring those that are already out there, and building on what we already have.</p> + +<p>We have to start using these new tools when they make sense for our users, which is, demonstrably, quite often. It’s already gotten to the point where it’s irresponsible to teach grad students MPI without also exposing them to tools that other groups find useful.</p> + +<p>The HPC community can, and should, be much more than just consumers of these external technologies. Our assertions of relevance don’t have to be purely aspirational. We have real expertise that can be brought to bear on these new problems and technologies. Excellent work has been done in MPI implementations on important problems like the network-agnostic layer, job launching, and collective algorithms. 
The people who wrote those network-agnostic layers are already looking into refactoring them into <a href="https://github.com/ofiwg/libfabric">new</a> <a href="https://www.olcf.ornl.gov/center-projects/common-communication-interface/">projects</a> that can be widely used in a variety of contexts, at lower levels of the stack.</p> + +<p>But we need to give up the idea that there is a one-size-fits-all approach to large-scale technical computing, and that it has always been and will always be MPI. Other groups are using different approaches for a reason; we can borrow from them to the benefit of our users, and contribute to those approaches to make them better.</p> + +<h2 id="we-can-build-the-future">We can build the future</h2> + +<p>There are new ways of writing scalable code out there, and completely new classes of problems to tackle, many of which were totally inaccessible just years ago. Isn’t that why we got into this line of work? Why don’t more HPC centres have people contributing code to the <a href="https://github.com/chapel-lang/chapel">Chapel project</a>, and why isn’t everyone at least playing with Spark, which is <a href="http://www.dursi.ca/post/spark-in-hpc-clusters/">trivial to get up and running on an HPC cluster</a>? Why are we spending time scoffing at things, when we can instead be making big research computing better, faster, and bigger?</p> + +<p>Are we the big research computing community, or the MPI community? Because <em>one</em> of those two has a bright and growing future.</p> + +<p><em>Many thanks to my colleague Mike Nolta for many suggestions and improvements to this piece and the arguments it contains.</em></p> + +<h2 id="appendix">Appendix</h2> + +<p>(<strong><em>Update</em></strong>: see objections that came up after the publication of this post, on twitter and email, <a href="http://dursi.ca/post/objections-continued/">on this new post</a>. And see what I like about MPI and why it suggests low-level applications programming isn’t the answer <a href="http://www.dursi.ca/post/in-praise-of-mpi-collectives-and-mpi-io/">on the third post</a>.)</p> + +<h3 id="objections">Objections</h3> + +<p><strong>But the HPC market is <a href="http://www.slideshare.net/insideHPC/hpc-market-update-from-idc">actually growing</a>, so this is all clearly nonsense! Everything’s fine!</strong></p> + +<p>It’s completely true that, although much more slowly in relative or absolute terms than the Hadoop or Spark market, the HPC hardware market is still growing. But that’s not much of a reed to cling to.</p> + +<p>Famously, <a href="http://www.tumotech.com/wp-content/uploads/2014/11/mainframe-computer-sales.png">minicomputer sales</a> (things like System/36 or VAXen) were still growing rapidly a decade or so after personal computers started to be available, well into the mid-80s. They kept selling, faster and faster, because they were much better for the problems they were intended for — right up until the point that they weren’t.</p> + +<p>Similarly, photo film sales were <a href="http://www.businessweek.com/1999/99_31/b3640098.htm">going up, if slower, until 2003</a>(!). Let’s continue the <a href="http://en.wikipedia.org/wiki/Disruptive_innovation">disruptive innovation</a> clichés as analogies for a moment — as we all now know, Kodak invented the digital camera. The film company’s problem wasn’t that it lacked the expertise that was needed in the new era; it simply flatly refused to use its expertise in these new ways. 
And as a result it is a shell of its former self today – a tiny niche player. Bringing the comparison closer to home is the experience of the once world-striding Blackberry, which ridiculed the iPhone as being, amongst other things, an inefficient user of network communications. (<a href="http://www.theglobeandmail.com/report-on-business/the-inside-story-of-why-blackberry-is-failing/article14563602/?page=all">“It’s going to collapse the network!”</a>)</p> + +<p>Take a look at the market for developers. We’ve clearly passed the market peak for MPI programmers, and if HPC continues to be an MPI-only shop, our community will be shut out of the exciting things that are going on today, while many of our users begin being attracted by the benefits of these other approaches for their problems.</p> + +<p><strong>But MPI is much faster than the others because it’s bare metal!</strong></p> + +<p>If this is so important, why don’t HPC programmers save even <em>more</em> overhead by packing raw Infiniband frames themselves?</p> + +<p>HPC programmers should know better than most that once you have some software that solves a complex problem well, getting it to go fast is comparatively straightforward, given enough developer hours.</p> + +<p>It’s absolutely true that current MPI implementations, having had decades to work on it, have got screamingly fast MPI-1 functionality and, to a lesser extent, decent one-sided communications performance. But we live in an era where even <a href="http://julialang.org/benchmarks/">JavaScript can have the same order-of-magnitude performance as C or Fortran</a> - and JavaScript might as well have been explicitly designed to be un-en-fastable.</p> + +<p>Chapel already can be <a href="http://chapel.cray.com/hpcc/hpcc09.pdf">as fast as or faster than MPI in many common cases</a>; indeed, higher-level abstractions allow compilers and runtimes to make optimizations that can’t be performed on individual library calls.</p> + +<p>And unless the basic abstractions used by Spark (<a href="http://www.thecloudavenue.com/2014/01/resilient-distributed-datasets-rdd.html">RDDs</a>) or Flink or the myriad of other options are inherently broken in some way to make fast implementations impossible — and there’s no evidence that they are — they too will get faster. There’s no reason why blazing-fast network communications should have to be done at the application layer – in the code that is describing the actual scientific computation. The HPC community can choose to help with implementing that tuning, bringing their expertise and experience to bear. Or they can choose not to, in which case it will happen anyway, without them.</p> + +<p><strong>But MPI will adopt new feature X which will change everything!</strong></p> + +<p>Let me tell you a story.</p> + +<p>MPI-1 and MPI-2 used 32-bit integers for all counts. This means that programs using MPI – the lingua franca of supercomputing, in an era when outputting terabytes of data was already routine – could not (for instance) write out more than 2e9 objects at once without taking some meaningless additional steps.</p> + +<p>This was discussed at length in the process leading up to the 2012 release of MPI-3, the first .0 release in 14 years. After much discussion it was decided that changing things would be a <a href="http://blogs.cisco.com/performance/can-i-mpi_send-and-mpi_recv-with-a-count-larger-than-2-billion">“backwards compatibility nightmare”</a>, so the result was that the existing API… was left exactly as it is. But! 
There was a new larger data type, MPI_Count, which is used in a couple of new routines (like <code>MPI_Type_get_extent_X</code>, in addition to the old <code>MPI_Type_get_extent</code>) which simplify some of the pointless steps you have to take. Yay?</p> + +<p>And that’s the story of how, in 2015, our self-imposed standard of supercomputing has a hardcoded 32-bit limit throughout almost its entire API, limiting how many objects it can deal with at once without going through pointless but straightforward hoops. A 32-bit limit: 90’s retro-cool computing, like chiptune music and pixelated graphics with 8-bit color. This is unfortunate, but inevitable; after a tool has existed for 25 years, maintainers feel more responsibility towards the past than to the future. Which is perfectly reasonable, and maybe even the correct decision for that tool; but that’s when one needs to start looking elsewhere for new projects.</p> + +<p><strong>But these other tools use programming languages I find to be icky.</strong></p> + +<p>Yes, well, perhaps the various alternatives involve languages that lack the austere beauty of Fortran and Matlab, but so it goes. One approach to this would be to help expand these tools’ reach into the HPC community by writing bindings and APIs for languages more familiar in this space.</p> + +<p><strong>But the Hadoop-y communities are incredibly naive about high performance interconnects, multicore/shared memory, complex scheduling,…</strong></p> + +<p>Yes! This is 100% true. And on the HPC community’s side, we’re quite innocent when it comes to fault tolerance at scale, building reusable tools, architecting APIs so that normal scientists can use them while hiding communications complexity beneath, and integrating nicely with systems that industry cares about. There’s a window where we can help each other and contribute meaningfully to each group’s success. 
But other communities can and will eventually figure out, say, multicore with or without our help.</p> + +<h2 id="sample-code">Sample Code</h2> + +<p>Below are code samples referred to earlier in the piece.</p> + +<h3 id="mpi">MPI</h3> + +<p>Here is the 1D diffusion in MPI, Python:</p> + +<pre><code class="language-python">#!/usr/bin/env python +import numpy +from mpi4py import MPI + +def ranksProcs(): # boilerplate + comm = MPI.COMM_WORLD + rank = comm.Get_rank() + nprocs = comm.Get_size() + leftProc = rank-1 if rank &gt; 0 else MPI.PROC_NULL + rightProc = rank+1 if rank &lt; nprocs-1 else MPI.PROC_NULL + return (comm, rank, nprocs, leftProc, rightProc) + +def localnitems(procnum, nprocs, nitems): # boilerplate + return (nitems + procnum)/nprocs + +def myRange(procnum, nprocs, ncells): # boilerplate + start = 0 + for p in xrange(procnum): + start = start + localnitems(p, nprocs, ncells) + locNcells = localnitems(procnum, nprocs, ncells) + end = start + locNcells - 1 + return (start, locNcells, end) + +def ICs(procnum, nprocs, ncells, leftX, rightX, ao, sigma): + start, locNcells, end = myRange(procnum, nprocs, ncells) + dx = (rightX-leftX)/(ncells-1) + startX = leftX + start*dx + x = numpy.arange(locNcells*1.0)*dx + startX + temperature = ao*numpy.exp(-(x*x)/(2.*sigma*sigma)) + + return temperature + +def guardcellFill(data, comm, leftProc, rightProc, leftGC, rightGC): # boilerplate + rightData = numpy.array([-1.]) + leftData = numpy.array([-1.]) + + comm.Sendrecv(data[1], leftProc, 1, rightData, rightProc, 1) + comm.Sendrecv(data[-2], rightProc, 2, leftData, leftProc, 2) + + data[0] = leftGC if leftProc == MPI.PROC_NULL else leftData + data[-1] = rightGC if rightProc == MPI.PROC_NULL else rightData + return data + +def timestep(olddata, coeff): + newdata = numpy.zeros_like(olddata) + newdata[1:-1] = olddata[1:-1] + + coeff*(olddata[0:-2] - 2.*olddata[1:-1] + olddata[2:]) + return newdata + +def simulation(ncells, nsteps, leftX=-10., rightX=+10., sigma=3., ao=1., + coeff=.375): + comm, procnum, nprocs, leftProc, rightProc = ranksProcs() + T = ICs(procnum, nprocs, ncells, leftX, rightX, ao, sigma) + leftGC = T[0] # fixed BCs + rightGC = T[-1] + print "IC: ", procnum, T + for step in xrange(nsteps): + T = timestep(T, coeff) + T = guardcellFill(T, comm, leftProc, rightProc, + leftGC, rightGC) # boilerplate + + print "Final: ", procnum, T + +if __name__ == "__main__": + simulation(100, 20) +</code></pre> + +<h3 id="spark">Spark</h3> + +<p>1D diffusion in Spark, python (is fault-tolerant)</p> + +<pre><code class="language-python">import numpy +from pyspark import SparkContext + +def simulation(sc, ncells, nsteps, nprocs, leftX=-10., rightX=+10., + sigma=3., ao=1., coeff=.375): + dx = (rightX-leftX)/(ncells-1) + + def tempFromIdx(i): + x = leftX + dx*i + dx/2 + return (i, ao*numpy.exp(-x*x/(2.*sigma*sigma))) + + def interior(ix): # boilerplate + return (ix[0] &gt; 0) and (ix[0] &lt; ncells-1) + + def stencil(item): + i,t = item + vals = [ (i,t) ] + cvals = [ (i, -2*coeff*t), (i-1, coeff*t), (i+1, coeff*t) ] + return vals + filter(interior, cvals) + + def rangePartitioner(i): # a minimal block partitioner, assumed here; any index-to-partition map works + return min(i*nprocs//ncells, nprocs-1) + + temp = map(tempFromIdx, range(ncells)) + data = sc.parallelize(temp).partitionBy(nprocs, rangePartitioner) + print "IC: " + print data.collect() + for step in xrange(nsteps): + print step + stencilParts = data.flatMap(stencil) + data = stencilParts.reduceByKey(lambda x,y:x+y) + print "Final: " + print data.collect() + +if __name__ == "__main__": + sc = SparkContext(appName="SparkDiffusion") + simulation(sc, 100, 20, 4) +</code></pre> + 
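+<p>(A note on the Spark version: the stencil update is expressed as a <code>flatMap</code> that emits each cell’s contributions to itself and its neighbours, followed by a <code>reduceByKey</code> that sums them, with no explicit communication anywhere. The <code>rangePartitioner</code> helper is a minimal block partitioner assumed for this sketch; any deterministic map from cell index to partition would do.)</p> + 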
+<h3 id="chapel">Chapel</h3> + +<p>1D diffusion in Chapel (can read parameters from command line)</p> + +<pre><code class="language-c">use blockDist; + +config var ncells = 100, nsteps = 20, leftX = -10.0, rightX = +10.0, + sigma = 3.0, ao = 1.0, coeff = .375; + +proc main() { + const pDomain = {1..ncells} dmapped Block({1..ncells}); + const interior = pDomain.expand(-1); + const dx = (rightX - leftX)/(ncells-1); + var x, temp, tempNew : [pDomain] real = 0.0; + + forall i in pDomain do { + x[i] = leftX + (i-1)*dx; + temp[i] = ao*exp(-x[i]*x[i]/(2.0*sigma*sigma)); + } + + writeln("ICs: ", temp, "\n"); + + for step in [1..nsteps] do { + forall i in interior do + tempNew(i) = temp(i) + coeff*(temp(i-1) - 2.0*temp(i) + temp(i+1)); + temp[interior] = tempNew[interior]; + } + + writeln("Final: ", temp); +} +</code></pre> + + + + + Spark in HPC clusters + + 2015-03-02T00:00:00-07:00 + https://hpc.social/2015/spark-in-hpc-clusters + <p>Over the past several years, as research computing centres and others who run HPC clusters tried to accommodate other forms of computing for data analysis, <a href="http://www.sdsc.edu/~allans/MyHadoop.pdf">much</a> <a href="http://www.hadoopsphere.com/2013/06/options-for-mapreduce-with-hpc.html">effort</a> went into trying to incorporate Hadoop jobs into the scheduler along with other more traditional HPC jobs. It never went especially well, which is a shame, because it seems that those past unsuccessful attempts have <a href="http://www.hadoopsphere.com/2013/06/options-for-mapreduce-with-hpc.html">discouraged</a> experimentation with related next-generation technologies which are a much better fit for large-scale technical computing.</p> + +<p>Hadoop v1 was always going to be a niche player and an awkward fit for big technical computing - and HPCers weren’t the only ones to notice this. Hadoop MapReduce’s mandatory dumping of output to disk after every Map/Reduce stage rendered it nearly unusable for any sort of approach which required iteration, or interactive use. Machine learning users, who often rely on many of the same iterative linear algebra solvers that physical science simulation users need, equally found Hadoop unhelpful. Hadoop v1 solved one set of problems – large single-pass data processing – very well, but those weren’t the problems that the technical computing community needed solved.</p> + +<p>The inefficiency of flushing to disk wasn’t necessarily the difficulty that HPC centres had with incorporating Hadoop into their clusters, however. Dumping to disk could be sped up with caching, or SSDs. The real issue was with <a href="http://hadoop.apache.org/docs/r2.6.0/hadoop-project-dist/hadoop-hdfs/HdfsUserGuide.html">HDFS</a>, the filesystem which Hadoop relies on. Because every job needed very rapid access to its data – to read the entire set in to the compute nodes, do minimal processing, then flush it back out – the file system was intimately tied to Hadoop cluster scheduling, which worked very hard (reasonably enough) to schedule the compute next to the data. But with Hadoop “on demand” in a cluster, how is this to work? One could spin up a new HDFS within each Hadoop job – but now the user has to have the new empty HDFS ingest the data files (probably with replication) initially, and then stage the data out of the doomed-to-be-shut-down HDFS afterwards. But this staging in and out will certainly take substantially longer than even the rest of the job’s I/O, which already likely dominates runtime. 
One can reserve a number of nodes for Hadoop jobs and keep a persistent HDFS store there, but this now defeats the purpose of running Hadoop in the cluster; one might as well just hive off those nodes into a separate system. Probably the best approach, which worked better than I think anyone had any right to expect, was to run <a href="http://wiki.lustre.org/index.php/Running_Hadoop_with_Lustre">Hadoop on Lustre</a>, but it remained awkward even for those who already were using Lustre for their cluster.</p> + +<p>The HPC community’s reaction to those problems – problems with a technology they were already skeptical of due to <a href="http://en.wikipedia.org/wiki/Not_invented_here">Not Invented Here Syndrome</a> – was largely to give up on anything that seemed “Hadoopy” as a sensible approach. The large-scale machine learning community, which didn’t necessarily have that luxury, was instead already looking for in-memory approaches to avoid this problem entirely.</p> + +<p>Two very promising “post-Hadoop” in-memory approaches which are much better suited to large-scale technical computing than Hadoop v1 ever was are also Apache projects - <a href="https://spark.apache.org">Spark</a> and <a href="https://flink.apache.org">Flink</a>. Flink has some really interesting features - including using a database-like query optimizer for almost all computations - but there’s no real question that currently, Spark is the more mature and capable of the offerings.</p> + +<p>Spark can make use of HDFS, and other related file stores, but those aren’t requirements; since iterative computation can be done in memory given enough RAM, there is much less urgency in having the data local to the computation if the computation is long enough. Instead, Spark can simply use a POSIX interface to whatever filesystem is already running on your cluster.</p> + +<p>Spark not only lacks hard HDFS-style requirements, but can also run in <a href="http://spark.apache.org/docs/latest/spark-standalone.html">standalone mode</a> without a heavyweight scheduler like <a href="http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/YARN.html">Yarn</a> or <a href="http://mesos.apache.org/">Mesos</a>. This standalone mode makes it quite easy to simply spin up a Spark “cluster” within a job, reading from the file system as any other job would. 
(Earlier versions of Spark made this unnecessarily difficult, with the standalone startup scripts having hardcoded values that assumed only one such job at a time; this is somewhat easier now.)</p>
+
+<p>Thus, below is a little job submission script for a Spark job on <a href="http://www.scinethpc.ca">SciNet</a>; it starts up a Spark master on the head node of the job, starts the workers, and runs a simple wordcount example.</p>
+
+<p>Spark’s well-thought-out python interface, standalone mode, and filesystem-agnostic approach make Spark a much better match for traditional HPC systems than Hadoop technologies ever were.</p>
+
+<p>Spark is covered a little bit in my and Mike Nolta’s <a href="http://www.dursi.ca/hadoop-for-hpcers/">Hadoop-for-HPCers</a> workshop.</p>
+
+<pre><code>#!/bin/bash
+#
+#PBS -l nodes=3:ppn=8,walltime=0:20:00
+#PBS -N spark-test
+
+# unique list of hosts assigned to this job
+nodes=($( cat $PBS_NODEFILE | sort | uniq ))
+nnodes=${#nodes[@]}
+last=$(( $nnodes - 1 ))
+
+cd $PBS_O_WORKDIR
+
+# start the Spark master on the head node of the job
+export SPARK_HOME=/scinet/gpc/Libraries/spark/spark-1.0.2-bin-hadoop2/
+ssh ${nodes[0]} "module load java; cd ${SPARK_HOME}; ./sbin/start-master.sh"
+sparkmaster="spark://${nodes[0]}:7077"
+
+# start a worker on every node, pointed at the master
+for i in $( seq 0 $last )
+do
+    ssh ${nodes[$i]} "cd ${SPARK_HOME}; module load java; nohup ./bin/spark-class org.apache.spark.deploy.worker.Worker ${sparkmaster} &amp;&gt; ${SCRATCH}/work/nohup-${nodes[$i]}.out" &amp;
+done
+
+rm -rf ${SCRATCH}/wordcounts
+
+cat &gt; sparkscript.py &lt;&lt;EOF
+from pyspark import SparkContext
+
+sc = SparkContext(appName="wordCount")
+file = sc.textFile("${SCRATCH}/moby-dick.txt")
+counts = file.flatMap(lambda line: line.split(" ")).map(lambda word: (word, 1)).reduceByKey(lambda a, b: a+b)
+counts.saveAsTextFile("${SCRATCH}/wordcounts")
+EOF
+
+module load java
+${SPARK_HOME}/bin/spark-submit --master ${sparkmaster} sparkscript.py
+
+# tear down the master and the workers
+ssh ${nodes[0]} "module load java; cd ${SPARK_HOME}; ./sbin/stop-master.sh"
+for i in $( seq 0 $last )
+do
+    ssh ${nodes[$i]} "killall java"
+done
+wait
+
+</code></pre>
+
+
+
+
+ IBM Workpad z50 & NetBSD - an interesting combination
+
+ 2015-02-16T14:51:20-07:00
+ https://hpc.social/2015/ibm-workpad-z50-netbsd-an-interesting-combination
+ <p>This week we look at another RISC-powered notebook, this time from IBM.<br />
+Although IBM did produce a line of PowerPC-based ThinkPad systems, this blog
+is focused on a little-known system called the IBM Workpad z50. This Microsoft
+Handheld PC form factor system was launched in March 1999 and ran Windows CE
+at the time. As we’ll see below, with some ingenuity it is also able to run
+NetBSD, which makes it a much more interesting proposition (at least for me).
+Ironically, although this is a high performance computing (HPC) focused blog,
+the “HPC” in this case stands for “Handheld PC”.</p>
+
+<p>The Workpad z50 has a form factor smaller than a notebook, but has what I
+consider to be an excellent keyboard and of course the trademark ThinkPad
+TrackPoint! Looking more closely at the specifications:</p>
+
+<ul>
+<li>NEC VR4121 MIPS R4100 CPU @ 131 MHz</li>
+<li>16 MB System RAM (expandable)</li>
+<li>16 MB System ROM</li>
+<li>8.4” LCD Display 640x480 (16-bit)</li>
+<li>External Monitor connector (SVGA)</li>
+<li>Serial port</li>
+<li>Infrared port</li>
+<li>CF slot</li>
+<li>PCMCIA slot</li>
+</ul>
+<p>What prevents me from taking my pristine Workpad z50 to the local electronics
+recycling facility is NetBSD. With a little effort it is possible to install
+recent versions of NetBSD on the Workpad z50 and even run XWindows.
There are
+a number of sources of information on this topic, including some videos on
+YouTube which helped me a great deal:</p>
+
+<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;">
+
+</div>
+
+<p>I won’t run through the install procedure here as that’s been well covered
+already in the above series of videos. Rather, let’s look at the boot-up
+sequence and of course, in keeping with the high performance computing theme,
+run a simple benchmark. Links to the videos follow below:</p>
+
+<p><strong>The requisite system bootup</strong>
+
+<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;">
+
+</div>
+
+</p>
+
+<p><strong>Starting XWindows and running Linpack</strong>
+
+<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;">
+
+</div>
+
+</p>
+
+<p>Using NetBSD pkgsrc, I have set up NetBSD on an x86-based system and have taken
+advantage of distcc to cross-compile binaries. This helps greatly to get
+packages quickly compiled for the system. Note that I ran into a lot of local
+compiles failing due to lack of RAM, so cross-compiling is almost a must.</p>
+
+<p>Equipped with PCMCIA, I’m able to easily add to the Workpad z50 such
+capabilities as Ethernet, wireless networking and even SCSI. Below is my
+collection of PCMCIA adaptors.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/pcmcia.jpg" />
+</figure>
+
+<p>Next steps? I&rsquo;ll be looking to move to the NetBSD 6.x series and compile a more
+compact kernel (with drivers removed that I don&rsquo;t require). And unlike the
+system in my previous blog, this one is silent :)</p>
+
+
+
+
+ UltraSPARC powered laptop - circa 2001
+
+ 2015-01-30T15:07:57-07:00
+ https://hpc.social/2015/ultrasparc-powered-laptop-circa-2001
+ <p>It’s been ages since my last blog. What better way to start off the new year
+than by looking at the past. In this case, let’s wind the clock all the way
+back to 2001. This was the era of the Intel Pentium 4 processors. However,
+today we’ll be looking at something far less pedestrian. Based on the Scalable
+Processor ARChitecture (commonly known as SPARC), the NatureTech 777
+GenialStation is an UltraSPARC IIe laptop computer. Why do I have an
+UltraSPARC IIe based laptop computer? Why not? And it’s oh so cool with its
+lovely blue and gray chassis as opposed to boring old black. This NatureTech 777
+laptop boasts the following specs:</p>
+
+<ul>
+<li>SUN UltraSPARC IIe @ 500 MHz w/256-KB L2 Cache</li>
+<li>15.0&quot; TFT SXGA LCD Panel</li>
+<li>256MB ECC RAM</li>
+<li>80GB IDE disk</li>
+<li>CD/DVD Combo drive</li>
+<li>3.5” Floppy disk drive</li>
+<li>5400mAh/ 11.1V. Li-ion Smart Battery Pack (mine is dead)</li>
+<li>Built-in H/W Security Controller, 4 button input</li>
+<li>A honking noisy fan that always runs at full speed</li>
+</ul>
+<p>What can you do with a NatureTech 777 laptop? Well, at this stage of its life, I don’t use it for much apart from tinkering. Back in the day, being able to
+take SUN Solaris on the road in a portable package was quite impressive,
+and I understand that these systems also went for a premium price at the time.</p>
+
+<p>I was surprised not to find any NatureTech video on YouTube or other such sites. So, I’m pleased to present this beast of a laptop in all its glory booting up
+Solaris 9 and running Linpack - of course compiled with the requisite SunPro
+compilers (and SUN math libraries).
No speed records broken here of course,
+and with that fan running constantly in overdrive, I would not expect any
+thermal issues either :)</p>
+
+<p><strong>Booting Solaris 9</strong>
+
+<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;">
+
+</div>
+
+</p>
+
+<p><strong>Stressing the mighty UltraSPARC IIe with Linpack</strong>
+
+<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;">
+
+</div>
+
+</p>
+
+<p>I’m lucky enough to have the fancy laptop bag from the manufacturer which
+proudly proclaims that it’s carrying a SPARC-based piece of equipment.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/ultrasparc_bag.jpg" />
+</figure>
+
+<p>As the SUN sets on this blog (pun intended), I reminisce about the days of
+variety in computing - different processors, operating systems - and when RISC
+was king. Hopefully, we are entering another such era with the rise of ARM,
+OpenPOWER, MIPS as well as the others that are out there.</p>
+
+<p><strong>Varietas Delectat!</strong></p>
+
+
+
+
+ Thoughts on the NSF Future Directions Interim Report
+
+ 2015-01-29T07:53:00-07:00
+ https://hpc.social/2015/thoughts-on-the-nsf-future-directions-interim-report
+ <p>The National Academies recently released an interim report entitled <a href="http://www.nap.edu/catalog/18972/future-directions-for-nsf-advanced-computing-infrastructure-to-support-us-science-and-engineering-in-2017-2020">Future Directions for NSF Advanced Computing Infrastructure to Support U.S. Science and Engineering in 2017-2020</a> as a part of <a href="http://www.nsf.gov/awardsearch/showAward?AWD_ID=1344417&amp;HistoricalAwards=false">a $723,000 award</a> commissioned to take a hard look at where the NSF’s supercomputing program is going.  Since releasing the interim report, the committee has been soliciting feedback and input from the research community to consider as they draft their final report, and I felt compelled to put some of my thoughts into a response.<br /><br />NSF’s HPC programs are something I hold near and dear since I got my start in the industry by supporting two NSF-owned supercomputers.  I put a huge amount of myself into Trestles and Gordon, and I still maintain that job encompassed the most engaging and rewarding work I’ve ever done.  However, the NSF’s lack of a future roadmap for its HPC program made my future feel perpetually uncertain, and this factored heavily in my decision to eventually pursue other opportunities.<br /><br />Now that I am no longer affiliated with NSF, I wanted to delineate some of the problems I observed during my time on the inside with the hope that someone more important than me really thinks about how they can be addressed.  The report requested feedback in nine principal areas, so I’ve done my best to contextualize my thoughts with the committee’s findings. <br /><br />With that being said, I wrote this all up pretty hastily.  Some of it may be worded strongly, and although I don’t mean to offend anybody, I stand by what I say.
That doesn’t mean that my understanding of everything is correct though, so it’s probably best to assume that I have no idea what I’m talking about here.<br /><br />Finally, a glossary of terms may make this more understandable:<br /><br /><ul><li>XD is the NSF program that funds XSEDE; it finances infrastructure and people, but it does not fund supercomputer procurements or operations</li><li>Track 1 is the program that funded Blue Waters, the NSF’s leadership-class HPC resource</li><li>Track 2 is the program that funds most of the XSEDE supercomputers.  It funded systems like Ranger, Keeneland, Gordon, and Stampede</li></ul><br /><hr /><br /><h2 style="text-align: left;">1. How to create advanced computing infrastructure that enables integrated discovery involving experiments, observations, analysis, theory, and simulation.</h2>Answering this question involves a few key points:<br /><ol><li>Stop treating NSF’s cyberinfrastructure as a computer science research project and start treating it like research infrastructure operation.  The Office of Cyberinfrastructure (OCI) does not belong in Computer &amp; Information Science &amp; Engineering (CISE).</li><li>Stop funding cyberinfrastructure solely through capital acquisition solicitations and restore reliable core funding to NSF HPC centers.  This will restore a community that is conducive to retaining expert staff.</li><li>Focus OCI/ACI and raise the bar for accountability and transparency.  Stop funding projects and centers that have no proven understanding of operational (rather than theoretical) HPC.</li><li>Either put up or give up.  The present trends in funding lie on a road to death by attrition.</li><li>Don’t waste time and funding by presuming that outsourcing responsibility and resources to commercial cloud or other federal agencies will effectively serve the needs of the NSF research community.</li></ol>I elaborate on these points below.<br /><br /><h2 style="text-align: left;">2. Technical challenges to building future, more capable advanced computing systems and how NSF might best respond to them.</h2><blockquote class="tr_bq">“Today’s approach of federating distributed compute- and data-intensive resources to meet the increasing demand for combined computing and data capabilities is technically challenging and expensive.”</blockquote>This is true.<br /><blockquote class="tr_bq">“New approaches that co-locate computational and data resources might reduce costs and improve performance. Recent advances in cloud data center design may provide a viable integrated solution for a significant fraction of (but not all) data- and compute-intensive and combined workloads.”</blockquote>This strong statement is markedly unqualified and unsubstantiated.  If it is really recommending that the NSF start investing in the cloud, consider the following:<br /><ul><li>Cloud computing resources are designed for burst capabilities and are only economical when workloads are similarly uneven.  In stark contrast, most well-managed HPCs see constant, high utilization, which is where the cloud becomes economically intractable.</li><li>The suggestion that cloud solutions can “improve performance” is unfounded.  At a purely technological level, the cloud will never perform as well as unvirtualized HPC resources, period.  Data-intensive workloads and calculations that require modest inter-node communication will suffer substantially.</li></ul><br />In fact, if any cost reduction or performance improvement can be gained by moving to the cloud, I can almost guarantee that incrementally more can be gained by simply addressing the non-technological aspects of the current approach of operating federated HPC.  Namely, the NSF must<br /><ol><li>Stop propping up failing NSF centers that have been unable to demonstrate the ability to effectively design and operate supercomputers.</li><li>Stop spending money on purely experimental systems that domain scientists cannot or will not use.</li></ol><br /><b>The NSF needs to re-focus its priorities and stop treating the XD program like a research project and start treating it like a business</b>.  Its principal function should be to deliver a product (computing resources) to customers (the research community).  Any component that is not helping domain scientists accelerate discovery should be strongly scrutinized.  Who are these investments truly satisfying?<br /><blockquote class="tr_bq">“New knowledge and skills will be needed to effectively use these new advanced computing technologies.”</blockquote>This is a critical component of XD that is extremely undervalued and underfunded.  Nobody is born knowing how to use HPC resources, and <b>optimization should be performed on users in addition to code</b>.  There is huge untapped potential in collaborative training between U.S. federal agencies (DOE, DOD) and European organizations (PRACE).  If there is bureaucratic red tape in the way, it needs to be dealt with at an official level or circumvented at the grassroots level.<br /><br /><h2 style="text-align: left;">3. The computing needs of individual research areas.</h2>XDMoD shows this.  <b>The principal workloads across XSEDE are from traditional domains like physics and chemistry, and the NSF needs to recognize that this is not going to change substantially</b> over the lifetime of a program like XD. <br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-CJtYj8P1tP0/VMnV1LPcKAI/AAAAAAAAK04/jREwPiKa77I/s1600/Screen%2BShot%2B2015-01-27%2Bat%2B10.20.54%2BPM.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="306" src="http://1.bp.blogspot.com/-CJtYj8P1tP0/VMnV1LPcKAI/AAAAAAAAK04/jREwPiKa77I/s1600/Screen%2BShot%2B2015-01-27%2Bat%2B10.20.54%2BPM.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Straight from XDMoD for 2014.  MPS = math and physical sciences, BIO = biological sciences, GEO = geosciences.  NSF directorate is not a perfect alignment; for example, I found many projects in BIO were actually chemistry and materials science.</td></tr></tbody></table><br /><br />While I wholeheartedly agree that new communities should be engaged by lowering the barriers to entry, these activities cannot come at the expense of undercutting the resources required by the majority of XD users.<br /><br />The cost per CPU cycle should not be deviating wildly between Track 2 awards, because the ROI on very expensive cycles will be extremely poor.
If the NSF wants to fund experimental systems, it needs to do that as an activity that is separate from the production resources.  Alternatively, only a small fraction of each award should be earmarked for new technologies that represent a high risk; the Stampede award was a fantastic model of how a conservative fraction of the award (10%) can fund an innovative and high-risk technology.<br /><br /><h2 style="text-align: left;">4. How to balance resources and demand for the full spectrum of systems, for both compute- and data-intensive applications, and the impacts on the research community if NSF can no longer provide state-of-the-art computing for its research community.</h2><blockquote class="tr_bq">“But it is unclear, given their likely cost, whether NSF will be able to invest in future highest-tier systems in the same class as those being pursued by the Department of Energy, Department of Defense, and other federal mission agencies and overseas.”</blockquote>The NSF does not have the budget to support leadership computing.  This is clear even from a bird’s eye view: <a href="http://science.energy.gov/~/media/budget/pdf/sc-budget-request-to-congress/fy-2014/Cong_Budget_2014_Advanced_Computing.pdf">DOE ASCR’s budget for FY2012 was $428 million</a> and, by comparison, <a href="http://www.nsf.gov/about/budget/fy2014/pdf/18_fy2014.pdf">NSF ACI’s budget was only $211 million</a>.  Worse yet, despite having half the funding of its DOE counterpart, the NSF owned HPC resources at seven universities in FY2012 compared to ASCR’s three centers.<br /><br />Even if given the proper funding, the NSF’s practice of spreading Track 2 awards across many universities to operate its HPC assets is not conducive to operating leadership computing.  The unpredictable nature of Track 2 awards has resulted in very uneven funding for NSF centers which, quite frankly, is a terrible way to attract and retain the highly knowledgeable, world-class staff that is necessary to operate world-class supercomputers.<br /><br /><h2 style="text-align: left;">5. The role of private industry and other federal agencies in providing advanced computing infrastructure.</h2>The report makes some very troubling statements in reference to this question.<br /><blockquote class="tr_bq">“Options for providing highest-tier capabilities that merit further exploration include purchasing computing services from federal agencies…”</blockquote>This sounds dirty.  Aren’t there regulations in place that restrict the way in which money can flow between the NSF and DOE?  I’m also a little put off by the fact that this option is being put forth in a report that is crafted by a number of US DOE folks whose DOE affiliations are masked by university affiliations in the introductory material.<br /><blockquote class="tr_bq">“…or by making arrangements with commercial services (rather than more expensive purchases by individual researchers).”</blockquote>Providing advanced cyberinfrastructure for the open science community is not a profitable venture.  <b>There is no money in HPC operations</b>.  I do not see any “leadership” commercial cloud providers offering the NSF a deal on spare cycles, and the going rate for commercial cloud time is known to be <a href="http://www.alcf.anl.gov/magellan">far more expensive than deploying HPC resources in-house</a> at the national scale.<br /><br /><h2 style="text-align: left;">6. The challenges facing researchers in obtaining allocations of advanced computing resources and suggestions for improving the allocation and review processes.</h2><blockquote class="tr_bq">“Given the “double jeopardy” that arises when researchers must clear two hurdles—first, to obtain funding for their research proposal and, second, to be allocated the necessary computing resources—the chances that a researcher with a good idea can carry out the proposed work under such conditions is diminished.”</blockquote>XD needs to be more tightly integrated with other award processes to mitigate the double jeopardy issue.  I have a difficult time envisioning the form which this integration would take, but the NSF GRF’s approach of prominently featuring NSF HPC resources as a part of the award might be a good start.  As an adaptive proposal reviewer within XSEDE and a front-line interface with first-time users, I found that having the NSF GRF bundle XSEDE time greatly reduced the entry barrier for new users and made it easier for us reviewers to stratify the proposals.  Another idea may be to invite NSF center staff to NSF contractors’ meetings (if such things exist; I know <a href="http://science.energy.gov/bes/mse/principal-investigators-meetings/">they do for DOE BES</a>) to show a greater amount of integration across NSF divisions.<br /><br />In addition, the current XSEDE allocation proposal process is extremely onerous.  The <a href="https://portal.xsede.org/allocation-policies">document that describes the process</a> is ridiculously long and full of obscure requirements that serve absolutely no purpose.  For example, all XSEDE proposals require a separate document detailing the scaling performance of their scientific software.  Demonstrating an awareness of the true costs of performing certain calculations has its merits, but a detailed analysis of scaling is not even relevant for the majority of users who run modest-scale jobs or use off-the-shelf black-box software like Gaussian.  The only thing these obscure requirements do is prevent new users, who are generally less familiar with all of the scaling requirements nonsense, from getting any time.  If massive scalability is truly required by an application, the PI needs to be moved over to the Track 1 system (Blue Waters) or referred to <a href="http://www.doeleadershipcomputing.org/">INCITE</a>.<br /><br />As a personal anecdote, many of us center staff found ourselves simply short-circuiting the aforementioned allocations guide and providing potential new users with a guide to the guide.  It was often sufficient to provide a checklist of minutiae whose absence would result in an immediate proposal rejection and allow the PIs to do what they do best—write scientific proposals for their work.  Quite frankly, the fact that we had to provide a guide to understanding the guide to the allocations process suggests that the allocations process itself is grossly over-engineered.<br /><br /><h2 style="text-align: left;">7. Whether wider and more frequent collection of requirements for advanced computing could be used to inform strategic planning and resource allocation; how these requirements might be used; and how they might best be collected and analyzed.</h2>The XD program has already established a solid foundation for reporting the popularity and usability of NSF HPC resources in <a href="https://xdmod.ccr.buffalo.edu/">XDMoD</a>.
The requirements of the majority are evolving more slowly than computer scientists would have everyone believe.<br /><br />Having been personally invested in two Track 2 proposals, I have gotten the impression that the review panels who select the destiny of the NSF’s future HPC portfolio are more impressed by cutting-edge, albeit untested and under-demanded, proposals.  Consequently, taking a “functional rather than a technology-focused or structural approach” to future planning will result in further loss of focus.  Instead of delivering conservatively designed architectures that will enjoy guaranteed high utilization, <b>functional approaches will give way to computer scientists on review panels dictating what resources domain scientists should be using</b> to solve their problems.  The cart will be before the horse.<br /><br />Instead, it would be far more valuable to include more operational staff in strategic planning.  The people on the ground know how users interact with systems and what will and won’t work.  As with the case of leadership computing, the <b>NSF does not have the financial commitment to be leading the design of novel computing architectures at large scales</b>.  Exotic and high-risk technologies should be simply left out of the NSF’s Track 2 program, incorporated peripherally but funded through other means (e.g., MRIs), or incorporated in the form of a small fraction of a larger, lower-risk resource investment.<br /><br />A perspective of the greater context of this has been <a href="http://www.computer.org/cms/Computer.org/ComputingNow/docs/CISE-17-02-EIC.pdf">eloquently written by Dr. Steven Gottlieb</a>.  Given his description of the OCI conversion to ACI, it seems like taking away the Office of Cyberinfrastructure’s (OCI’s) autonomy and placing it under Computer &amp; Information Science &amp; Engineering (CISE) exemplifies an ongoing and significant loss of focus within NSF.  This change reflected the misconception that architecting and operating HPC resources for domain sciences is a computer science discipline. <br /><br />This is wrong. <br /><br />Computer scientists have a nasty habit of creating tools that are intellectually interesting but impractical for domain scientists.  These tools get “thrown over the wall,” never to be picked up, and represent an overall waste of effort in the context of operating HPC services for non-computer scientists.  Rather, operating HPC resources for the research community requires experienced technical engineers with a pragmatic approach to HPC.  Such people are most often not computer scientists, but former domain scientists who know what does and doesn’t work for their respective communities.<br /><br /><h2 style="text-align: left;">8. The tension between the benefits of competition and the need for continuity as well as alternative models that might more clearly delineate the distinction between performance review and accountability and organizational continuity and service capabilities.</h2><blockquote class="tr_bq">“Although NSF’s use of frequent open competitions has stimulated intellectual competition and increased NSF’s financial leverage, it has also impeded collaboration among frequent competitors, made it more difficult to recruit and retain talented staff, and inhibited longer-term planning.”</blockquote>Speaking from firsthand experience, I can say that <b>working for an NSF center is a life of a perpetually uncertain future and dicing up FTEs into frustratingly tiny pieces</b>.  While some people are driven by competition and fundraising (I am one of them), an entire organization built up to support multi-million dollar cyberinfrastructure cannot be sustained this way.<br /><br />At the time I left my job at an NSF center, my salary was covered by six different funding sources at levels ranging from 0.05 to 0.30 FTEs.  Although this officially meant that I was only 30% committed to directly supporting the operation of one of our NSF supercomputers, the reality was that I (and many of my colleagues) simply had to put in more than 100% of my time into the job.  This is a very high-risk way to operate because committed individuals get noticed and almost invariably receive offers of stable salaries elsewhere.  Retaining talent is extremely difficult when you have the least to offer, and the current NSF funding structure makes it very difficult for centers to do much more than continually hire entry-level people to replace the rising stars who find greener pastures.<br /><br />Restoring reliable, core funding to the NSF centers would allow them to re-establish a strong foundation that can be an anchor point for other sites wishing to participate in XD.  This will effectively cut off some of the current sites operating Track 2 machines, but frankly, <b>the NSF has spread its HPC resources over too many sites at present and is diluting its investments</b> in people and infrastructure.  The basis for issuing this core funding could follow a pattern similar to that of XD, where long-term (10-year) funding is provisioned with a critical 5-year review.<br /><br />If the NSF cannot find a way to re-establish reliable funding, it needs to <b>accept defeat and stop trying to provide advanced cyberinfrastructure</b>.  The current method of only funding centers indirectly through HPC acquisitions and associated operations costs is unsustainable for two reasons:<br /><ul><li>The length of these Track 2 awards (typically 3 years of operations) makes future planning impossible.  Thus, this current approach forces centers to follow high-risk and inadequately planned roadmaps.</li><li>All of the costs associated with maintaining world-class expertise and facilities have to come from someone else’s coffers.  Competitive proposals for HPC acquisitions simply cannot afford to request budgets that include strong education, training, and outreach programs, so these efforts wind up suffering.</li></ul><br /><br /><h2 style="text-align: left;">9. How NSF might best set overall strategy for advanced computing-related activities and investments as well as the relative merits of both formal, top-down coordination and enhanced, bottom-up process.</h2>Regarding the top-down coordination, the NSF should drop the Track 2 program’s current solicitation model where proposers must have a vendor partner to get in the door.  This is unnecessarily restrictive and fosters an unhealthy ecosystem where vendors and NSF centers are both scrambling to pair up, resulting in high-risk proposals.  Consider the implications:<br /><ol><li>Vendors are forced to make promises that they may not be able to fulfill (e.g., Track 2C and Blue Waters).  Given that these two (of nine) solicitations resulted in substantial wastes of time and money (over 20% vendor failure rate!), I find it shocking that the NSF continues to operate this way.</li><li>NSF centers are only capable of choosing the subset of vendors who are willing to play ball with them, resulting in a high risk of sub-optimal pricing and configurations for the end users of the system.</li></ol><br />I would recommend a model, similar to many European nations’, where a solicitation is issued for a vendor-neutral proposal to deploy and support a program that is built around a resource.  A winning proposal is selected based on not only the system features, its architecture, and the science it will support, but the plan for training, education, collaboration, and outreach as well.  Following this award, the bidding process for a specific hardware solution begins.<br /><br />This addresses the two high-risk processes mentioned above and simultaneously eliminates the current qualification in Track 2 solicitations that no external funding can be included in the proposal.  By leaving the capital expenses out of the selection process, the NSF stands to get the best deal from all vendors and other external entities independent of the winning institution.<br /><br />Bottom-up coordination is much more labor-intensive because it requires highly motivated people at the grassroots to participate.  Given the NSF’s current inability to provide stable funding for highly qualified technical staff, I cannot envision how this would actually come together.</p>
+
+
+
+
+ Machine Learning for Scientists
+
+ 2014-12-20T00:00:00-07:00
+ https://hpc.social/2014/machine-learning-for-scientists
+ <p>I recently taught a 1-day <a href="http://ljdursi.github.io/ML-for-scientists">machine learning workshop for scientists</a> for the good folks at <a href="http://www.scinethpc.ca">SciNetHPC</a>. There was enough interest (nearly forty people signed up for a day-long session near the end of term) that we had to book a large-ish classroom.</p>
+
+<p>There’s a lot of interest in the topic — which might even be surprising, given that a lot of the material is either familiar or pretty easy to digest for those who spend a lot of their time doing scientific data analysis. But for those coming to it for the first time and on their own, the difference in terminology (“features”? “shrinkage”? Wait, you just mean variables and regularization?) and the huge number of different methods available can be pretty baffling.</p>
+
+<p>And I think it helps to have someone with a science background to explain the very different approaches taken to modelling, compared to those in the sciences (especially the natural sciences), and <em>why</em> it is that way. Having that connection means that you can translate – so that the very real expertise and experience they do already have can be a benefit, rather than throwing up barriers. (“Bias-Variance tradeoff? You mean you’re willing to introduce error just to get the error bars down a bit – centred on the <em>wrong</em> <em>answer</em>? What kind of monster are you, and what dangerous nonsense is this machine learning stuff?”)</p>
+
+<p>This was the first time teaching this material, and while there are some things I’d like to improve (especially doing more on PCA and clustering, although I don’t know what I’d take out for a 1-day class), I think that it went fairly well.</p>
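+<p>(A toy numerical illustration of that bias-variance point, mine and not from the workshop materials: deliberately shrinking an estimate towards zero biases it, but can reduce its variance across repeated experiments enough to lower the total error.)</p>
+
+<pre><code class="language-python">#!/usr/bin/env python
+# Toy bias-variance demo: estimate a small mean from 10 samples, many
+# times over.  The "shrunk" estimator is biased towards zero, yet its
+# mean squared error comes out lower than the unbiased sample mean's.
+import numpy
+
+numpy.random.seed(42)
+truemean = 0.2
+lam = 0.5                                   # shrinkage strength
+trials = numpy.random.normal(truemean, 1.0, size=(10000, 10))
+
+plain  = trials.mean(axis=1)                # unbiased estimator
+shrunk = trials.mean(axis=1)/(1.0 + lam)    # biased, lower-variance
+
+for name, est in [("plain", plain), ("shrunk", shrunk)]:
+    bias = est.mean() - truemean
+    print "%6s: bias^2 = %.4f, variance = %.4f, MSE = %.4f" % \
+            (name, bias**2, est.var(), bias**2 + est.var())
+</code></pre>
+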
<p>The presentation can be seen <a href="http://ljdursi.github.io/ML-for-scientists">online</a>, and everything’s available on <a href="https://github.com/ljdursi/ML-for-scientists">github</a>.</p>
+
+<p>Incidentally, this was my first time using <a href="http://slidify.org">Slidify</a> for a presentation, and I really enjoyed it – this may be the first markdown/html5 setup that finally gets me willingly moving away from Keynote for this sort of material. Obviously, Slidify integrates much more closely with R than with python, particularly for graphics; but still, it was a pleasure to use.</p>
+
+
+
+
+ Storage Utilization in the Long Tail of Science
+
+ 2014-11-05T15:53:00-07:00
+ https://hpc.social/2014/storage-utilization-in-the-long-tail-of-science
+ <h2>Introduction</h2>
+<p>Since changing careers and moving up to the San Francisco Bay Area in July, I haven’t had nearly as much time to post interesting things here on my blog—I guess that’s the startup life. That isn’t to say that my life in DNA sequencing has been without interesting observations to explore though; the world of high-throughput sequencing is becoming increasingly dependent on high-performance computing, and many of the problems being solved in genomics and bioinformatics are stressing aspects of system architecture and cyberinfrastructure that haven’t gotten a tremendous amount of exercise from the more traditional scientific domains in computational research. <br /><br />Take, for example, <a href="http://systems.illumina.com/systems/hiseq-x-sequencing-system.ilmn">the biggest and baddest DNA sequencer on the market</a>: over the course of a three-day run, it outputs around 670 GB of raw (but compressed) sequence data, and this data is spread out over 1,400,000 files. This would translate to an average file size of around 500 KB, but the reality is that the file sizes are a lot less uniform:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://3.bp.blogspot.com/-f1nf0-PQkRA/VCjZ9NZatZI/AAAAAAAAKuQ/cQZfm6HKV28/s1600/hiseqx-filesizedist.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="271" src="http://3.bp.blogspot.com/-f1nf0-PQkRA/VCjZ9NZatZI/AAAAAAAAKuQ/cQZfm6HKV28/s1600/hiseqx-filesizedist.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Figure 1. File size distribution of a single flow cell output (~770 gigabases) on Illumina’s highest-end sequencing platform</td></tr></tbody></table><br />After some basic processing (which involves opening and closing hundreds of these files repeatedly and concurrently), these data files are converted into very large files (tens or hundreds of gigabytes each) which then get reduced down to data that is more digestible over the course of hundreds of CPU hours. As one might imagine, this entire process is very good at taxing many aspects of file systems, and on the computational side, most of this IO-intensive processing is not distributed and performance benefits most from single-stream, single-client throughput.<br /><br />As a result of these data access and processing patterns, the storage landscape in the world of DNA sequencing and bioinformatics is quite different from conventional supercomputing.
Some large sequencing centers do use the file systems we know and love (and hate) like <a href="http://www.nersc.gov/users/computational-systems/genepool/file-storage-and-io/">GPFS at JGI</a> and <a href="http://insidehpc.com/2013/10/sanger-institute-deploys-22-petabytes-lustre-powered-ddn-storage/">Lustre at Sanger</a>, but it appears that most small- and mid-scale sequencing operations are relying heavily on network-attached storage (NAS) for both receiving raw sequencer data and being a storage substrate for all of the downstream data processing.<br /><br />I say all of this because these data patterns—accessing large quantities of small files and large files with a high degree of random IO—are a common trait in many scientific applications used in the “long tail of science.” The fact is, the sorts of IO for which parallel file systems like Lustre and GPFS are designed are tedious (if not difficult) to program, and for the majority of codes that don’t require thousands of cores to make new discoveries, simply reading and writing data files in a naïve way is “good enough.”<br /><br /><h3>The Long Tail</h3>This long tail of science is also using up a huge amount of the supercomputing resources made available to the national open science community; to illustrate, 98% of all jobs submitted to the XSEDE supercomputers in 2013 used 1024 or fewer CPU cores, and these modest-scale jobs represented over 50% of all the CPU time burned up on these machines.<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://3.bp.blogspot.com/-h1Xc98JyrW0/VCjapVMZXQI/AAAAAAAAKuY/aB-B7ZjkOZQ/s1600/Job%2BSize%2BDistribution%2B-%2B2013.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="271" src="http://3.bp.blogspot.com/-h1Xc98JyrW0/VCjapVMZXQI/AAAAAAAAKuY/aB-B7ZjkOZQ/s1600/Job%2BSize%2BDistribution%2B-%2B2013.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Figure 2. Cumulative job size distribution (weighted by job count and SUs consumed) for all jobs submitted to XSEDE compute resources in 2013</td></tr></tbody></table><br />The NSF has responded to this shift in user demand by awarding <a href="http://www.sdsc.edu/News%20Items/PR100313_comet.html">Comet, a 2 PF supercomputer designed to run these modest-scale jobs</a>. The Comet architecture limits its full-bisection bandwidth interconnectivity to <a href="http://dx.doi.org/10.1145/2616498.2616540">groups of 72 nodes</a>, and these 72-node islands will actually have enough cores to satisfy 99% of all the jobs submitted to XSEDE clusters in 2013 (see above). By limiting the full-bisection connectivity to smaller islands and using less rich connectivity between islands, the cost savings in not having to buy so many mid-tier and core switches are then turned into additional CPU capacity.<br /><br />What the Comet architecture <i>doesn’t</i> address, however, is the question of data patterns and IO stress being generated by this same long tail of science—the so-called 99%. If DNA sequencing is any indicator of the 99%, parallel file systems are actually a poor choice for high-capacity, mid-scale jobs because their <a href="http://dx.doi.org/10.1145/2159352.2159356">performance degrades significantly when facing many small files</a>. Now, the real question is, are the 99% of HPC jobs really generating and manipulating lots of small files instead of the large striped files that Lustre and GPFS are designed to handle? That is, might the majority of jobs on today’s HPC clusters actually be better served by file systems that are less scalable but handle small files and random IO more gracefully?<br /><br />Some colleagues and I set out to answer this question last spring, and a part of this quest involved looking at every single file on two of SDSC’s Data Oasis file systems. This represented about 1.7 PB of real user data spread across two Lustre 2.4 file systems—one designed for temporary scratch data and the other for projects storage—and we wanted to know if users’ data really consisted of the large files that Lustre loves or if, like job size, the 99% are really working with small files.  Since SDSC’s two national resources, Gordon and Trestles, restrict the maximum core count for user jobs to modest-scale submissions, these file systems should contain files representative of long-tail users.<br /><br /><h2>Scratch File Systems</h2>At the roughest cut, files can be categorized based on whether their size is on the order of bytes and kilobytes (size &lt; 1024*1024 bytes), megabytes (&lt; 1024 MB), gigabytes (&lt; 1024 GB), and terabytes (&lt; 1024 TB). Although pie charts are generally a terrible way to show relative compositions, this is how the files on the 1.2 PB scratch file system broke down:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-e-UpylKZPBw/VCjcZFRbClI/AAAAAAAAKuk/uf38vgGNnNk/s1600/file%2Bcount%2Bpie.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="320" src="http://1.bp.blogspot.com/-e-UpylKZPBw/VCjcZFRbClI/AAAAAAAAKuk/uf38vgGNnNk/s1600/file%2Bcount%2Bpie.png" width="296" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Figure 3. Fraction of file count consumed by files of a given size on Data Oasis’s scratch file system for Gordon</td></tr></tbody></table><br /><br />The above figure shows the number of files on the file system classified by their size, and there is clearly a preponderance of small files less than a gigabyte in size. This is not terribly surprising as the data is biased towards smaller files; that is, you can fit a thousand one-megabyte files in the same space that a single one-gigabyte file would take up. Another way to show this data is by how much file system capacity is taken up by files of each size:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://3.bp.blogspot.com/-htbZijzc2MY/VCjcdu-hPrI/AAAAAAAAKus/Y8F4ohme4Yg/s1600/file%2Bsize%2Bpie.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="320" src="http://3.bp.blogspot.com/-htbZijzc2MY/VCjcdu-hPrI/AAAAAAAAKus/Y8F4ohme4Yg/s1600/file%2Bsize%2Bpie.png" width="296" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Figure 4. File system capacity consumed by files of a given size on Data Oasis’s scratch file system for Gordon</td></tr></tbody></table><br /><br />This makes it very apparent that the vast majority of the used space on this scratch file system—a total of 1.23 PB of data—is taken up by files on the order of gigabytes and megabytes. There were only seventeen files that were a terabyte or larger in size. <br /><br />Incidentally, I don’t find it too surprising that there are so few terabyte-sized files; even in the realm of Hadoop, median job dataset sizes are on the order of a dozen gigabytes (e.g., Facebook has reported that <a href="http://dx.doi.org/10.1145/2169090.2169092">90% of its jobs read in under 100 GB of data</a>). Examining file sizes with much finer granularity reveals that the research data on this file system isn’t even of Facebook scale though:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://4.bp.blogspot.com/-FrkhmvkuOao/VCjclW_IAqI/AAAAAAAAKu0/7sMuQQlrXas/s1600/file%2Bsize%2Bdistribution.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="272" src="http://4.bp.blogspot.com/-FrkhmvkuOao/VCjclW_IAqI/AAAAAAAAKu0/7sMuQQlrXas/s1600/file%2Bsize%2Bdistribution.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Figure 5. Number of files of a given size on Data Oasis’s scratch file system for Gordon.  This data forms the basis for Figure 3 above</td></tr></tbody></table><br /><br />While there are a large number of files on the order of a few gigabytes, it seems that files on the order of tens of gigabytes or larger are far more scarce. Turning this into relative terms,<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-b1zxYPEkiA4/VCjctOul9LI/AAAAAAAAKu8/I0LHbxoEoTU/s1600/cumul%2Bfile%2Bsize%2Bdistribution.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="276" src="http://1.bp.blogspot.com/-b1zxYPEkiA4/VCjctOul9LI/AAAAAAAAKu8/I0LHbxoEoTU/s1600/cumul%2Bfile%2Bsize%2Bdistribution.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Figure 6. Cumulative distribution of files of a given size on Data Oasis’s scratch file system for Gordon</td></tr></tbody></table><br /><br />we can make more meaningful statements. In particular,<br /><br /><ul><li>90% of the files on this Lustre file system are 1 megabyte or smaller</li><li>99% of files are 32 MB or less</li><li>99.9% of files are 512 MB or less</li><li>and 99.99% of files are 4 GB or less</li></ul><br />The first statement is quite powerful when you consider the fact that the default stripe size in Lustre is 1 MB. The fact that 90% of files on the file system are smaller than this means that <b>90% of users’ files really gain no advantages by living on Lustre</b>.
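<br /><br />(As an aside: a census like this boils down to bucketing file sizes by powers of two. The following is a hypothetical, minimal Python sketch of that binning; a naïve directory walk like this would not scale gracefully to the 1.7 PB systems described here, and is only meant to show the bucketing logic.)<br /><br />
+<pre><code class="language-python">#!/usr/bin/env python
+# Hypothetical sketch: walk a directory tree and count files in
+# power-of-two size buckets, the binning used in the figures above.
+import os, sys
+from collections import Counter
+
+def census(root):
+    buckets = Counter()
+    for dirpath, dirnames, filenames in os.walk(root):
+        for name in filenames:
+            try:
+                size = os.lstat(os.path.join(dirpath, name)).st_size
+            except OSError:
+                continue          # file vanished mid-walk; skip it
+            # next power of two at or above the file size
+            buckets[1 if size == 0 else 2**(size - 1).bit_length()] += 1
+    return buckets
+
+if __name__ == "__main__":
+    counts = census(sys.argv[1])
+    for bucket in sorted(counts):
+        print "files up to %15d bytes: %d" % (bucket, counts[bucket])
+</code></pre>
+<br />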
Furthermore, since this is a scratch file system that is meant to hold temporary files, it would appear that either user applications are generating a large amount of small files, or users are copying in large quantities of small files and improperly using it for cold storage. Given the quota policies for Data Oasis, I suspect there is a bit of truth to both.<br /><br />Circling back a bit though, I said earlier that comparing just the quantity of files can be a bit misleading since a thousand 1 KB files will take up the same space as a single 1 MB file. We can also look at how much total space is taken up by files of various sizes.<br /><br />&lt;table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style="text-align: center;"&gt;<a href="http://3.bp.blogspot.com/-jSmVlIJTa9E/VCjdrFMZlZI/AAAAAAAAKvI/gVPZm53WnDA/s1600/bin%2Bweight%2Band%2Bcumul%2Bdist.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="271" src="http://3.bp.blogspot.com/-jSmVlIJTa9E/VCjdrFMZlZI/AAAAAAAAKvI/gVPZm53WnDA/s1600/bin%2Bweight%2Band%2Bcumul%2Bdist.png" width="400" /></a>&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class="tr-caption" style="text-align: center;"&gt;Figure 7. File system capacity consumed by files of a given size on Data Oasis’s scratch file system for Gordon.  This is just a more finely diced version of the data presented in Figure 4 above.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;<br />The above chart is a bit data-dense so it takes some staring at to understand what’s going on. First looking at the purple line, we can pull out some pretty interesting facts:<br /><br />&lt;ul&gt;&lt;li&gt;Half of the file system’s used capacity (50%) is consumed by files that are 1 GB or less in size&lt;/li&gt;&lt;li&gt;Over 20% of the file system’s used capacity is taken up by files smaller than 64 MB&lt;/li&gt;&lt;li&gt;About 10% of the capacity is used by files that are 64 GB or larger&lt;/li&gt;&lt;/ul&gt;<br />The blue boxes represent the derivative of that purple line—that is, how much space is taken up by files of only one specific size. The biggest chunk of the file system (141 TB) is taken up by 4 GB files, but it appears that there is a substantial range of file sizes that take up very similarly sized pieces of the pie. 512 MB files take up a total of 139 TB; 1 GB, 2 GB, and 8 GB files all take up over 100 TB of total space each as well. In fact, files ranging from 512 MB to 8 GB comprise 50% of the total file system capacity.<br /><br />Why the sweet spot for space-consuming files is between 512 MB and 8 GB is unclear, but I suspect it’s more caused by the human element in research. In my own research, I worked with files in this range simply because it was enough data to be statistically meaningful while still small enough to quickly re-analyze or transfer to a colleague. For file sizes above this range, the mass of the data made it difficult to manipulate using the “long-tail” cyberinfrastructure available to me. But, perhaps as more national-scale systems comes online to meet the needs of these sorts of workloads, this sweet spot will creep out to larger file sizes.<br /><br />&lt;h2&gt;Projects Storage&lt;/h2&gt;The above discussion admittedly comes with a lot of caveats.  In particular, the scratch file system we examined was governed by no hard quotas which did lead some people to leave data resident for longer than they probably should have.  
<p>However, the other file system we analyzed was SDSC’s Data Oasis projects storage, which was architected for capacity over performance and featured substantially more disks per OSS. This projects storage also came with 500 GB quotas by default, forcing users to be a little more mindful of what was worth keeping.</p>

<p>Stepping back to the coarse-grained kilobyte/megabyte/gigabyte/terabyte pie charts, here is how projects storage utilization compared to scratch storage:</p>

<figure><img src="http://3.bp.blogspot.com/-wdsd5yB18VE/VCjiRbSA0HI/AAAAAAAAKvU/W52Xv6-Z8-w/s1600/ct%2Bbreakdown%2Bcompare.png" /><figcaption>Figure 8. Fraction of file count consumed by files of a given size on Data Oasis’s projects file system (shared between Gordon and Trestles users)</figcaption></figure>

<p>On the basis of file counts, it’s a bit surprising that users seem to store more small (kilobyte-sized) files in their projects space than in their scratch space. This may imply that the input and output data bookending simulations aren’t as large as the intermediate data generated during the calculation. Alternately, it may be a reflection of user naïveté; I’ve found that newer users were often afraid to use the scratch space because of the perception that their data might vanish from there without advance notice. Either way, gigabyte-sized files comprised a few hundredths of a percent of files, and terabyte-sized files were scarcer still on both file systems. The trend was uniformly towards smaller sizes on projects space.</p>

<p>As far as the space consumed by these files, the differences remain subtle.</p>

<figure><img src="http://1.bp.blogspot.com/-3cs8iwXSnRA/VCjmML1IMMI/AAAAAAAAKvg/HUFZh8BYsLk/s1600/size%2Bbkdown%2Bcompare.png" /><figcaption>Figure 9. Fraction of file system capacity consumed by files of a given size on Data Oasis’s projects file system</figcaption></figure>

<p>There appears to be a trend towards users keeping larger files in their projects space, and the biggest change is the decrease in megabyte-sized files in favor of gigabyte-sized files.</p>
<p>However, this trend is subtle, and it persists across a finer-grained examination of file size distributions:</p>

<figure><img src="http://3.bp.blogspot.com/-yYPXPGFN2ck/VCjvIHxoJpI/AAAAAAAAKv8/MiUafRm7yCU/s1600/megaplot.png" /><figcaption>Figure 10. File system capacity consumed by files of a given size on Data Oasis's projects file system</figcaption></figure>

<p>Half of the above plot is the same data shown in Figure 7, making this plot twice as busy and confusing. However, there’s a lot of interesting data captured in it, so it’s worth the confusing presentation. In particular, the overall distribution of mass with respect to the various file sizes is remarkably consistent between scratch and projects storage. We see the same general peak of file size preference in the 1 GB to 10 GB range, but projects storage shows a subtle bimodal divide that reveals a preference for 128 MB–512 MB and 4 GB–8 GB files; this manifests in the integrals (red and purple lines), which show a visibly greater slope in those regions.</p>

<p>The observant reader will also notice that the absolute values of the bars are smaller for projects storage than for scratch storage; this is because the projects file system is subject to quotas and, as a result, is not nearly as full of user data. To complicate things further, the projects storage represents user data from two different machines (each with unique job size policies, to boot), whereas the scratch storage is only accessible from one of those machines. Despite these differences, user data follows very similar distributions on both file systems.</p>

<h2>Corollaries</h2>

<p>It is probably unclear what to take away from these data, and that is with good reason. There are fundamentally two aspects to quantifying storage utilization (raw capacity and file count) because they represent two logically separate things. There is some degree of interchangeability (e.g., storing a whole genome in one file vs. storing each chromosome in its own file), and this is likely contributing to the broad peak in file size between 512 MB and 8 GB. With that being said, it appears that the typical long-tail user stores a substantial number of decidedly “small” files on Lustre, as exemplified by the fact that 90% of the files resident on the file systems analyzed here are 1 MB or smaller.</p>

<p>This alone suggests that large parallel file systems may not actually be the most appropriate choice for HPC systems that are designed to support a large group of long-tail users.</p>
<p>While file systems like Lustre and GPFS certainly provide a unique <i>capability</i>, in that some types of medium-sized jobs absolutely require the IO capabilities of parallel file systems, a larger number of long-tail applications do single-threaded IO, and some of these perform IO in such an abusive way (looking at you, quantum chemistry) that they cannot run on file systems like Lustre or GPFS at all because of the number of small files and the random IO they generate.</p>

<p>So if Lustre and GPFS aren't the unequivocal best choice for storage in long-tail HPC, what are the other options?</p>

<h3>Burst Buffers</h3>

<p>I would be remiss if I neglected to mention burst buffers here, since they are designed, in part, to address the limitations of parallel file systems. However, their actual usability remains unproven. Anecdotally, long-tail users are generally not quick to alter the way they design their jobs to use cutting-edge technology, and my personal experience with Gordon (and its 300 TB of flash) was that getting IO-nasty user applications to effectively utilize the flash was often a very manual process that introduced new complexities, pitfalls, and failure modes. Gordon was a very experimental platform though, and <a href="http://www.cray.com/Products/Computing/XC/DataWarp.aspx">Cray's new DataWarp</a> burst buffer seems to be the first large-scale productization of this idea. It will be interesting to see how well it works for real users when the technology starts <a href="https://www.nersc.gov/users/computational-systems/cori/">hitting the floor for open science in mid-2016</a>, if not sooner.</p>

<h3>High-Performance NAS</h3>

<p>An emerging trend in HPC storage is the use of high-performance NAS as a complementary file system technology in HPC platforms. Traditionally, NAS has been a very poor choice for HPC applications because of the limited scalability of the typical NAS architecture: data resides on a traditional local file system, network service is provided by an additional software layer like NFS, and the ratio of storage capacity to network bandwidth out of the NAS is very high.</p>

<p>The emergence of cheap RAM and enterprise SSDs has allowed some sophisticated file systems like ZFS and NetApp's WAFL to demonstrate very high performance, especially for random reads, by using both RAM and flash as a buffer between the network and the spinning rust. This allows certain smaller-scale jobs to enjoy substantially better performance on flash-backed NAS than on a parallel file system. Consider the following IOP/metadata benchmark run on a parallel file system and a NAS head with SSDs for caching.</p>
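<p>(Tests of this general shape can be driven with mdtest; the invocation below is a hypothetical sketch rather than a record of the actual runs, with made-up rank counts, item counts, and paths.)</p>

<pre><code class="language-bash"># Hypothetical mdtest run: 64 MPI ranks, file-only tests, 3 iterations,
# 4096 items per rank, each rank working in its own subdirectory
mpirun -np 64 mdtest -F -i 3 -n 4096 -u -d /scratch/$USER/mdtest
</code></pre>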
<figure><img src="http://1.bp.blogspot.com/-fyF1j9G0ouU/VFHNnHOB-YI/AAAAAAAAKxQ/owVhLiILb1E/s1600/mdstat-stats-per-sec.png" /><figcaption>Figure 11. File stat rate on flash-backed NAS vs. a parallel file system, as measured by <a href="http://mdtest.sourceforge.net/">the mdtest benchmark</a></figcaption></figure>

<p>A four-node job that relies on <a href="http://pubs.opengroup.org/onlinepubs/009695399/functions/stat.html">statting</a> many small files (for example, an application that traverses a large directory structure such as the output of one of the Illumina sequencers I mentioned above) <i>can</i> achieve a much higher IO rate on a high-performance NAS than on a parallel file system. Granted, there are a lot of qualifications to be made with this statement, and benchmarking high-performance NAS is worth a post of its own, but the above data illustrate a case where NAS may be preferable to something like Lustre.</p>

<h3>Greater Context</h3>

<p>Parallel file systems like Lustre and GPFS will always play an essential role in HPC, and I don't want to make it sound like they can be universally replaced by high-performance NAS. They are fundamentally architected to scale out, so that increasing file system bandwidth does not require adding new partitions or using <a href="http://www.netapp.com/us/products/platform-os/infinite-volume.aspx">software to emulate a single namespace</a>. In fact, the single namespace of parallel file systems makes the management of the storage system, its users, and the underlying resources very flexible and straightforward. No volume partitioning needs to be imposed, so scientific applications' and projects' data consumption does not have to align with physical hardware boundaries.</p>

<p>However, there are cases where a single namespace is not necessary at all; for example, user home directories are naturally partitioned with fine granularity and can be mounted in a uniform location, while physically residing on different NAS heads, with a simple autofs map (a sketch follows at the end of this section). In this example, leaving user home directories on a pool of NAS filers offers two big benefits:</p>

<ol>
  <li>Full independence of the underlying storage mitigates the impact of one bad user. A large job dropping multiple files per MPI process will crush both Lustre and NFS, but in the case of Lustre, the MDS may become unresponsive and block IO across all users' home directories.</li>
  <li>Flash caches on NAS can provide higher performance on IOP-intensive workloads at long-tail job sizes. In many ways, high-performance NAS systems have built in the burst buffers that parallel file systems are only now beginning to incorporate.</li>
</ol>

<p>Of course, these two wins come at a cost:</p>

<ol>
  <li>Fully decentralized storage is more difficult to manage. For example, balancing capacity across all NAS systems is tricky when users have very different data generation rates that they do not disclose ahead of time.</li>
  <li>Flash caches can only get you so far, and NFS will fall over when enough IO is thrown at it. I mentioned that 98% of all jobs use 1024 cores or fewer (see Figure 1), but 1024 cores all performing heavy IO on a typical capacity-rich, bandwidth-poor NAS head will cause it to grind to a halt.</li>
</ol>

<p>Flash-backed high-performance NAS is not an end-all storage solution for long-tail computational science, but it also isn't something to be overlooked outright.</p>
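<p>(For concreteness, the autofs map mentioned above can be as simple as the following sketch; the mount point, filer hostnames, and export paths are all hypothetical.)</p>

<pre><code># /etc/auto.master: one indirect map covers all home directories
/home   /etc/auto.home

# /etc/auto.home: each user's home physically lives on one of several NAS heads
alice   -fstype=nfs,rw   nas01.example.com:/export/home/alice
bob     -fstype=nfs,rw   nas02.example.com:/export/home/bob
*       -fstype=nfs,rw   nas03.example.com:/export/home/&amp;
</code></pre>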
<p>As with any technology in the HPC arena, its utility may or may not match up well with users' workloads, but when it does, it can deliver less pain and better performance than parallel file systems.</p>

<h2>Acknowledgments</h2>

<p>As I mentioned above, the data I presented here was largely generated as a result of an internal project in which I participated while at SDSC. I couldn't have cobbled this all together without the help of SDSC's HPC Systems group, and I'm really indebted to <a class="g-profile" href="https://plus.google.com/115709389472600856394" target="_blank">+Rick</a>, <a class="g-profile" href="https://plus.google.com/105132496853043288048" target="_blank">+Haisong</a>, and <a class="g-profile" href="https://plus.google.com/113299603442523075439" target="_blank">+Trevor</a> for doing a lot of the heavy lifting in terms of generating the original data, getting systems configured to test, and figuring out what it all meant when the dust settled (even after I had left!). SDSC's really a world-class group of individuals.</p>


The Shell For Scientists
2014-10-05T01:00:00-06:00
https://hpc.social/2014/the-shell-for-scientists

<p>I’ve posted a half-day “The Shell for Scientists” <a href="https://github.com/ljdursi/shell-for-scientists">tutorial</a> that I’ve given variants of a number of times; the motivating problem, provided by Greg Wilson for a two-day set of tutorials at the University of Toronto, was cleaning up a bunch of auditory lab data on people’s cochlear implants.</p>

<p>The focus is on productivity and automation; PDF slides are available <a href="https://github.com/ljdursi/shell-for-scientists/raw/master/presentation/presentation.pdf">here</a> (although I really should translate them into a markdown-based format to make them more re-usable).</p>

<p>Covered are a number of basic shell commands:</p>

<ul>
  <li>echo</li>
  <li>pwd</li>
  <li>cd</li>
  <li>ls</li>
  <li>man</li>
  <li>file</li>
  <li>cat</li>
  <li>more</li>
  <li>wc</li>
  <li>mv</li>
  <li>cp</li>
  <li>rm</li>
  <li>head</li>
  <li>tail</li>
  <li>sort</li>
  <li>mkdir</li>
  <li>rmdir</li>
  <li>grep</li>
  <li>for..do..done</li>
</ul>

<p>as well as simple script writing. There is some optional material on make (again, for automation) and ssh/scp (because that was frequently necessary for tutorials at SciNet). There are a number of hands-on exercises sprinkled throughout.</p>


Floating-Point Data Shouldn't Be Serialized As Text
2014-10-01T01:00:00-06:00
https://hpc.social/2014/floating-point-data-shouldn-t-be-serialized-as-text

<p>Write data files in a binary format, unless you’re actually going to be reading the output yourself - and you’re not going to be reading millions of data points.</p>

<p>The reasons for using binary are threefold, in decreasing order of importance:</p>

<ul>
  <li>Accuracy</li>
  <li>Performance</li>
  <li>Data size</li>
</ul>

<p>Accuracy concerns may be the most obvious. When you convert a (binary) floating point number to a string representation of the decimal number, you inevitably truncate at some point. That’s ok if you are sure that, when you read the text value back into a floating point value, you will certainly get the same value back; but that is actually a subtle question and requires choosing your format carefully.</p>
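<p>There is a known safe answer here: for IEEE single precision, nine significant decimal digits (e.g., C’s <code>%.9g</code>) are always enough to recover the exact value, and doubles need seventeen (<code>%.17g</code>). A minimal sketch of that round trip:</p>

<pre><code class="language-c">#include &lt;assert.h&gt;
#include &lt;stdio.h&gt;
#include &lt;stdlib.h&gt;

int main(void) {
    float x = 0.1f;    /* not exactly representable in binary */
    char buf[32];

    /* 9 significant digits suffice to round-trip any finite float */
    snprintf(buf, sizeof(buf), "%.9g", x);
    float y = strtof(buf, NULL);

    assert(x == y);    /* exact round trip */
    printf("%s reads back exactly\n", buf);
    return 0;
}
</code></pre>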
<p>Using default formatting, however, various compilers perform this task with varying degrees of quality. <a href="http://randomascii.wordpress.com/2013/02/07/float-precision-revisited-nine-digit-float-portability/">This blog post</a>, written from the point of view of a games programmer, does a good job of covering the issues; but note that for technical computing, we generally must be much more demanding about accuracy.</p>

<p>Let’s consider a little program which, for a variety of formats, writes a single-precision real number out to a string and then reads it back in again, keeping track of the maximum error it encounters. We’ll just go from 0 to 1, in units of machine epsilon. The code follows:</p>

<pre><code class="language-c">#include &lt;stdio.h&gt;
#include &lt;math.h&gt;
#include &lt;float.h&gt;

int main(int argc, char **argv) {

    const int nformats = 5;
    char *formats[] = { "%11.4f", "%13.6f", "%15.8f", "%17.10f", "%f" };
    float maxerrors[nformats];

    for (int fmt = 0; fmt &lt; nformats; fmt++)
        maxerrors[fmt] = 0.;

    float input = 0;
    while (input &lt; 1) {
        for (int fmt = 0; fmt &lt; nformats; fmt++) {
            char stringrep[128];
            sprintf(stringrep, formats[fmt], input);

            float output;
            sscanf(stringrep, "%f", &amp;output);

            float err = fabs(output - input);
            if (err &gt; maxerrors[fmt])
                maxerrors[fmt] = err;
        }

        input += FLT_EPSILON;
    }

    printf("Maximum errors: \n");
    for (int fmt = 0; fmt &lt; nformats; fmt++)
        printf("%12s\t", formats[fmt]);
    printf("\n");
    for (int fmt = 0; fmt &lt; nformats; fmt++)
        printf("%12.6e\t", maxerrors[fmt]);   /* scientific notation, matching the output below */
    printf("\n");

    return 0;
}
</code></pre>

<p>and when we run it, we get:</p>

<pre><code class="language-bash">$ ./accuracy
Maximum errors: 
      %11.4f	      %13.6f	      %15.8f	     %17.10f	          %f
5.000830e-05	5.066395e-07	7.450581e-09	5.820766e-11	5.066395e-07
</code></pre>

<p>Note that even using a format with 8 digits after the decimal place - which we might think would be plenty, given that <a href="http://stackoverflow.com/questions/24377058/decimal-accuracy-of-binary-floating-point-numbers/24387402#24387402">single precision reals are only accurate to 6-7 decimal places</a> - the data doesn’t survive a round trip through string formatting exactly; we get values back that are off by approximately $10^{-8}$. And this compiler’s default format does <em>not</em> give us accurate round-trip floating point values; some error is introduced! If you’re a video-game programmer, that level of accuracy may well be enough. For scientific/technical work, however, it might absolutely not be ok, particularly if there’s some bias to where the error is introduced, or if the error occurs in what is supposed to be a conserved quantity.</p>

<p>Note that if you try running this code, you’ll notice that it takes a surprisingly long time to finish. That’s because, maybe surprisingly, performance is another real issue with text output of floating point numbers. Consider the following simple program, which writes out a 2D array of 5000 × 5000 floats as text (using <code>fprintf()</code>) and as unformatted binary (using <code>fwrite()</code>). The code will follow, but to start, here are the timing outputs:</p>

<pre><code class="language-bash">$ ./io-performance 5000
Text      : time = 20.229191
Raw Binary: time = 0.042213
</code></pre>

<p>Note that when writing to disk, the binary output is <strong>479 times</strong> as fast as text output.</p>
<p>There are two reasons for this - one is that you can write out the data all at once, rather than having to loop; the other is that generating the string decimal representation of a floating point number is a surprisingly subtle operation which requires a significant amount of computing for each value.</p>

<p>Finally, there is data size: the text file in the above example comes out (on my system - it depends on the compiler's default float formatting, etc.) to about 4 times the size of the binary file.</p>

<p>Now, there are real problems with binary output. In particular, raw binary output is very brittle. If you change platforms, or your data size changes, your output may no longer be any good. Adding new variables to the output will break the file format unless you always add new data at the end of the file, and you have no way of knowing ahead of time what variables are in a binary blob you get from your collaborator (who might be you, three months ago).</p>

<p>Most of the downsides of binary output are avoided by using libraries which use binary output to serialize but include enough metadata to describe the data. For output of large scientific arrays, <a href="http://www.unidata.ucar.edu/software/netcdf/">NetCDF</a>, which writes self-describing binary files that are much more “future proof” than raw binary, is a good choice. Better still, since it’s a standard, many tools read NetCDF files. In other contexts, formats like <a href="http://bsonspec.org">BSON</a> make a lot of sense.</p>

<p>There are many NetCDF tutorials on the internet; one I wrote is <a href="http://wiki.scinethpc.ca/wiki/images/a/af/Netcdfhdf5.pdf">here</a>. A simple example using NetCDF gives IO performance results much closer to raw binary than to text:</p>

<pre><code class="language-bash">$ ./io-performance
Text      : time = 20.504855
Raw Binary: time = 0.049945
NetCDF4   : time = 0.155822
</code></pre>

<p>but gives you a nice self-describing file:</p>

<pre><code class="language-bash">$ ncdump -h test.nc
netcdf test {
dimensions:
	X = 5000 ;
	Y = 5000 ;
variables:
	float Array(X, Y) ;
		Array:units = "ergs" ;
}
</code></pre>

<p>and file sizes about the same as raw binary:</p>

<pre><code class="language-bash">$ du -sh test.*
 96M	test.dat
 96M	test.nc
382M	test.txt
</code></pre>

<p>the code follows:</p>

<pre><code class="language-c">#include &lt;stdio.h&gt;
#include &lt;stdlib.h&gt;
#include &lt;sys/time.h&gt;
#include &lt;netcdf.h&gt;
#include &lt;string.h&gt;

void tick(struct timeval *t);
double tock(struct timeval *t);
void writenetcdffile(const char *filename, int n, float **data);

int main(int argc, char **argv) {

    if (argc &lt; 2) {
        fprintf(stderr, "Usage: %s n -- test write speeds of n x n array\n", argv[0]);
        exit(1);
    }

    int n = atoi(argv[1]);
    const int defaultn = 5000;
    if (n &lt; 1 || n &gt; 10000) {
        fprintf(stderr, "Invalid n %s: using n = %d\n", argv[1], defaultn);
        n = defaultn;
    }

    float **data = malloc(n * sizeof(float *));
    float *p = malloc(n * n * sizeof(float));
    for (int i = 0; i &lt; n; i++)
        data[i] = &amp;(p[i * n]);

    for (int i = 0; i &lt; n; i++)
        for (int j = 0; j &lt; n; j++)
            data[i][j] = i * n + j;

    struct timeval timer;
    tick(&amp;timer);
    FILE *txt = fopen("test.txt", "w");
    for (int i = 0; i &lt; n; i++) {
        for (int j = 0; j &lt; n; j++)
            fprintf(txt, "%f ", data[i][j]);
        fprintf(txt, "\n");
    }
    fclose(txt);
    printf("Text      : time = %lf\n", tock(&amp;timer));

    tick(&amp;timer);
    FILE *bin = fopen("test.dat", "wb");
    fwrite(data[0], sizeof(float), n * n, bin);
    fclose(bin);
    printf("Raw Binary: time = %lf\n", tock(&amp;timer));

    tick(&amp;timer);
    writenetcdffile("test.nc", n, data);
    printf("NetCDF4   : time = %lf\n", tock(&amp;timer));

    free(data[0]);
    free(data);
}

void tick(struct timeval *t) {
    gettimeofday(t, NULL);
}

/* returns time in seconds from now to time described by t */
double tock(struct timeval *t) {
    struct timeval now;
    gettimeofday(&amp;now, NULL);
    return (double)(now.tv_sec - t-&gt;tv_sec) + ((double)(now.tv_usec - t-&gt;tv_usec) / 1000000.);
}

void writenetcdffile(const char *filename, int n, float **data) {
    /* identifiers */
    int file_id, xdim_id, ydim_id;
    int data_id;

    /* sizes */
    int datadims[2];

    /* name of units for data */
    const char *dataunits = "ergs";

    /* return status */
    int status;

    /* Create a new file - clobber anything existing */
    status = nc_create(filename, NC_CLOBBER, &amp;file_id);
    /* netCDF routines return NC_NOERR on success */
    if (status != NC_NOERR) {
        fprintf(stderr, "Could not open file %s\n", filename);
        return;
    }

    /* define the dimensions */
    nc_def_dim(file_id, "X", n, &amp;xdim_id);
    nc_def_dim(file_id, "Y", n, &amp;ydim_id);

    /* now that we've defined the dimensions, we can define variables on them */
    datadims[0] = xdim_id;  datadims[1] = ydim_id;
    nc_def_var(file_id, "Array", NC_FLOAT, 2, datadims, &amp;data_id);

    /* assign units to the variables */
    nc_put_att_text(file_id, data_id, "units", strlen(dataunits), dataunits);

    /* we are now done defining variables and their attributes */
    nc_enddef(file_id);

    /* Write out the data to the variables we've defined */
    nc_put_var_float(file_id, data_id, &amp;(data[0][0]));

    nc_close(file_id);
    return;
}
</code></pre>

<p>(This post is crosslisted from a <a href="http://stackoverflow.com/questions/24395686/best-way-to-write-a-large-array-to-file-in-fortran-text-vs-other/24396176#24396176">StackOverflow Answer</a>.)</p>


Docker for HPC
2014-09-05T16:21:11-06:00
https://hpc.social/2014/docker-for-hpc

<p>With the recent release of Docker 1.0 and the broad industry backing from organizations such as Red Hat and IBM, it's no wonder that interest in the use and application of this Linux container technology continues to grow. Docker ships with Red Hat Enterprise Linux 7, and there is a growing registry of <a href="https://registry.hub.docker.com/">Docker images</a> for a wide variety of applications.</p>

<p>For those who are unfamiliar with Docker, it's essentially a container technology for the Linux platform which leverages existing, well-proven technologies such as control groups (cgroups) and Linux Containers (LXC). Docker brings these technologies together and provides ease of setup and use as well as compelling efficiency.</p>

<p>The IBM Platform Computing team has recently announced the availability of the IBM Platform LSF and Docker integration, which is available as an open beta on Service Management Connect.</p>
<p>Supplementing the release of the integration is a white paper focused on the suitability of Docker for high performance computing (HPC); it includes an easy-to-follow, real-world example of how to run a Docker image under Platform LSF.</p>

<p>Happy tinkering!</p>


Hadoop For HPCers
2014-09-04T01:00:00-06:00
https://hpc.social/2014/hadoop-for-hpcers

<p>My colleague Mike Nolta and I have put together <a href="https://github.com/ljdursi/hadoop-for-hpcers-tutorial">a half-day tutorial on Hadoop</a> - briefly covering HDFS, MapReduce, <a href="http://pig.apache.org">Pig</a>, and Spark - for an HPC audience, and put the materials on <a href="https://github.com/ljdursi/hadoop-for-hpcers-tutorial">github</a>.</p>

<p>The <a href="https://hadoop.apache.org">Hadoop</a> ecosystem of tools continues to grow rapidly, and now includes tools like <a href="https://spark.apache.org">Spark</a> and <a href="http://flink.incubator.apache.org">Flink</a> that are very good for iterative numerical computation - either simulation or data analysis. These tools, and the underlying technologies, are (or should be) of real interest to the HPC community, but most materials are written for audiences with web application or machine-learning backgrounds, which makes it harder for an HPC audience to see how the tools can be useful to them and how they might be applied.</p>

<p>Most of the source code is Python. Included on GitHub are all sources for the examples, a Vagrantfile for a VM to run the software on your laptop, and the presentation in <a href="https://github.com/ljdursi/hadoop-for-hpcers-tutorial/blob/master/presentation/presentation.md">Markdown</a> and <a href="https://github.com/ljdursi/hadoop-for-hpcers-tutorial/blob/master/presentation/keynote-presentation.pdf?raw=true">PDF</a>. Feel free to fork, send pull requests, or use the materials as you see fit.</p>


Exascale in perspective- RSC's 1.2 petaflop rack
2014-06-29T21:31:00-06:00
https://hpc.social/2014/exascale-in-perspective-rsc-s-1-2-petaflop-rack

<p>Russian supercomputing manufacturer <a href="http://insidehpc.com/2014/06/29/rsc-announces-record-compute-density-xeon-phi-isc14/">RSC generated some buzz at ISC'14 last week when they showed their 1.2 PF-per-rack Xeon Phi-based platform</a>. I was aware of this system from when they <a href="http://insidehpc.com/2014/04/02/rsc-petastream-delivers-1-2-pflops-per-rack-xeon-phi/">first announced it a few months prior</a>, and I referenced it in a piece of a blog post I was writing about the scarier aspects of exascale computing. Given my impending career change, though, it is unclear that I will ever have the time to finish that post before it becomes outdated. Since RSC is back in the spotlight, I thought I'd post the piece I wrote to illustrate how wacky this 1.2 PF rack really is in terms of power consumption. Power consumption, of course, is the limiting factor standing between today and the era of exascale computing.</p>

<p>So, to put a 400 kW, 1.2 PF rack into perspective, here is that piece:</p>

<hr />

<h2>The Importance of Energy Efficiency</h2>

<p>Up through the petascale era in which we currently live, the raw performance of high-performance components (processors, RAM, and interconnect) was what limited the ultimate performance of a given high-end machine.</p>
<p>The first petaflop machine, Los Alamos' Roadrunner, derived most of its FLOPs from <a href="http://www.redbooks.ibm.com/redpapers/pdfs/redp4477.pdf">high-speed PowerXCell 8i processors pushing 3.2 GHz per core</a>. Similarly, the first 10 PF supercomputer, RIKEN's K computer, derived its performance from <a href="http://www.fujitsu.com/downloads/TC/isc12/k-computer-isc12.pdf">its sheer size of 864 cabinets</a>. Although I don't mean to diminish the work done by the engineers who actually got these systems to deliver this performance, the petascale era really was made possible by making really big systems out of really fast processors.</p>

<p>By contrast, exascale represents the first milestone where the limitation does <i>not</i> lie in making these high-performance components faster; rather, performance is limited by the amount of electricity that can be physically delivered to a processor and the amount of heat that can be extracted from it. This limitation is what has given rise to the massively parallel processors that eschew a few fast cores for a larger number of low-powered ones. By keeping clock speeds low and densely packing many (dozens or hundreds of) compute cores on a single silicon die, these massively parallel processors now realize power efficiencies (flops per watt) an order of magnitude higher than what traditional CPUs can deliver.</p>

<p>The closest technology on the market that will probably resemble the future's exaflop machines is based on accelerators: either NVIDIA GPUs or Intel's MICs. The goal will be to jam as many of these massively parallel processors into as small a space, and with as tight an integration, as possible. Recognizing this trend, NERSC has opted to build what I would call the first "pre-exascale" machine in its <a href="https://www.nersc.gov/users/computational-systems/nersc-8-system-cori/">NERSC-8 procurement</a>, which will feature a homogeneous system of manycore processors.</p>

<p>However, such pre-exascale hardware doesn't actually exist yet, and NERSC-8 won't appear until 2016. What does exist is a product by Russia's <a href="http://rscgroup.ru/">RSC Group</a> called PetaStream: a rack packed with 1024 current-generation Xeon Phi (Knights Corner) coprocessors that has a peak performance of 1.2 PF/rack. While this sounds impressive, it also highlights the principal challenge of exascale computing: power consumption. One rack of RSC PetaStream is rated for 400 kW, delivering 3 GFLOPs/watt peak. Let's put this into perspective.</p>
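<p>Working those numbers through:</p>

<pre><code>1.2 PF/rack ÷ 400 kW/rack  ≈ 3 GFLOPs/watt
1 EF ÷ 1.2 PF/rack         ≈ 834 racks
834 racks × 400 kW/rack    ≈ 333 MW for one exaflop
</code></pre>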
<h2>Kilowatts, megawatts, and gigawatts in perspective</h2>

<p>During a recent upgrade to our data center infrastructure, three <a href="http://multiquip.com/multiquip/DCA220SSCU4i.htm">MQ DCA220SS-series diesel generators</a> were brought in for the critical systems. Each is capable of producing 220 kVA according to the spec sheets.</p>

<figure><img src="http://2.bp.blogspot.com/-9fyvZaqnAY8/U4YzYjZPpQI/AAAAAAAAKcw/4p4v0RKhoTI/s1600/2014-02-14+17.09.29.jpg" /><figcaption>Three 220 kVA diesel generators plugged in during a PM at SDSC</figcaption></figure>

<p>It would take all three of these diesel generators to power a single rack of RSC's PetaStream. Of course, backup diesel generators aren't a very efficient way of generating commercial power, so this example is a bit skewed.</p>

<p>Let's look instead at something that is used to generate large quantities of commercial power. A <a href="http://www.ge-energy.com/products_and_services/products/wind_turbines/ge_1.5_77_wind_turbine.jsp">GE 1.5-77 wind turbine</a>, which is GE's most popular model, is advertised as delivering 1.5 megawatts at wind speeds above 15 miles per hour.</p>

<figure><img src="http://4.bp.blogspot.com/-cQNM7XKbFmM/U4Y3JL8-YAI/AAAAAAAAKdA/LVO_ohhL9bc/s1600/20090821_turbines_blades1.jpg" /><figcaption>GE 1.5 MW wind turbine. Source: <a href="http://www.nrel.gov/news/features/feature_detail.cfm/feature_id=1717">NREL</a></figcaption></figure>

<p>Doing the math, the above-pictured turbine would be able to power only three racks of RSC PetaStream on a breezy day.</p>

<p>To create a supercomputer with a peak capability of an exaflop using RSC's platform, you'd need over 800 racks of PetaStream and over 300 MW of power to turn it all on. That's over 200 of the above GE wind turbines, and enough electricity to power about 290,000 homes in the U.S. Wind farms of this size do exist; for example,</p>

<figure><img src="http://1.bp.blogspot.com/-1ZFeqhmyzLQ/U7By0J6HxhI/AAAAAAAAKls/v24Y5NpFG7U/s1600/1024px-WindTurbinesWallaWallaRiverWashington.JPG" /><figcaption>300 MW Stateline Wind Farm. Source: <a href="http://en.wikipedia.org/wiki/File:WindTurbinesWallaWallaRiverWashington.JPG">Wikimedia Commons</a></figcaption></figure>
<p>the <a href="http://en.wikipedia.org/wiki/Stateline_Wind_Farm">Stateline Wind Farm</a>, which was built on the border between Oregon and Washington, has a capacity of about 300 MW. Of course, wind farms of this capacity cannot be built in any old place.</p>

<p>Commercial nuclear power plants can be built in a variety of places, though, and they typically generate on the order of 1 gigawatt (GW) of power per reactor. In my home state of New Jersey, the <a href="http://www.pseg.com/family/power/nuclear/index.jsp">Hope Creek Nuclear Generating Station</a> has a single reactor that was built to deliver about 1.2 GW of power:</p>

<figure><img src="http://1.bp.blogspot.com/-x6ewW2IogbY/U7B1B4muwpI/AAAAAAAAKl4/t1qY4Q8irZE/s1600/469px-Hope_creek_NPP.jpg" /><figcaption>1.2 GW Hope Creek nuclear power station. The actual reactor is housed in the concrete cylinder to the bottom left. Courtesy of the Nuclear Regulatory Commission.</figcaption></figure>

<p>This is enough to power almost 4 exaflops of PetaStream. Of course, building a nuclear reactor for every exaflop supercomputer would be extremely costly, given the multi-billion-dollar cost of building reactors like this. Clearly, the energy efficiency (flops/watt) of computing technology needs to improve substantially before we can arrive at the exascale era.</p>


ISC 2014- Auf Wiedersehen Leipzig
2014-06-28T16:24:51-06:00
https://hpc.social/2014/isc-2014-auf-wiedersehen-leipzig

<p>I've just returned from International Supercomputing 2014, which took place in Leipzig, Germany. As was the case in 2013, I greatly enjoyed my time at the conference, and the hospitality in Leipzig. It's a wonderful city to visit.</p>

<p>You will have read in my previous blogs about my experiences with ARM-based developer systems and running IBM Platform LSF. For me, ISC 2014 was a very interesting event for one big reason - variety! Variety is the spice of life, as they say. And the variety in this case came from the displays at OpenPOWER Foundation members Mellanox and NVIDIA, as well as servers based on the newly unveiled Applied Micro X-Gene 64-bit ARM processors.</p>

<p>Although small in size, the Tyan POWER8 motherboard with an NVIDIA Tesla K40 installed made a strong statement. Although OpenPOWER was only founded in 2013, we are already seeing the benefits of this foundation, with a varied member base including education, interconnect, and accelerator vendors - all with an HPC pedigree. With this rich and growing set of members, these look to be exciting times for the IBM POWER8 processor and the OpenPOWER Foundation.</p>

<p>For those of you who did not attend, the IBM booth had a number of live demos, including the IBM Platform Computing Cloud Service, which is built on top of IBM SoftLayer infrastructure.</p>
<p>This service can provide both hybrid and stand-alone clouds and is ideally suited for HPC workloads, as it's non-virtualized.</p>

<p>So we say auf Wiedersehen to Leipzig for now and look forward to the spice that New Orleans will provide this autumn, where there will surely be more exciting things emerging from the OpenPOWER Foundation!</p>

<figure><img src="https://www.gaborsamu.com/images/ISC2014_wrapup.jpg" /></figure>


Perspectives on the Current State of Data-Intensive Scientific Computing
2014-06-24T19:08:00-06:00
https://hpc.social/2014/perspectives-on-the-current-state-of-data-intensive-scientific-computing

<p>I recently had the benefit of being invited to attend two workshops in Oakland, CA, hosted by the U.S. Department of Energy (DOE), that shared the common theme of emerging trends in data-intensive computing: the <a href="http://www.nersc.gov/users/training/nersc-training-events/joint-data-intensive/">Joint User Forum on Data-Intensive Computing</a> and the <a href="http://www.nersc.gov/research-and-development/HPCOR/">High Performance Computing Operational Review</a>. My current employment requires that I stay abreast of all topics in data-intensive scientific computing (I wish there was <a href="https://twitter.com/NicoleHemsoth/status/433484762787741696">an acronym to abbreviate this</a>…DISC, perhaps?), so I didn't go in expecting to be exposed to a world of new information. As it turned out, though, I did gain a very insightful perspective on how data-intensive scientific computing (DISC), and I daresay <i>Big Data</i>, is seen by the people who operate some of the <a href="https://asc.llnl.gov/computing_resources/sequoia/">world's</a> <a href="https://www.olcf.ornl.gov/titan/">largest</a> <a href="http://www.lanl.gov/asc/trinity-highlight.php">supercomputers</a>.</p>

<p>The DOE perspective is surprisingly realistic, application-oriented, and tightly integrated with high-performance computing. There was the obligatory discussion of Hadoop and how it may be wedged into machines at <a href="https://github.com/chu11/magpie">LLNL with Magpie</a>, <a href="http://www.nersc.gov/assets/Uploads/T12-Experience-with-Data-Parallel-Frameworks.pdf">ORNL</a> with <a href="https://github.com/jhorey/SpotHadoop">Spot Hadoop</a>, and <a href="http://www.nersc.gov/assets/Uploads/T10-Joint-Facilities-User-Forum.pdf">SDSC</a> with <a href="https://github.com/glennklockwood/myhadoop">myHadoop</a>, of course, and there was also some discussion of real production use of Hadoop on <i>bona fide</i> Hadoop clusters at some of the DOE labs. However, Hadoop played only a minor role in the grand scheme of the two meetings, for <a href="http://glennklockwood.blogspot.com/2014/05/hadoops-uncomfortable-fit-in-hpc.html">all of the reasons I've outlined previously</a>.</p>

<p>Rather, these two meetings had three major themes that crept into all aspects of the discussion:</p>

<ol>
  <li>Scientific workflows</li>
  <li>Burst buffers</li>
  <li>Data curation</li>
</ol>

<p>I found this to be a very interesting trend, as #1 and #2 (workflows and burst buffers) aren't topics I'd heard come up at any other DISC workshops, forums, or meetings I've attended.</p>
<p>The connection between DISC and workflows wasn't immediately evident to me, and burst buffers are a unique aspect of cyberinfrastructure that was only thrust into the spotlight with the <a href="https://www.nersc.gov/users/computational-systems/nersc-8-system-cori/nersc-8-procurement/trinity-nersc-8-rfp/">NERSC-8/LANL Trinity RFP last fall</a>. However, all three of these topics will become central to both data-intensive scientific computing and, by virtue of their ability to <i>produce</i> data, exascale supercomputers.</p>

<h2>Scientific workflows</h2>

<p>Workflows are one of those aspects of scientific computing that have been easy to dismiss as the toys of computer scientists, because traditional problems in high-performance computing have typically been quite monolithic in how they are run. <a href="https://kepler-project.org/">SDSC's own Kepler</a> and <a href="http://pegasus.isi.edu/">USC's Pegasus</a> systems are perhaps the most well-known and highly engineered workflow management systems, and I have to confess that when I first heard of them a few years ago, I thought they seemed like a very complicated way to do very simple tasks.</p>

<p>As it turns out, though, both data-intensive scientific computing and exascale computing (by virtue of the output size of exaflop calculations) tend to follow patterns that look an awful lot like map/reduce at a very abstract level. This is because most data-intensive problems are not processing giant monoliths of tightly coupled and inter-related data; rather, they are working on large collections of generally independent data. Consider the <a href="http://www.sdsc.edu/Events/ipp_webinars/large_scale_genomics.pdf">recent talk I gave about a large-scale genomic study on which I consulted</a>; the general data processing flow was:</p>

<ol>
  <li>Receive 2,190 input files, 20 GB each, from a data-generating instrument</li>
  <li>Do some processing on each input file</li>
  <li>Combine groups of five input files into 438 files, each 100 GB in size</li>
  <li>Do more processing</li>
  <li>Combine 438 files into 25 overlapping groups to get 100 files, each 2.5 GB in size</li>
  <li>Do more processing</li>
  <li>Combine 100 files into a single 250 GB file</li>
  <li>Perform statistical analysis on this 250 GB file for scientific insight</li>
</ol>

<p>The natural <i>data-parallelism</i> inherent in the data-generating instrument means that any collective insight to be gleaned from this data requires some sort of mapping and reduction, and the process of managing this large volume of distributed data is where scientific workflows become a necessary part of data-intensive scientific computing. Managing terabytes or petabytes of data distributed across thousands or millions of logical records (whether they be files on a file system, rows in a database, or whatever else) very rapidly becomes a job that nobody will want to do by hand.</p>
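<p>(For what it's worth, the DAG structure of a pipeline like the one above is something even a plain Makefile can express; <code>process_one</code> and <code>combine</code> below are hypothetical stand-ins for real analysis steps, and the file layout is made up.)</p>

<pre><code class="language-make"># Sketch: a map/combine pipeline expressed as a DAG in make
# (recipe lines must be indented with tabs)
RAW    := $(wildcard raw/*.dat)
STAGE1 := $(patsubst raw/%.dat,stage1/%.out,$(RAW))

all: final.out

# "map" step: one independent task per input file
stage1/%.out: raw/%.dat
	process_one $&lt; &gt; $@

# "reduce" step: combine all intermediate outputs
final.out: $(STAGE1)
	combine $^ &gt; $@
</code></pre>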
<p>Hadoop/HDFS delivers an automated framework for managing these sorts of workflows if you don't mind rewriting all of your processing steps against the Hadoop API and building out HDFS infrastructure, but if this is not the case, alternate workflow management systems begin to look very appealing.</p>

<p>The core debate was not whether or not workflow management systems were a necessary component in DISC; rather, I observed two salient, open questions:</p>

<ol>
  <li>The systems in use at DOE (notably <a href="https://pythonhosted.org/FireWorks/">FireWorks</a> and <a href="https://bitbucket.org/berkeleylab/qdo">qdo</a>) are primarily used to work around deficiencies in current HPC schedulers (e.g., Moab and SLURM), in that those schedulers cannot handle scheduling hundreds of thousands of tiny jobs concurrently. Thus, <b>should these workflow managers be integrated into the scheduler</b> to address these shortcomings at their source?</li>
  <li><b>How do we stop every user from creating his or her own workflow manager scripts</b> and get them to adopt an existing solution instead? Should one workflow manager rule them all, or should a Darwinian approach be taken towards the current diverse landscape of existing software?</li>
</ol>

<p>Question #1 is a highly technical question that has several dimensions; ultimately, though, it's not clear to me that there is enough incentive for resource manager and scheduler developers to really dig into this problem. They haven't done so yet, and I can only assume that this is a result of the perceived domain-specificity and complexity of each workflow. In reality, a large number of workflows can be accommodated by two simple features: support for directed acyclic graphs (DAGs) of tasks, and support for lightweight, fault-tolerant task scheduling within a pool of reserved resources. Whether or not anyone will rise to the challenge of incorporating this <i>in a usable way</i> is an open question, but there certainly is a need for it in the emerging realm of DISC.</p>

<p>Question #2 is more interesting to me, since this problem of multiple people cooking up different but equivalent solutions to the same problems is pervasive throughout computational and computer science. This is in large part due to the fatal assumption held by many computer scientists that good software can simply be "thrown over the fence" to scientists and it will be adopted. This has never worked; rather, the majority of widely adopted software technologies in HPC have been the result of standardizing a landscape of similar but non-standard tools. This is something I touched on <a href="http://glennklockwood.blogspot.com/2014/05/hadoops-uncomfortable-fit-in-hpc.html">in a previous post</a> when outlining the history of MPI's and OpenMP's successes.</p>

<p>I don't think the menagerie of workflow developers is ready to settle on a standard, as the field is not mature enough to have a holistic understanding of all of the issues that workflows need to solve. Despite the numerous presentations and discussions of various workflow solutions being used across DOE's user facilities, my presentation was the only one that considered optimizing workflow execution for the underlying hardware.</p>
<p>Given that the target audience of these talks was users of high-performance computing, the lack of consideration given to the performance aspects of workflow optimization is a testament to this immaturity.</p>

<h2 id="bb">Burst buffers</h2>

<p>For those who haven't been following the details of one of DOE's more recent procurement rounds, the <a href="https://www.nersc.gov/users/computational-systems/nersc-8-system-cori/nersc-8-procurement/trinity-nersc-8-rfp/">NERSC-8 and Trinity request for proposals</a> (RFP) explicitly required that all vendor proposals include a <i>burst buffer</i> to address the capability of multi-petaflop simulations to dump tremendous amounts of data in very short order. The target use case is petascale checkpoint-restart, where the memory of thousands of nodes (hundreds of terabytes of data) needs to be flushed to disk in an amount of time that doesn't dominate the overall execution time of the calculation.</p>

<p>The concept of what a <i>burst buffer</i> is remains poorly defined. I got the sense that there are two outstanding definitions:</p>

<ul>
  <li>The <i>NERSC burst buffer</i> is something more tightly integrated on the compute side of the system and may be a resource that can be allocated on a per-job basis.</li>
  <li>The <i>Argonne burst buffer</i> is something more tightly integrated on the storage side of the system and acts in a fashion that is largely transparent to the user. This sounded a lot like <a href="http://insidehpc.com/2014/05/08/video-efficient-distributed-burst-buffer-system-lustre/">the burst buffer support being explored for Lustre</a>.</li>
</ul>

<p>In addition, <a href="http://www.hpcwire.com/2014/05/01/burst-buffers-flash-exascale-potential/">Los Alamos National Laboratory (LANL) is exploring burst buffers for the Trinity procurement</a>, and it wasn't clear to me whether they had chosen a definition or are exploring all angles. One commonality is that DOE is going full steam ahead on providing this burst buffer capability in some form or another, and solid-state storage is going to be a central enabling component.</p>

<p>Personally, I find the NERSC burst buffer concept a lot more interesting, since it provides a more general-purpose flash-based resource that can be used in novel ways. For example, emerging software-defined storage platforms like <a href="http://www.emc.com/cloud/vipr/index.htm">EMC's ViPR</a> can potentially provide very fine-grained access to flash as needed, to make better overall use of the underlying SSDs in HPC environments serving a broad user base (e.g., NERSC and the NSF centers). Complementing these software technologies are emerging hardware technologies like DSSD's D5 product, which will expose flash to compute systems in innovative ways at the hardware, interconnect, and software levels.</p>

<p>Of course, the fact that <a href="http://www.sdsc.edu/supercomputing/gordon/">my favorite supercomputer</a> provides dynamically allocatable SSDs in a fashion not far removed from these NERSC burst buffers probably biases me, but we've demonstrated unique DISC successes enabled by our ability to pile tons of flash onto single compute nodes.</p>
<p>This isn't to say that the Argonne burst buffer is without merit; given that the Argonne Leadership Computing Facility (ALCF) caters to <i>capability</i> jobs rather than <i>capacity</i> jobs, their user base is better served by providing a uniform, transparent burst I/O capability across all nodes. The NERSC burst buffer, by comparison, is a lot less transparent and will probably be much more susceptible to user disuse or misuse. I suspect that when the dust settles, both takes on the burst buffer concept will make their way into production use.</p>

<p>A lot of the talk and technology surrounding burst buffers is shrouded in NNSA secrecy or vendor non-disclosures, so I'm not sure what more there is to be said. However, the good folks at <a href="http://www.hpcwire.com/2014/05/01/burst-buffers-flash-exascale-potential/">HPCwire ran an insightful article on burst buffers</a> after the NERSC-8 announcement, for those who are interested in more detail.</p>

<h2>Data curation</h2>

<p>The final theme that bubbled just beneath the surface of the DOE workshops was the idea that we are coming upon an era where scientists can no longer save all their data from all their calculations in perpetuity. Rather, someone will have to become the curator of the scientific data being generated by computations and figure out what is and is not worth keeping, and how or where that data should be stored and managed. This concept of selectively retaining user data manifested in a variety of discussions, ranging from in-place data sharing and publication with <a href="https://www.globus.org/researchers/plus-plans">Globus Plus</a> and <a href="https://fasterdata.es.net/science-dmz/">science DMZs</a> to <a href="https://www.alcf.anl.gov/user-guides/using-hpss">transparently managing online data volumes with hierarchical storage management</a> (HSM). The common idea, however, was that scientists are going to have to start coming to grips with data management themselves, as facilities will soon be unable to cope with the entirety of their users' data.</p>

<p>This was a particularly interesting problem to me because it very closely echoed the sentiments that came out of <a href="http://leveragebigdata.com/">Datanami's recent LeverageBIGDATA</a> event, which had a much more industry-minded audience. The general consensus is that several fields are far ahead of the pack in addressing this issue: the high-energy physics community has been filtering data at its genesis (e.g., <a href="http://lhcb-public.web.cern.ch/lhcb-public/en/Data%20Collection/Triggers-en.html">ignoring the data from uninteresting collision events</a>) for years now, and enterprises seem comfortable with retaining marketing data only for as long as it is useful. By comparison, NERSC's tape archive has not discarded user data since its inception several decades ago; each new tape system simply repacks the previous generation's tape to roll all old data forward.</p>

<p>All of the proposed solutions for this problem revolve around metadata. The reality is that not all user data has equal importance, and there is a need to provide a mechanism for users (or their applications) to describe this fact.</p>
<p>For example, the principal use case for the aforementioned burst buffers is to store massive checkpoint-restart files; while these checkpoints are important to retain <i>while</i> a calculation is running, they have limited value <i>after</i> the calculation has completed. Rather than relying on a user to manually recognize that these checkpoints can be deleted, the hope is that metadata attributes can be attached to these checkpoint files to indicate that they are not critical data to be retained forever, in a form that automated curation systems can understand.</p>

<p>Exactly how this metadata would be used to manage space on a file system remains poorly defined. A few examples of how metadata could be used to manage data volume in data-intensive scientific computing environments include:</p>

<ul>
  <li>tagging certain files or directories as permanent or ephemeral, signaling that the file system can purge certain files whenever a cleanup is initiated;</li>
  <li>tagging certain files with a set expiration date, either as an option or by default, so that when a file ages beyond a certain point, it is deleted;</li>
  <li>attributing a sliding scale of "importance" to each file, so that files of low importance can be transparently migrated to tape via HSM.</li>
</ul>

<p>Some of these concepts are already implemented, but the ability for users <i>and</i> applications to attach extensible metadata to files in a file-system-agnostic way does not yet exist. I think this is a significant gap in technology that will need to be filled in very short order as pre-exascale machines begin to demonstrate the ability to generate tremendous I/O loads. Frankly, I'm surprised this issue hasn't been solved in a broadly deployable way yet.</p>

<p>The good news here is that the problem of curating digital data is not new; it is simply new to high-performance computing. In the spirit of doing things the right way, DOE invited <a href="https://twitter.com/deemagnoni">the director of LANL's Research Library</a> to attend the workshops, and she provided valuable insights into how methods of digital data curation may be applied to these emerging challenges in data-intensive scientific computing.</p>
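<p>(A footnote: some of the machinery for the expiration-date idea above exists today. On file systems that support POSIX extended attributes, a user can already hang an arbitrary tag off a file; the file name and attribute name below are hypothetical, and nothing acts on the tag without a curation system to honor it.)</p>

<pre><code class="language-bash"># Tag a checkpoint with an expiration date that a curation system could honor
setfattr -n user.expires -v "2014-12-31" checkpoint-00042.h5
getfattr -n user.expires checkpoint-00042.h5
</code></pre>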
&nbsp;Hopefully it will reflect what I feel was the essence of the workshop, but at any rate, it should contain a nice perspective on how we can expect the HPC community to address the new demands emerging from the data-intensive scientific computing (DISC) community.</div>
+<div><br /></div>
+<div>In the context of high-performance computing,&nbsp;</div>
+<div><ul><li>Workflow management systems will continue to gain importance as data sets become larger, more parallel, and more unwieldy.</li><li>Burst buffers, in one form or another, will become the hardware solution to the fact that all exascale simulations will become data-intensive problems.</li><li>Data curation frameworks are the final piece of the puzzle and will provide manageability for data at rest.</li></ul></div>
+<div>None of these three legs is fully developed, and this is simply an indication of data-intensive scientific computing's immaturity relative to more traditional high-performance computing: &nbsp;</div>
+<div><ul><li>Workflows need to converge on some sort of standardized API or feature set in order to give users an incentive to abandon their one-off solutions.</li><li>Burst buffer technology has diverged into two solutions centered at either the compute or storage side of a DISC platform; both serve different workloads, and the underlying hardware and software configurations remain unfinished.</li><li>Effective data curation requires a metadata management system that will allow both users and their applications to identify the importance of their data so that sensible data retention policies and HSM can be automated.</li></ul><div>Of course, I could be way off in terms of what I took away from these meetings seeing as how I don't really know what I'm talking about. &nbsp;Either way, it was a real treat to be invited out to hang out with the DOE folks for a week; I got to meet some of my personal supercomputing heroes, share war stories, and make some new pals. <br /><br />I also got to spend eight days getting to know the Bay Area. &nbsp;So as not to leave this post entirely without a picture,</div>
+</div>
+<div><br /></div>
+<div class="separator" style="clear: both; text-align: center;"><a href="http://4.bp.blogspot.com/-FIgACJFuqu8/U6kbTBKwNbI/AAAAAAAAKlM/L1wZAaZrBn0/s1600/10455579_10152060455017282_6816950694141486686_n.jpg" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="295" src="http://4.bp.blogspot.com/-FIgACJFuqu8/U6kbTBKwNbI/AAAAAAAAKlM/L1wZAaZrBn0/s1600/10455579_10152060455017282_6816950694141486686_n.jpg" width="400" /></a></div>
+<div><br /></div>
+<div>I also learned that I have a weird fascination with streetcars. &nbsp;I'm glad I was introduced to supercomputers first.</div>
+
+
+
+
+ Spark on Supercomputers- A Few Notes
+
+ 2014-06-08T03:34:00-06:00
+ https://hpc.social/2014/spark-on-supercomputers-a-few-notes
+ <p>I’ve been working with Apache Spark quite a bit lately in an effort to bring it into the fold as a viable tool for solving some of the data-intensive problems encountered in supercomputing.  I’ve already added support for <a href="https://github.com/glennklockwood/myhadoop/tree/spark">provisioning Spark clusters to a branch of the myHadoop framework</a> I maintain so that Slurm, Torque, and SGE users can begin playing with it, and as a result of these efforts, I’ve discovered a number of interesting issues with Spark running on traditional supercomputers.<br /><br />At this point in time, Spark is very rough around the edges.
The core implementation of resilient distributed datasets is all there and works wonderfully, but I’ve found that it doesn’t take long to start discovering bugs and half-implemented features that can get very confusing very quickly.  Perhaps half of the problems I’ve faced are the result of the fact that I have been trying to run Spark in non-traditional ways (for example, over hosts’ TCP-over-InfiniBand interfaces and with non-default config directories), and although the documentation claims to support all of the features necessary to make this possible, the reality is a bit different.<br /><br />What follows are just some incoherent notes I’ve taken while porting Spark to the myHadoop framework.  Spark is rapidly developing and it is constantly improving, so I hope this post becomes outdated as the Spark developers make the framework more robust.<br /><br /><h2>Control Script Problems</h2>Hadoop and Spark both ship with “control scripts” or “<a href="http://spark.apache.org/docs/0.9.1/spark-standalone.html#cluster-launch-scripts">cluster launch scripts</a>” that facilitate the starting and stopping of the entire cluster of daemons.  At the highest level, this includes start-all.sh and stop-all.sh, which make calls to start-dfs.sh and start-yarn.sh (in Hadoop) and start-master.sh and start-slaves.sh (in Spark).  In Hadoop, these scripts work wonderfully, but Spark’s implementation of these control scripts is still quite immature because they carry implicit assumptions about users’ Spark configurations.<br /><br />Like Hadoop, Spark supports a spark-env.sh file (located in $SPARK_CONF_DIR) which defines environment variables for all of the remote Spark workers that are spawned across the cluster.  This file is an ideal place to put the following environment variable definitions:<br /><ul><li>SPARK_MASTER_IP - the default value for this is <code class="language-plaintext highlighter-rouge">hostname</code>, which is not a great default on most clusters.  On Rocks, we append “.ibnet” to the hostname to get Spark to operate over the InfiniBand fabric.</li><li>SPARK_LOCAL_IP - again, ensure that this is set up to use the correct interface on the cluster.  We append .ibnet on Rocks.</li><li>SPARK_HOME, SPARK_PREFIX, and SPARK_CONF_DIR should also be defined here since spark-env.sh will usually override the variables defined by spark-config.sh (see below).</li></ul>$SPARK_HOME/sbin/spark-config.sh is where much of the Spark control scripts’ “intelligence” comes from as far as defining the environment variables that Spark needs to launch.  In particular, spark-config.sh defines the following variables <i>before</i> reading spark-env.sh:<br /><ul><li>SPARK_PREFIX</li><li>SPARK_HOME</li><li>SPARK_CONF_DIR</li></ul>The problem is that <b>spark-config.sh will stomp all over anything the user defines</b> for the above variables, and since spark-config.sh is called from within all of the Spark control scripts (both invoked by the user and invoked by sub-processes on remote hosts during the daemon spawning process), trying to get Spark to use non-default values for SPARK_CONF_DIR (e.g., exactly what myHadoop does) gets to be tedious. <br /><br />The Spark developers tried to work around this by having the control scripts call spark-env.sh after spark-config.sh, meaning you should be able to define your own SPARK_CONF_DIR in spark-env.sh.
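+<br /><br />(For concreteness, here is roughly what a spark-env.sh along the lines described above might look like; the paths and the master’s host name are hypothetical, but the variables are the ones just discussed:)<br /><br />
+<pre># $SPARK_CONF_DIR/spark-env.sh -- sourced by every Spark daemon at startup
+export SPARK_HOME=/home/glock/apps/spark-0.9.0
+export SPARK_PREFIX=$SPARK_HOME
+export SPARK_CONF_DIR=/scratch/glock/spark/conf    # non-default location
+
+# operate over the InfiniBand fabric, per the Rocks .ibnet convention
+export SPARK_MASTER_IP=master.ibnet                # node running the master daemon
+export SPARK_LOCAL_IP=$(hostname).ibnet            # interface this daemon binds to
+</pre>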
Unfortunately, this mechanism of calling spark-env.sh after spark-config.sh appears as<br /><br /><pre>. "$sbin/spark-config.sh"<br /><br />if [ -f "${SPARK_CONF_DIR}/spark-env.sh" ]; then<br /> . "${SPARK_CONF_DIR}/spark-env.sh"<br />fi<br /></pre><br />That is, spark-config.sh will stomp all over any user-specified SPARK_CONF_DIR, and then use the SPARK_CONF_DIR from spark-config.sh to look for spark-env.sh.  Thus, there is no actual way to get the Spark control scripts (as of version 0.9) to honor the user-specified SPARK_CONF_DIR.  It looks like the latest commits to Spark have started to address this, but a cursory glance over the newest control scripts suggests that this remains broken.<br /><br />Anyway, as a result of this, myHadoop’s Spark integration eschews the Spark control scripts and handles spawning the daemons more directly using the <a href="http://spark.apache.org/docs/0.9.1/spark-standalone.html#starting-a-cluster-manually">manual method of spawning slaves</a>.  Doing this averts the following issues:<br /><ol><li>start-slaves.sh can’t find any slaves because it always looks for $SPARK_HOME/etc/slaves.  This can be worked around by passing SPARK_SLAVES=$SPARK_CONF_DIR/slaves to start-slaves.sh for a non-default SPARK_CONF_DIR.</li><li>stop-master.sh doesn’t do anything useful because you still need to kill -9 the master process by hand.  Not sure why this is the case.</li></ol><div><br /></div></p>
+<h2>Deciphering Spark Errors</h2>
+<p>Here are various cryptic stack traces I’ve encountered while working on Spark.  I kept these mostly for myself, but I’ve started meeting people that hit the same problems and thought it might be worthwhile to share the diagnoses I’ve found.<br /><br />In general, Spark seems to work best when used conservatively, but when you start doing things that do not strictly fall within the anticipated use case, things break in strange ways.
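+<br /><br />(One quick follow-up on the control scripts before diving into the stack traces: the “manual method” that myHadoop falls back on amounts to launching the daemons with spark-class directly, per Spark’s standalone-mode documentation. &nbsp;The host name below is hypothetical, and 7077 is just the standalone master’s default port:)<br /><br />
+<pre># on the master node
+$SPARK_HOME/bin/spark-class org.apache.spark.deploy.master.Master &amp;
+
+# on each slave node, pointing back at the master
+$SPARK_HOME/bin/spark-class org.apache.spark.deploy.worker.Worker spark://master.ibnet0:7077 &amp;</pre>
+<br />With that aside, on to the errors themselves.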
For example, if you try to write an RDD with an empty element (e.g., a text file with empty lines), you would get this really crazy error that does not actually say anything meaningful:<br /><br /><pre style="font-size: smaller;">14/04/30 16:23:07 ERROR Executor: Exception in task ID 19<br />scala.MatchError: 0 (of class java.lang.Integer)<br />     at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:110)<br />     at org.apache.spark.api.python.PythonRDD$$anon$1.&lt;init&gt;(PythonRDD.scala:153)<br />     at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:96)<br />     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)<br />     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)<br />     at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)<br />     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)<br />     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)<br />     at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)<br />     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)<br />     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)<br />     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:109)<br />     at org.apache.spark.scheduler.Task.run(Task.scala:53)<br />     at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:213)<br />     at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:49)<br />     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:178)<br />     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)<br />     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)<br />     at java.lang.Thread.run(Thread.java:722)</pre><br />I filed a bug report about this particular problem and the&nbsp;<a href="https://github.com/apache/spark/pull/644">issue has been fixed</a>, but it's just one of those edge cases where Spark will fail catastrophically (I had to look at the source code to figure out what "scala.MatchError" meant). &nbsp;Usually you wouldn't be operating on empty data sets, but I discovered this error when I was trying to quickly determine if my Spark slaves were communicating with my master correctly by issuing<br /><br /><pre>file = sc.textFile('hdfs://master.ibnet0/user/glock/input.txt')<br />file.saveAsTextFile('hdfs://master.ibnet0/user/glock/output')</pre><br />That is, simply reading in a file and writing it back out with pyspark would cause catastrophic failure. &nbsp;This is what I meant when I say Spark's still rough around the edges.<br /><br />Here are a few more errors I've encountered. &nbsp;They're not problems with Spark, but the stack traces and exceptions thrown can be a little mysterious.
&nbsp;I'm pasting it all here for the sake of googlers who may run into these same problems.<br /><br />If you try to use Spark built against Hadoop 2 with a Hadoop 1 HDFS, you'll get this IPC error:<br /><br /><pre style="font-size: smaller;">&gt;&gt;&gt; file.saveAsTextFile('hdfs://s12ib:54310/user/glock/gutenberg.out')<br />Traceback (most recent call last):<br />  File "&lt;stdin&gt;", line 1, in &lt;module&gt;<br />  File "/home/glock/apps/spark-0.9.0/python/pyspark/rdd.py", line 682, in saveAsTextFile<br />    keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)<br />  File "/home/glock/apps/spark-0.9.0/python/lib/py4j-0.8.1-src.zip/py4j/java_gateway.py", line 537, in __call__<br />  File "/home/glock/apps/spark-0.9.0/python/lib/py4j-0.8.1-src.zip/py4j/protocol.py", line 300, in get_return_value<br />py4j.protocol.Py4JJavaError: An error occurred while calling o23.saveAsTextFile.<br />: org.apache.hadoop.ipc.RemoteException: <b>Server IPC version 9 cannot communicate with client version 4</b><br />     at org.apache.hadoop.ipc.Client.call(Client.java:1070)<br />     at org.apache.hadoop.ipc.RPC$Invoker.invoke(RPC.java:225)<br />     at $Proxy7.getProtocolVersion(Unknown Source)<br />     at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:396)<br />     at org.apache.hadoop.ipc.RPC.getProxy(RPC.java:379)<br /><br /></pre><br />If your Pythons aren't all the same version across the nodes when Spark workers are instantiated, you might get a cryptic error like this when trying to call the count() method on an RDD:<br /><br /><pre style="font-size: smaller;">14/04/30 16:15:11 ERROR Executor: Exception in task ID 12<br />org.apache.spark.api.python.PythonException: Traceback (most recent call last):<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/worker.py", line 77, in main<br />    serializer.dump_stream(func(split_index, iterator), outfile)<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/serializers.py", line 182, in dump_stream<br />    self.serializer.dump_stream(self._batched(iterator), stream)<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/serializers.py", line 117, in dump_stream<br />    for obj in iterator:<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/serializers.py", line 171, in _batched<br />    for item in iterator:<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop1/python/pyspark/rdd.py", line 493, in func<br />    if acc is None:<br /><b>TypeError: an integer is required</b><br /><br />     at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:131)<br />     at org.apache.spark.api.python.PythonRDD$$anon$1.&lt;init&gt;(PythonRDD.scala:153)<br />     at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:96)<br />     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)<br />     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)<br />     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:109)<br />     at org.apache.spark.scheduler.Task.run(Task.scala:53)<br />     at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:213)<br />     at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:49)<br />     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:178)<br />     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)<br />     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)<br />     at java.lang.Thread.run(Thread.java:722)</pre><br /><br />If you try to write an RDD to a file with mismatched Python versions, or if you were using anything earlier than Python 2.7 (e.g., 2.6) with any Spark version earlier than 1.0.0, you’d see this:<br /><br /><pre style="font-size: smaller;">14/04/30 17:53:20 WARN scheduler.TaskSetManager: Loss was due to org.apache.spark.api.python.PythonException<br />org.apache.spark.api.python.PythonException: Traceback (most recent call last):<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/worker.py", line 77, in main<br />    serializer.dump_stream(func(split_index, iterator), outfile)<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/serializers.py", line 117, in dump_stream<br />    for obj in iterator:<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/rdd.py", line 677, in func<br />    if not isinstance(x, basestring):<br /><b>SystemError: unknown opcode</b><br /><br />     at org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:131)<br />     at org.apache.spark.api.python.PythonRDD$$anon$1.&lt;init&gt;(PythonRDD.scala:153)<br />     at org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:96)<br />     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)<br />     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)<br />     at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)<br />     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)<br />     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)<br />     at org.apache.spark.rdd.MappedRDD.compute(MappedRDD.scala:31)<br />     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:241)<br />     at org.apache.spark.rdd.RDD.iterator(RDD.scala:232)<br />     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:109)<br />     at org.apache.spark.scheduler.Task.run(Task.scala:53)<br />     at org.apache.spark.executor.Executor$TaskRunner$$anonfun$run$1.apply$mcV$sp(Executor.scala:213)<br />     at org.apache.spark.deploy.SparkHadoopUtil.runAsUser(SparkHadoopUtil.scala:49)<br />     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:178)<br />     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)<br />     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)<br />     at java.lang.Thread.run(Thread.java:722)</pre><br /><br />If your HDFS URI is wrong, the error message actually makes sense.
It is buried quite deeply though.<br /><br /><pre style="font-size: smaller;">Traceback (most recent call last):<br />  File "&lt;stdin&gt;", line 1, in &lt;module&gt;<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/pyspark/rdd.py", line 682, in saveAsTextFile<br />    keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/java_gateway.py", line 537, in __call__<br />  File "/home/glock/apps/spark-0.9.0-incubating-bin-hadoop2/python/lib/py4j-0.8.1-src.zip/py4j/protocol.py", line 300, in get_return_value<br />py4j.protocol.Py4JJavaError: An error occurred while calling o23.saveAsTextFile.<br />: java.lang.IllegalArgumentException: <b>java.net.UnknownHostException: s12ib.ibnet0</b><br />     at org.apache.hadoop.security.SecurityUtil.buildTokenService(SecurityUtil.java:418)<br />     at org.apache.hadoop.hdfs.NameNodeProxies.createNonHAProxy(NameNodeProxies.java:231)<br />     at org.apache.hadoop.hdfs.NameNodeProxies.createProxy(NameNodeProxies.java:139)<br />     at org.apache.hadoop.hdfs.DFSClient.&lt;init&gt;(DFSClient.java:510)<br />     at org.apache.hadoop.hdfs.DFSClient.&lt;init&gt;(DFSClient.java:453)<br />     at org.apache.hadoop.hdfs.DistributedFileSystem.initialize(DistributedFileSystem.java:136)<br />     at org.apache.hadoop.fs.FileSystem.createFileSystem(FileSystem.java:2433)<br />     at org.apache.hadoop.fs.FileSystem.access$200(FileSystem.java:88)<br />     at org.apache.hadoop.fs.FileSystem$Cache.getInternal(FileSystem.java:2467)<br />     at org.apache.hadoop.fs.FileSystem$Cache.get(FileSystem.java:2449)<br />     at org.apache.hadoop.fs.FileSystem.get(FileSystem.java:367)<br />     at org.apache.hadoop.fs.Path.getFileSystem(Path.java:287)<br />     at org.apache.hadoop.mapred.SparkHadoopWriter$.createPathFromString(SparkHadoopWriter.scala:193)<br />     at org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(PairRDDFunctions.scala:685)<br />     at org.apache.spark.rdd.PairRDDFunctions.saveAsHadoopFile(PairRDDFunctions.scala:572)<br />     at org.apache.spark.rdd.RDD.saveAsTextFile(RDD.scala:894)<br />     at org.apache.spark.api.java.JavaRDDLike$class.saveAsTextFile(JavaRDDLike.scala:355)<br />     at org.apache.spark.api.java.JavaRDD.saveAsTextFile(JavaRDD.scala:27)<br />     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)<br />     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)<br />     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)<br />     at java.lang.reflect.Method.invoke(Method.java:597)<br />     at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:231)<br />     at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:379)<br />     at py4j.Gateway.invoke(Gateway.java:259)<br />     at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)<br />     at py4j.commands.CallCommand.execute(CallCommand.java:79)<br />     at py4j.GatewayConnection.run(GatewayConnection.java:207)<br />     at java.lang.Thread.run(Thread.java:619)<br />Caused by: java.net.UnknownHostException: s12ib.ibnet0<br />     ... 29 more</pre></p>
+
+
+
+
+ Hadoop's Uncomfortable Fit in HPC
+
+ 2014-05-17T06:28:00-06:00
+ https://hpc.social/2014/hadoop-s-uncomfortable-fit-in-hpc
+ <p>Hadoop has come up in a few conversations I’ve had in the last few days, and it’s occurred to me that the supercomputing community continues having a difficult time fully understanding
how Hadoop currently fits (and should fit) into scientific computing.  HPCwire was kind enough <a href="http://www.hpcwire.com/2014/02/11/hpc-hacking-hadoop/">to run a piece that let me voice my perspective</a> on the realities of Hadoop use in HPC a few months ago–that is, scientists are still getting a feel for Hadoop and what it can do, and it just isn’t seeing widespread adoption in scientific computing yet.  This contrasts with the tremendous buzz surrounding the “Hadoop” brand and ultimately gives way to strange dialogue, originating from the HPC side of the fence, like this:<br /><br /><div class="separator" style="clear: both; text-align: center;"><a href="http://4.bp.blogspot.com/-_fXs6Nh1BWw/U3UNE3S96pI/AAAAAAAAKbQ/GE-h2Aphdlw/s1600/Screen+Shot+2014-05-15+at+11.24.37+AM.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="188" src="http://4.bp.blogspot.com/-_fXs6Nh1BWw/U3UNE3S96pI/AAAAAAAAKbQ/GE-h2Aphdlw/s1600/Screen+Shot+2014-05-15+at+11.24.37+AM.png" width="320" /></a></div>
+<br />
+I’m not sure if this original comment was facetious and dismissive of the Hadoop buzz or if it was a genuinely interested observation.  Regardless of the intent, both interpretations reveal an important fact: Hadoop is being taken seriously only at a subset of supercomputing facilities in the US, and at a finer granularity, only by a subset of professionals within the HPC community.  Hadoop is in a very weird place within HPC as a result, and I thought it might benefit the greater discussion of its ultimate role in research computing if I laid out some of the factors contributing to Hadoop’s current awkward fit.  The rest of this post will strive to answer two questions: <b>Why does Hadoop remain at the fringe of high-performance computing, and what will it take for it to be a serious solution in HPC?</b><br /><br /><h2>#1. Hadoop is an invader</h2>I think what makes Hadoop uncomfortable to the HPC community is that, unlike virtually every other technology that has found successful adoption within research computing, <b>Hadoop was not designed by HPC people</b>.  Compare this to a few other technologies that are core to modern supercomputing:<br /><br /><ul><li><a href="http://beige.ucs.indiana.edu/I590/node54.html">MPI was literally born at the world’s largest supercomputing conference</a>, and the reference implementation was developed by computer scientists at major universities and national labs.  It was developed by scientists for scientists.</li><li><a href="http://openmp.org/wp/about-openmp/">OpenMP was developed by an industrial consortium comprised of vendors of high-performance computing hardware and software</a>.  Like MPI, this standard emerged as a result of vendor-specific threading APIs causing compatibility nightmares across different high-end computing platforms.</li><li><a href="http://www.nvidia.com/object/cuda_home_new.html">CUDA</a> was developed out of <a href="http://graphics.stanford.edu/projects/brookgpu/">Brook</a> which was developed by <a href="http://graphics.stanford.edu/people.html">computer scientists at Stanford</a>.
Again, CUDA now is largely targeted at high-performance computing (although <a href="http://venturebeat.com/2014/03/27/big-data-visualization-firm-map-d-takes-home-100k-in-nvidias-emerging-companies-summit-contest/">this is changing</a>–and it’ll be interesting to see if adoption outside of HPC really happens)</li></ul><div>By contrast, <a href="http://hortonworks.com/big-data-insights/spotlight-on-the-early-history-of-hadoop/">Hadoop was developed by Yahoo</a>, and the original <a href="http://research.google.com/archive/mapreduce.html">MapReduce was developed by Google</a>.  They were not created to solve problems in fundamental science or national defense; they were created to provide a service for the masses.  They weren’t meant to interface with traditional supercomputers or domain scientists; Hadoop is very much an interloper in the world of supercomputing.</div></p>
+<div><br /></div>
+<div>The notion that Hadoop's commercial origins make it contentious for stodgy people in the traditional supercomputing arena may sound silly without context, but the fact is, developing a framework for a commercial application rather than a scientific application leaves it with an interesting amount of baggage.</div>
+<div><br /></div>
+<h2>#2. Hadoop looks funny</h2>
+<div>The most obvious baggage that Hadoop brings with it to HPC is the fact that it is written in Java. &nbsp;One of the <a href="http://www.oracle.com/technetwork/java/intro-141325.html">core design features of the Java language was to allow its programmers to write code once and be able to run it on any hardware platform</a>--a concept that is diametrically opposite to the foundations of high-performance computing, where code should be compiled and optimized for the specific hardware on which it will run. &nbsp;Java made sense for Hadoop due to its origins in the world of web services, but Java maintains a perception of being slow and inefficient. &nbsp;Slow and inefficient codes are, frankly, offensive to most HPC professionals, and I'd wager that a majority of researchers in traditional HPC scientific domains simply don't know the Java language at all. &nbsp;I sure don't.</div>
+<div><br /></div>
+<div>The idea of <a href="http://www.cnet.com/news/supercomputer-beagle-can-analyze-240-whole-genomes-in-two-days/">running Java applications on supercomputers is beginning to look less funny</a> nowadays with the explosion of cheap genome sequencing. &nbsp;Some of the most popular foundational applications in bioinformatics (e.g., <a href="http://www.broadinstitute.org/gatk/">GATK</a> and <a href="http://picard.sourceforge.net/">Picard</a>) are written in Java, and although considered an "emerging community" within the field of supercomputing, bioinformatics is rapidly outgrowing the capabilities of lab-scale computing. &nbsp;Perhaps most telling are <a href="http://www.bio-itworld.com/2014/3/20/broad-intel-announce-speed-improvements-gatk-powered-by-intel-optimizations.html">Intel's recent contributions to the Java-based GATK</a> which facilitate much richer use of AVX operations for variant calling.<br /><br />With that being said though, Java is still a very strange way to interact with a supercomputer. &nbsp;Java applications don't compile, look, or feel like normal applications in UNIX as a result of their cross-platform compatibility. &nbsp;Its runtime environment exposes a lot of very strange things to the user for no particularly good reason (-Xmx1g?
&nbsp;I'm still not sure why I need to specify this to see the version of Java I'm running, much less do anything else**) and it doesn't support shared-memory parallelism in an HPC-oriented way (manual thread management, thread pools...yuck). &nbsp;For the vast majority of HPC users coming from traditional domain sciences and the professionals who support their infrastructure, Java applications remain unconventional and foreign.<br /><br /></div> +<div style="font-size: xx-small; line-height: 120%;">** A few readers have pointed out that this isn't necessary, and on regular desktops or servers, they would be correct. &nbsp;However, this remark <i>is</i> true on multi-user, shared resources like supercomputer login nodes where ulimits exist to prevent one user from rendering the node unusable for everyone else. &nbsp;For example, we only allow up to 4 GB of RAM per user on our larger machine's login nodes, and this is not sufficient to run java -version. &nbsp;Yes, there are ways to work around this, but that's the whole point I was trying to make--this is an aspect of Java that is weird when compared to plain old C and Fortran applications.</div> +<div><br /></div> +<h2>#3. Hadoop reinvents HPC technologies poorly</h2> +<div>For those who have taken a serious look at the performance characteristics of Hadoop, the honest truth is that it re-invents a lot of functionality that has existed in HPC for decades, and it does so very poorly. &nbsp;Consider the following examples:</div> +<div><ol><li><b>Hadoop uses TCP with a combination of REST and RPC for inter-process communication</b>. &nbsp;HPC has been using lossless DMA-based communication, which provides better performance in all respects, for years now.</li><li><b>Hadoop doesn't really handle multi-tenancy and its schedulers are terrible</b>. &nbsp;The architecture of Hadoop is such that, with a 3x replication factor, a single cluster can only support three concurrent jobs at a time with optimal performance. &nbsp;Its current scheduler options have very little in the way of intelligent, locality-aware job placement.</li><li><b>Hadoop doesn't support scalable interconnect topologies</b>. &nbsp;The rack-aware capabilities of Hadoop, while powerful for their intended purpose, do not support scalable network topologies like multidimensional meshes and toruses. &nbsp;They handle Clos-style network topologies, period.</li><li><b>HDFS is very slow and very obtuse</b>. &nbsp;Parallel file systems like Lustre and GPFS have been an integral part of HPC for years, and HDFS is just very slow and difficult to use by comparison. &nbsp;The lack of a POSIX interface means getting data in and out is tedious, and its vertical integration of everything from replication and striping to centralized metadata in Java makes it rather unresponsive.</li></ol><div>However, these poor reinventions are not the result of ignorance; rather, Hadoop's reinvention of a lot of HPC technologies arises from reason #1 above: Hadoop was not designed to run on supercomputers and it was not designed to fit into the existing matrix of technologies available to traditional HPC. &nbsp;Rather, it was created to interoperate with web-oriented infrastructure. &nbsp;Specifically addressing the above four points,</div> +</div> +<div><ol><li>Hadoop uses TCP/IP and Ethernet because virtually all data center infrastructure is centered around these technologies, not high-speed RDMA. 
&nbsp;Similarly, REST and RPC are used across enterprise-oriented services because they are simple protocols.</li><li>Multi-tenancy arises when many people want to use a scarce resource such as a supercomputer; in the corporate world, resources should never be a limiting factor because waiting in line is what makes consumers look elsewhere. &nbsp;This principle and the need for elasticity is what has made the cloud so attractive to service providers. &nbsp;It follows that Hadoop is designed to provide a service for a single client such as a single search service or data warehouse.</li><li>Hadoop's support for Clos-style (leaf/spine) topologies models most data center networks. &nbsp;Meshes, toruses, and more exotic topologies are exclusive to supercomputing and had no relevance to Hadoop's intended infrastructure.</li><li>HDFS implements everything in software to allow it to run on the cheapest and simplest hardware possible--JBODs full of spinning disk. &nbsp;The lack of a POSIX interface is a direct result of Hadoop's optimization for large block reads and data warehousing. &nbsp;By making HDFS write-once, a lot of complex distributed locking can go out the window because MapReduce doesn't need it.</li></ol><div>This loops back around to item #1 above: Hadoop came from outside of HPC, and it carries this baggage with it.<br /><br /></div>
+<h2>#4. Hadoop evolution is backwards</h2><div style="border: 1px solid black; display: block; float: right; font-size: xx-small; margin-left: 1em; padding: 0.5em; width: 40%;"><div style="background-color: black; color: white; text-align: center; width: 100%;">A tiny anecdote</div>
+<br />I gave two MapReduce-related consultations this past month which really highlighted how this evolutionary path of Hadoop (and MapReduce in general) is not serving HPC very well.<br /><br />My first meeting was with a few folks from a large clinical testing lab that was beginning to incorporate genetic testing into their service lineup. They were having a difficult time keeping up with the volume of genetic data being brought in by their customers and were exploring <a href="https://portal.futuregrid.org/manual/hadoop-blast">Hadoop BLAST</a> as an alternative to their current BLAST-centric workflow. The problem, though, is that Hadoop BLAST was developed as an academic project when Hadoop 0.20 (which has evolved into Hadoop 1.x) was the latest and greatest technology. Industry has largely moved beyond Hadoop version 1 onto Hadoop 2 and YARN, and this lab was having significant difficulties in getting Hadoop BLAST to run on their brand new Hadoop cluster because its documentation hasn't been updated in three years.<br /><br />The other meeting was with a colleague who works for a multinational credit scoring company. &nbsp;They were deploying Spark on their Cloudera cluster for much the same reason as the aforementioned clinical testing company: their data collection processes were outgrowing their computational capabilities, and they were exploring better alternatives for data exploration. &nbsp;The problem they encountered was not one caused by their applications being frozen in time after someone finished their Ph.D.; rather, their IT department had botched the Spark installation.<br /><br />This disparity is pervasive throughout the Hadoop application ecosystem.
&nbsp;Tools created for scientific research seem to be abandoned just as quickly as they were created, so looking for existing Hadoop-based tools for research can be a frustrating game of chasing 404s.</div>
+</div>
+<div>Generally speaking, the development of all technologies at the core of HPC has followed a similar evolutionary path into broad adoption. &nbsp;Both software and hardware technologies arise as disparities between available and necessary solutions widen. &nbsp;Researchers often hack together non-standard solutions to these problems until a critical mass is achieved, and a standard technology emerges to unify these varying solutions and fill the gap. &nbsp;OpenMP is a great example--before it became standard, there were a number of vendor-specific pragma-based multithreading APIs; <a href="http://docs.cray.com/books/S-2179-52/html-S-2179-52/z1075740829oswald.html">Cray</a>, <a href="http://docs.oracle.com/cd/E19059-01/stud.9/817-6694/10_parallel.html#74354">Sun</a>, and <a href="http://techpubs.sgi.com/library/tpl/cgi-bin/getdoc.cgi?coll=0650&amp;db=bks&amp;srch=&amp;fname=/SGI_Developer/Pragmas/sgi_html/ch09.html">SGI</a>&nbsp;all had their own implementations that did the same thing but made porting codes between systems very unpleasant. &nbsp;These vendors ultimately all adopted a standard interface which became OpenMP, and that technology has been embraced because it provided a portable way of solving the original motivating problem.<br /><br />The evolution of Hadoop has very much been a backwards one; it entered HPC as a solution to a problem which, by and large, did not yet exist. &nbsp;As a result, it followed a common, but backwards, pattern by which computer scientists, not domain scientists, get excited by a new toy and invest a lot of effort into creating proof-of-concept codes and use cases. &nbsp;Unfortunately, this sort of development is fundamentally unsustainable because of its nucleation in a vacuum, and in the case of Hadoop, researchers moved on to the next big thing and largely abandoned their model applications as the shine of Hadoop faded (see sidebar). &nbsp;This has left a graveyard of software, documentation, and ideas that are frozen in time and rapidly losing relevance as Hadoop moves on.<br /><br />Consider this evolutionary path of Hadoop compared to OpenMP: there were no OpenMP proofs-of-concept. &nbsp;There didn't need to be any; the problems had already been defined by the people who needed OpenMP, so by the time OpenMP was standardized and implemented in compilers, application developers already knew where it would be needed.<br /><br />Not surprisingly, innovation in the Hadoop software ecosystem remains in the sphere for which it was developed: data warehousing and data analytics. &nbsp;Applications and libraries like <a href="http://www.cloudera.com/content/cloudera/en/products-and-services/cdh/impala.html">Impala</a>, <a href="http://parquet.io/">Parquet</a>, and <a href="http://spark.apache.org/">Spark</a> are at the cutting edge of applied analytics in the Hadoop/MapReduce ecosystem and represent useful, usable implementations of some really novel ideas.<br /><br /><h2 style="clear: both;">How <i>can</i> Hadoop fit into HPC?</h2>So this is why Hadoop is in such an awkward position, but does this mean Hadoop (and MapReduce) will never be welcome in the world of HPC?
&nbsp;Alternatively, what would it take for Hadoop to become a universally recognized core technology in HPC?<br /><br />I'll say up front that there are no easy answers--if there were, I wouldn't be delivering this monologue. &nbsp;However, solutions are being developed and attempted to address a few of the four major barriers I outlined above.<br /><br /><h3>Reimplement MapReduce in an HPC-oriented way</h3>This idea has been tried in a number of different ways (see <a href="http://mapreduce.sandia.gov/">MPI MapReduce</a> and <a href="http://mapreduce.stanford.edu/">Phoenix</a>), but none have really gained traction. &nbsp;I suspect this is largely the result of one particular roadblock: there just aren't that many problems which are so onerous in the traditional HPC space that reimplementing a solution in a relatively obscure implementation of MapReduce becomes worth the effort. &nbsp;As I mentioned in point #4 above, HPC vendors haven't been creating their own MapReduce APIs to address the demands of their customers as they did for OpenMP and MPI's predecessors, so Hadoop's role in HPC is not clearly addressing a problem that needs an immediate solution.<br /><br /><i>This is not to say that the data-oriented problems at which Hadoop excels do not exist within the domain sciences</i>. &nbsp;Rather, there are two key roles that Hadoop/MapReduce will play in scientific computations:<br /><ul><li><b>Solving existing problems</b>: &nbsp;The most activity I've seen involving Hadoop in domain sciences comes out of bioinformatics and observational sciences. &nbsp;Bioinformatics, as a consumer of HPC cycles, is still in its infancy, but <a href="http://glennklockwood.blogspot.com/2014/01/the-1000-genome-computational.html">the data sets being generated by next-generation sequencers are enormous</a>--the data to describe a single human genome, even when compressed, takes up about 120 GB. &nbsp;Similarly, advances in imaging and storage technology have allowed <a href="http://cas.sdss.org/astro/en/skyserver/paper/">astronomy</a> and <a href="http://blog.cloudera.com/blog/2012/07/processing-rat-brain-neuronal-signals-using-a-hadoop-computing-cluster-part-i/">radiology</a> to generate extremely large collections of data.</li><li><b>Enabling new problems</b>: One of Hadoop's more long-term promises is not solving the problems of today, but giving us a solution to problems we previously thought to be intractable. &nbsp;Although I can't disclose too much detail, an example of this lies in statistical mechanics: many problems involving large ensembles of particles have relied on data sampling or averaging to reduce the sheer volume of numerical information into a usable state. &nbsp;Hadoop and MapReduce allow us to start considering what deeper, more subtle patterns may emerge if a massive trajectory through phase space could be dumped and analyzed with, say, machine learning methods.</li></ul><br />Unfortunately, reimplementing MapReduce inside the context of existing HPC paradigms represents a large amount of work for a relatively small subset of problems. 
&nbsp;Some sort of catalyzing scientific problem will need to emerge to give vendors and application developers a strong reason to start re-thinking their problems in terms of MapReduce.<br /><br /><h3>Incorporate HPC technologies in Hadoop</h3>Rather than reimplementing Hadoop/MapReduce as an HPC technology, I think a more viable approach forward is to build upon the Hadoop framework and correct some of its poorly reinvented features I described in item #3 above. &nbsp;This will allow HPC to continuously fold in new innovations being developed in Hadoop's traditional competencies--data warehousing and analytics--as they become relevant to scientific problems. &nbsp;Some serious effort is being made to this end:<br /><ul><li>The <a href="http://hadoop-rdma.cse.ohio-state.edu/">RDMA for Apache Hadoop project</a>, headed by the esteemed D.K. Panda and his colleagues at OSU, has replaced Hadoop's TCP/RPC communication modes with native RDMA <a href="http://hadoop-rdma.cse.ohio-state.edu/performance/terasort/">with really impressive initial results</a>.</li><li>Some larger players in the HPC arena have begun to provide rich support for high-performance parallel file systems as a complete alternative to HDFS. &nbsp;IBM's GPFS file system has a <a href="http://public.dhe.ibm.com/common/ssi/ecm/en/dcs03038usen/DCS03038USEN.PDF">file placement optimization (FPO)</a> capability that allows GPFS to act as a drop-in replacement for HDFS, and <a href="http://www.intel.com/content/www/us/en/software/intel-hpc-distribution-for-apache-hadoop-software.html">Intel was selling native Lustre</a> support before they <a href="http://newsroom.intel.com/community/intel_newsroom/blog/2014/03/27/cloudera-intel-commit-to-accelerate-and-transform-how-enterprises-use-big-data-intel-makes-significant-equity-investment-in-cloudera">sold IDH to Cloudera</a>.</li><li>I would be remiss if I did not mention my own efforts in making Hadoop provisioning as seamless as possible on batch-based systems with <a href="https://github.com/glennklockwood/myhadoop/">myHadoop</a>.</li></ul><br />In addition to incorporating these&nbsp;<i>software</i> technologies from HPC into Hadoop, there are some really clever things you can do with <i>hardware</i> technologies that make Hadoop much more appealing to traditional HPC. &nbsp;I am working on some exciting and innovative (if I may say so) architecture designs that will further lower the barrier between Hadoop and HPC at my day job, and with any luck, we'll get to see some of these ideas go into production in the next few years.<br /><br /><h3>Make MapReduce Less Weird</h3>The very nature of MapReduce is a very strange one to supercomputing--it solves a class of problems that the world's fastest supercomputers just weren't designed to solve. &nbsp;Rather than make raw compute performance the most important capability, MapReduce treats I/O scalability as the most important capability and CPU performance is secondary. &nbsp;As such, it will always be weird until such a day comes when science faces an equal balance of compute-limited and data-limited problems. &nbsp;Fundamentally, I'm not sure that such a day will ever come. &nbsp;Throwing data against a wall to see what sticks is good, but deriving analytical insight is better.<br /><br />With that all being said, there's room for improvement in making Hadoop less weird. 
&nbsp;<a href="http://spark.apache.org/">Spark</a> is an exciting project because it sits at a nice point between academia and industry; <a href="https://amplab.cs.berkeley.edu/projects/spark-lightning-fast-cluster-computing/">developed at Berkeley</a> but targeted directly at Hadoop, it feels like it was developed for scientists, and it treats high-performance as a first-class citizen by providing the ability to utilize memory a lot more efficiently than Hadoop does. &nbsp;It also doesn't have such a heavy-handed Java-ness to it and provides a reasonably rich interface for Python (and&nbsp;<a href="https://amplab.cs.berkeley.edu/2014/01/26/large-scale-data-analysis-made-easier-with-sparkr/">R support is on the way</a>!). &nbsp;There still are a lot of rough edges (this is where the academic origins shine through, I think) but I'm hopeful that it cleans up under the Apache project.<br /><br />Perhaps more than (or inclusive of) the first two paths forward in increasing MapReduce adoption in research science, Spark holds the most promise in that it feels less like Hadoop and more normal from the HPC perspective. &nbsp;It doesn't force you to cast your problem in terms of a map and a reduce step; the way in which you interact with your data (your <i>resilient distributed dataset</i>, or RDD, in Spark parlance) is <a href="http://spark.apache.org/docs/0.9.0/api/pyspark/pyspark.rdd.RDD-class.html">much more versatile</a> and is more likely to directly translate to the logical operation you want to perform. &nbsp;It also supports the basic things Hadoop lacks such as iterative operations.<br /><br /><h2>Moving Forward</h2>I think I have a pretty good idea about why Hadoop has received a lukewarm, and sometimes cold, reception in HPC circles, and much of these underlying reasons are wholly justified. &nbsp;Hadoop's from the wrong side of the tracks from the purists' perspective, and it's not really changing the way the world will do its high-performance computing. &nbsp;There is a disproportionate amount of hype surrounding it as a result of its revolutionary successes in the commercial data sector.<br /><br />However, Hadoop and MapReduce aren't to be dismissed outright either. &nbsp;There is a growing subset of scientific problems that are growing against a scalability limit in terms of data movement, and at some point, solving these problems using conventional, CPU-oriented parallelism will reduce to using the wrong tool for the job. &nbsp;The key, as is always the case in this business, is to understand the job and realize that there are more tools in the toolbox than just a hammer.<br /><br />As these data-intensive and data-bound problems gain a growing presence in traditional HPC domains, I hope the progress being made on making Hadoop and MapReduce more relevant to research science continues. &nbsp;I mentioned above that great strides forward are being made to truly bridge the gap of utility and making MapReduce a serious go-to solution to scientific problems, and although Hadoop remains on the fringe of HPC today, it won't pay to dismiss it for too much longer.</div> + + + + + Parallelizing R on Supercomputers + + 2014-04-25T00:29:00-06:00 + https://hpc.social/2014/parallelizing-r-on-supercomputers + <div><b>Executive summary</b>: &nbsp;I've posted a tutorial on <a href="http://www.glennklockwood.com/di/R-para.php">how to parallelize R codes</a> on my website. 
&nbsp;This post is a more personal reflection on how I got there.<br /><br /></div> +<div><hr /></div> +<p><br />“Parallel Options for R” was the title of the first talk I ever presented on behalf of my employer, and despite the fact that I <a href="http://www.theguardian.com/lifeandstyle/2013/nov/09/impostor-syndrome-oliver-burkeman">didn’t (and still don’t) know anything</a> about the R language, statistics, or how to parallelize any of it, the shoe seemed to fit at the time.  The talk went over well, and I’ve been <a href="http://pace.sdsc.edu//sites/pace.sdsc.edu/bootcamp2/201405/schedule.html">asked</a> <a href="http://www.meetup.com/San-Diego-Data-Science-R-Users-Group/events/135782742/">to give</a> <a href="http://extension.ucsd.edu/studyarea/index.cfm?vAction=singleCourse&amp;vCourse=CSE-41185">the talk</a> in my capacity as the resident “parallel R guy” plenty of times since.<br />&lt;div&gt;<br />&lt;/div&gt;</p> +<div>Every once in a while I get asked how I came to become so involved in some of the weird topics about which I write and speak--after all, I really have no formal training in things like <a href="http://glennklockwood.blogspot.com/2013/12/high-performance-virtualization-sr-iov_14.html">SR-IOV</a>, <a href="http://glennklockwood.blogspot.com/2014/02/deploying-hadoop-on-traditional.html">Hadoop</a>, and <a href="http://glennklockwood.blogspot.com/2014/01/the-1000-genome-computational.html">next-generation gene sequencing</a>. &nbsp;As much as I'd like to claim I just have some infinite sage-like knowledge, the reality is that I have to learn about these various technologies as a result of my day job--answering helpdesk tickets. &nbsp;In the case of parallel R, I simply got a ticket in January 2013 that read,</div> +<div><blockquote class="tr_bq">"I just ran an intensive R script through [the supercomputer]. &nbsp;Its not much faster than my own machine. &nbsp;Could you point me to a tutorial for how I can make the process run in different processors in parallel?"</blockquote></div> +<div>I couldn't very well say "lol no idea" (which was the truth), but the fact is that there are only about three whole people in my group** who are tasked with solving every problem that comes in from the thousand unique users who run jobs on our system every year. &nbsp;If I didn't know the answer, there was a good chance that nobody else knew either. &nbsp;That doesn't change the fact that someone needs to answer the user's question though, and that fact is what got me into the parallel R business.</div> +<div><br /></div> +<div>In my quest for an answer to this user's helpdesk request, I further discovered that there were no good tutorials online that explain the process of parallelizing R codes. &nbsp;Thus, I wound up having to <a href="http://shop.oreilly.com/product/0636920021421.do">buy a book</a> to learn what I need to know to answer the user's question. &nbsp;So I did, and I learned the rough basics of how someone might go about parallelizing their R codes. &nbsp;I gave the user a few starting pointers, some of the libraries that he might want to check out on CRAN, and tried to provide some boilerplate code that might help him parallelize his particular script. 
&nbsp;We then went our separate ways.</div>
+<div><br /></div>
+<div>With all this reflection aside though, I never lost sight of the reality that I never did answer the user's question: what is a good tutorial on how to parallelize R codes?</div>
+<div><br /></div>
+<div>This question has actually come up a number of times from a number of users over the last year. &nbsp;Rather than take the easy route and tell everyone to attend my next talk on the subject, I decided to turn my presentation on parallelizing R into a series of tutorials which I've put on my website:</div>
+<div><br /></div>
+<div style="text-align: center;"><a href="http://www.glennklockwood.com/di/R-para.php"><b>Parallel Options for R</b></a></div>
+<div><br /></div>
+<div>It's not comprehensive by any means; notably, I did not cover either the <a href="http://r-pbd.org/">pbdR library out of UTK/Oak Ridge</a> (an omission with no particularly good justification) or <a href="http://r-sprint.org/">SPRINT from Edinburgh</a> (it's a bit specialized in functionality). &nbsp;I also haven't had the opportunity to convert my presentation on using R with Hadoop and Spark into the final component of this tutorial. &nbsp;Those topics will come as time permits. &nbsp;Regardless, I hope someone finds the write-up useful.</div>
+<div><br /></div>
+<div><span style="font-size: xx-small;">** I say "whole people" to reflect that our funding provides somewhere in the neighborhood of three full-time equivalent employees providing front-line user support. &nbsp;That funding winds up getting distributed across more physical staff.</span></div>
+
+
+
+
+ Armed and ready with IBM Platform LSF
+
+ 2014-04-11T16:39:53-06:00
+ https://hpc.social/2014/armed-and-ready-with-ibm-platform-lsf
+ <p>These days it&rsquo;s not uncommon to hear about CPUs based upon ARM cores. They can
+be found in mobile phones, embedded systems, laptops and even servers. Indeed,
+recently there have been a number of major announcements from vendors building
+processors based on ARM cores. This includes the AMD Opteron A1100, NVIDIA Tegra
+K1 and even the Apple A7, which is used in the iPhone 5s. What these all have in
+common is that they are 64-bit and based on the ARM v8 ISA. At the same time,
+the ARM-server chip startup Calxeda announced it was shutting down. Surging
+power requirements, as well as the announcement of 64-bit chips, have led to
+renewed interest in energy-efficient ARM-based processors for high performance
+computing.</p>
+
+<p>When building out an infrastructure for Technical Computing, a workload manager
+is typically used to control access to the computing resources. As it turns out, the leading
+workload manager IBM Platform LSF (formerly from Platform Computing) has
+supported Linux on ARM for about 10 years. In fact, today there are IBM
+clients using Platform LSF on Linux ARM-based clusters as part of mobile
+device design and testing.</p>
+
+<p>The current release of IBM Platform LSF 9.1.2 supports Linux on ARM v7 with
+upcoming support for ARM v8. Given that Platform LSF provides the ability to
+build out heterogeneous clusters, creating a compute cluster containing ARM,
+Power and x86 based nodes is a snap. Jobs may be targeted to a specific
+processor type, and the optional IBM Platform Application Center portal
+provides an easy-to-use, highly configurable, application-centric web-based
+interface for job management.</p>
+
+<p><strong>Hello.
How do you &ldquo;doo&rdquo;?</strong></p>
+
+<p>I&rsquo;ve recently had the opportunity to test IBM Platform LSF on a two-node,
+ARM-based cluster. The IBM Platform LSF master node was a Udoo Quad system running Debian Wheezy ARMv7 EABI hard-float. The second node was running Fedora on an
+ARM v8 simulator. Installation and operation of the software were identical to
+other platforms. Using the Platform LSF ELIM (External LIM) facility for
+adding external load indices, I was able to quickly create a script to report
+the processor temperature on the Udoo Quad system.</p>
+
+<p>Now, putting Platform LSF through its paces, we see the type and model and
+other physical characteristics of the nodes are detected.</p>
+
+<div class="highlight"><pre><code class="language-bash">$ lshosts -w
+HOST_NAME type model cpuf ncpus maxmem maxswp server RESOURCES
+udoo LINUX_ARM ARM7l 60.0 <span style="color: #ae81ff;">4</span> 875M - Yes <span style="color: #f92672;">(</span>mg<span style="color: #f92672;">)</span>
+ma1arms4 LINUX_ARM ARM8 60.0 <span style="color: #ae81ff;">1</span> 1.8G 1.9G Yes <span style="color: #f92672;">()</span></code></pre></div>
+
+<p>Looking at the load information on the system, we see the built-in load
+indices, in addition to the cputemp metric which I introduced to report the
+CPU temperature (Celsius). At this point the system is essentially idle.</p>
+
+<div class="highlight"><pre><code class="language-bash">$ lsload -l
+HOST_NAME status r15s r1m r15m ut pg io ls it tmp swp mem cputemp
+udoo ok 0.5 0.6 1.5 4% 0.0 <span style="color: #ae81ff;">311</span> <span style="color: #ae81ff;">1</span> <span style="color: #ae81ff;">0</span> 1297M 0M 701M 45.0
+ma1arms4 busy 3.6 *7.7 6.2 52% 0.0 <span style="color: #ae81ff;">50</span> <span style="color: #ae81ff;">3</span> <span style="color: #ae81ff;">0</span> 954M 1.9G 1.6G 0.0</code></pre></div>
+
+<p>Next, we submit a job for execution to Platform LSF. Rather than the requisite
+sleep job, we submit something a bit more interesting: the HPC Challenge
+Benchmark (HPCC). Debian Wheezy happens to include a pre-compiled binary
+built against OpenMPI.</p>
+
+<p>As the Udoo Quad is a 4-core system (as the name implies), hpcc is submitted
+requesting 4 cores.</p>
+
+<div class="highlight"><pre><code class="language-bash">$ bsub -n <span style="color: #ae81ff;">4</span> mpiexec -n <span style="color: #ae81ff;">4</span> /usr/bin/hpcc
+Job &lt;2&gt; is submitted to default queue &lt;normal&gt;.</code></pre></div>
+
+<p>With HPCC running, we quickly see the utilization as well as the CPU
+temperature increase to 60C.</p>
+
+<div class="highlight"><pre><code class="language-bash">$ lsload -l
+HOST_NAME status r15s r1m r15m ut pg io ls it tmp swp mem cputemp
+udoo ok 5.1 5.1 2.4 94% 0.0 <span style="color: #ae81ff;">49</span> <span style="color: #ae81ff;">1</span> <span style="color: #ae81ff;">0</span> 1376M 0M 497M 60.0
+ma1arms4 ok 0.5 1.1 1.2 40% 0.0 <span style="color: #ae81ff;">50</span> <span style="color: #ae81ff;">3</span> <span style="color: #ae81ff;">0</span> 954M 1.9G 1.6G 0.0</code></pre></div>
+
+<p>During the life of the job, the resource utilization may be easily viewed using the Platform LSF user commands.</p>
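+<p>(A quick aside on the cputemp metric shown in the lsload output above: an ELIM is
+just a program that loops forever, writing the number of external indices followed
+by name-value pairs to stdout. The sketch below is from memory rather than the exact
+script I used, and the thermal sysfs path varies from board to board:)</p>
+
+<div class="highlight"><pre><code class="language-bash">#!/bin/sh
+# elim.cputemp - report the CPU temperature as an external load index
+while true; do
+    # temperature in millidegrees Celsius on this board
+    temp=$(cat /sys/class/thermal/thermal_zone0/temp)
+    # ELIM output format: &lt;number of indices&gt; &lt;name&gt; &lt;value&gt;
+    echo "1 cputemp $((temp / 1000))"
+    sleep 15
+done</code></pre></div>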
+
+<p>During the life of the job, the resource utilization may be easily viewed
+using the Platform LSF user commands. This includes details such as the PIDs
+that comprise the job.</p>
+
+<div class="highlight"><pre><code class="language-bash">$ bjobs -l
+
+Job &lt;2&gt;, User &lt;debian&gt;, Project &lt;default&gt;, Status &lt;RUN&gt;, Queue &lt;normal&gt;,
+ Command &lt;mpiexec -n <span style="color: #ae81ff;">4</span> /usr/bin/hpcc&gt;, Share group charged &lt;/debian&gt;
+Sun Feb <span style="color: #ae81ff;">2</span> 23:49:48: Submitted from host &lt;udoo&gt;, CWD &lt;/opt/ibm/lsf/conf&gt;,
+ <span style="color: #ae81ff;">4</span> Processors Requested;
+Sun Feb <span style="color: #ae81ff;">2</span> 23:49:48: Started on <span style="color: #ae81ff;">4</span> Hosts/Processors &lt;udoo&gt; &lt;udoo&gt; &lt;udoo&gt; &lt;udoo&gt;,
+Execution Home &lt;/home/debian&gt;, Execution CWD &lt;/opt/ibm/lsf/conf&gt;;
+Sun Feb <span style="color: #ae81ff;">2</span> 23:51:05: Resource usage collected.
+The CPU time used is <span style="color: #ae81ff;">227</span> seconds.
+MEM: <span style="color: #ae81ff;">140</span> Mbytes; SWAP: <span style="color: #ae81ff;">455</span> Mbytes; NTHREAD: <span style="color: #ae81ff;">8</span>
+PGID: 15678; PIDs: <span style="color: #ae81ff;">15678</span> <span style="color: #ae81ff;">15679</span> <span style="color: #ae81ff;">15681</span> <span style="color: #ae81ff;">15682</span> <span style="color: #ae81ff;">15683</span> <span style="color: #ae81ff;">15684</span>
+<span style="color: #ae81ff;">15685</span>
+....
+....</code></pre></div>
+
+<p><strong>New Roads?</strong></p>
+
+<p>Here we could speak of GFlops and other such measures of performance, but
+that was not my objective. The key is that there is a growing interest in
+non-x86 solutions for Technical Computing. IBM Platform LSF software has
+supported and continues to support a wide variety of operating systems and
+processor architectures, from ARM to IBM Power to IBM System z.</p>
+
+<p>As for ARM-based development boards such as the Udoo Quad, Parallella board,
+etc., they are inexpensive as well as energy efficient. This makes
+them of interest to HPC scientists looking at possible approaches to energy
+efficiency for HPC workloads. Let us know your thoughts about the suitability
+of ARM for HPC workloads.</p>
+
+
+
+
+
+ Udoo Quad test drive
+
+ 2014-03-23T16:53:38-06:00
+ https://hpc.social/2014/udoo-quad-test-drive
+ <p>Here is a brief update regarding my experiences so far with the Udoo Quad
+board. I call this <em>kicking the tires</em>, but it simply amounts to tinkering
+with the board and getting a better understanding of its capabilities.</p>
+
+<p>My choice of OS for this round of testing is Ubuntu Studio 12.04 armHF,
+which I obtained from the Udoo Community site downloads page.</p>
+
+<p>As the Udoo Quad includes an on-board SATA connector, I followed the necessary
+steps to install the OS to the external disk, and to boot from it by selecting
+the appropriate device from the U-Boot environment. I used the following <a href="https://elinux.org/UDOO_boot_from_sata">page</a> as a high-level guide.</p>
+
+<p>The disk in this case was an older ~80GB Hitachi disk that I had in my spares
+and that was suitable for the intended purpose. 
With the system booted up, here is what we see:</p>
+
+<div class="highlight"><pre><code class="language-bash"> root@udoo-studio-hfp:~# uname -a
+
+Linux udoo-studio-hfp 3.0.35 <span style="color: #75715e;">#1 SMP PREEMPT Mon Dec 16 14:46:12 CET 2013 armv7l armv7l armv7l GNU/Linux</span>
+
+root@udoo-studio-hfp:~# cat /proc/cpuinfo
+
+Processor : ARMv7 Processor rev <span style="color: #ae81ff;">10</span> <span style="color: #f92672;">(</span>v7l<span style="color: #f92672;">)</span>
+processor : <span style="color: #ae81ff;">0</span>
+BogoMIPS : 1988.28
+
+processor : <span style="color: #ae81ff;">1</span>
+BogoMIPS : 1988.28
+
+processor : <span style="color: #ae81ff;">2</span>
+BogoMIPS : 1988.28
+
+processor : <span style="color: #ae81ff;">3</span>
+BogoMIPS : 1988.28
+
+Features : swp half thumb fastmult vfp edsp neon vfpv3
+
+CPU implementer : 0x41
+CPU architecture: <span style="color: #ae81ff;">7</span>
+CPU variant : 0x2
+CPU part : 0xc09
+CPU revision : <span style="color: #ae81ff;">10</span>
+
+Hardware : SECO i.Mx6 UDOO Board
+Revision : <span style="color: #ae81ff;">63012</span>
+Serial : <span style="color: #ae81ff;">0000000000000000</span>
+
+root@udoo-studio-hfp:~# lsscsi
+
+<span style="color: #f92672;">[</span>0:0:0:0<span style="color: #f92672;">]</span> disk ATA Hitachi HTS54128 HP3O /dev/sda</code></pre></div>
+
+<p>Using the trusty <em>gnome-disk-utility</em>, the read benchmark returns the following results. If this all looks a bit Mac OS X-ish, don&rsquo;t be alarmed. I&rsquo;m
+connecting to my Udoo from my MacBook and tunneling X over ssh. Again, keep in
+mind here that this is an old disk.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/udoo_sata2.png" />
+</figure>
+
+<p>I was surprised to find that the <em>cpufreq</em> utilities all worked as expected on
+the system also. By default, the system booted in a conservative mode
+(~396 MHz) and with <em>cpufreq-set</em> I successfully enabled the performance governor.</p>
+
+<div class="highlight"><pre><code class="language-bash"> root@udoo-studio-hfp:/usr/bin# ./cpufreq-info
+
+cpufrequtils 007: cpufreq-info <span style="color: #f92672;">(</span>C<span style="color: #f92672;">)</span> Dominik Brodowski 2004-2009
+
+Report errors and bugs to cpufreq@vger.kernel.org, please.
+
+analyzing CPU 0:
+
+ driver: imx
+
+ CPUs which run at the same hardware frequency: <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1</span> <span style="color: #ae81ff;">2</span> <span style="color: #ae81ff;">3</span>
+
+ CPUs which need to have their frequency coordinated by software: <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1</span> <span style="color: #ae81ff;">2</span> <span style="color: #ae81ff;">3</span>
+
+ maximum transition latency: 61.0 us.
+
+ hardware limits: <span style="color: #ae81ff;">396</span> MHz - <span style="color: #ae81ff;">996</span> MHz
+
+ available frequency steps: <span style="color: #ae81ff;">996</span> MHz, <span style="color: #ae81ff;">792</span> MHz, <span style="color: #ae81ff;">396</span> MHz
+
+ available cpufreq governors: interactive, conservative, ondemand, userspace, powersave, performance
+
+ current policy: frequency should be within <span style="color: #ae81ff;">396</span> MHz and <span style="color: #ae81ff;">996</span> MHz.
+
+ The governor <span style="color: #e6db74;">"performance"</span> may decide which speed to use
+
+ within this range.
+
+ current CPU frequency is <span style="color: #ae81ff;">996</span> MHz <span style="color: #f92672;">(</span>asserted by call to hardware<span style="color: #f92672;">)</span>.
+
+ cpufreq stats: <span style="color: #ae81ff;">996</span> MHz:8.10%, <span style="color: #ae81ff;">792</span> MHz:0.63%, <span style="color: #ae81ff;">396</span> MHz:91.27% <span style="color: #f92672;">(</span>172036<span style="color: #f92672;">)</span>
+
+....</code></pre></div>
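+
+<p>For reference, switching governors with <em>cpufreq-set</em> is a one-liner per
+core. The command below is illustrative rather than a transcript from the
+board:</p>
+
+<div class="highlight"><pre><code class="language-bash"># enable the performance governor on all four cores (cpufrequtils syntax)
+for c in 0 1 2 3; do cpufreq-set -c $c -g performance; done</code></pre></div>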
+
+<p>As I indicated at the outset, the system has been installed with an ARM HF-prepared
+Linux distribution. This implies that the distro has been compiled
+with the appropriate flags to enable hardware Floating Point Unit support,
+which should help us attain better performance for applications that make
+use of floating point arithmetic.</p>
+
+<p>The system <em>readelf</em> tool can be used to interrogate a binary for architecture
+information. In this case, I&rsquo;ve installed the OS-supplied HPC Challenge
+package to give the board its baptism into the world of Technical Computing.</p>
+
+<div class="highlight"><pre><code class="language-bash"> root@udoo-studio-hfp:/etc/apt# dpkg --get-selections |grep hpcc
+hpcc install
+
+root@udoo-studio-hfp:/etc/apt# readelf -A /usr/bin/hpcc
+Attribute Section: aeabi
+File Attributes
+ Tag_CPU_name: <span style="color: #e6db74;">"7-A"</span>
+ Tag_CPU_arch: v7
+ Tag_CPU_arch_profile: Application
+ Tag_ARM_ISA_use: Yes
+ Tag_THUMB_ISA_use: Thumb-2
+ Tag_FP_arch: VFPv3-D16
+ Tag_ABI_PCS_wchar_t: <span style="color: #ae81ff;">4</span>
+ Tag_ABI_FP_denormal: Needed
+ Tag_ABI_FP_exceptions: Needed
+ Tag_ABI_FP_number_model: IEEE <span style="color: #ae81ff;">754</span>
+ Tag_ABI_align_needed: 8-byte
+ Tag_ABI_align_preserved: 8-byte, except leaf SP
+ Tag_ABI_enum_size: int
+ Tag_ABI_HardFP_use: SP and DP
+ Tag_ABI_VFP_args: VFP registers
+ Tag_CPU_unaligned_access: v6
+ Tag_DIV_use: Not allowed</code></pre></div>
+
+<p>Now that we&rsquo;re done kicking the tires, let&rsquo;s take it for a drive!</p>
+
+<p>The intent here was not a Top 500 run; rather, just to stress the Udoo
+Quad with a more intensive workload. For this purpose, I wrote a small
+Qt program to display the CPU temperature. I was curious to understand how
+the system would heat up given that it&rsquo;s passively cooled (with a nice
+heatsink).</p>
+
+<p>The output from my Linpack run is below:</p>
+
+<div class="highlight"><pre><code class="language-bash"> <span style="color: #f92672;">================================================================================</span>
+HPLinpack 2.0 -- High-Performance Linpack benchmark -- September 10, <span style="color: #ae81ff;">2008</span>
+Written by A. Petitet and R. Clint Whaley, Innovative Computing Laboratory, UTK
+Modified by Piotr Luszczek, Innovative Computing Laboratory, UTK
+Modified by Julien Langou, University of Colorado Denver
+<span style="color: #f92672;">================================================================================</span>
+
+
+An explanation of the input/output parameters follows:
+T/V : Wall time / encoded variant.
+N : The order of the coefficient matrix A.
+NB : The partitioning blocking factor.
+P : The number of process rows.
+Q : The number of process columns.
+Time : Time in seconds to solve the linear system.
+Gflops : Rate of execution <span style="color: #66d9ef;">for</span> solving the linear system. 
+ +The following parameter values will be used: + +N : <span style="color: #ae81ff;">7000</span> +NB : <span style="color: #ae81ff;">90</span> <span style="color: #ae81ff;">192</span> <span style="color: #ae81ff;">110</span> +PMAP : Row-major process mapping +P : <span style="color: #ae81ff;">2</span> +Q : <span style="color: #ae81ff;">2</span> +PFACT : Right +NBMIN : <span style="color: #ae81ff;">4</span> +NDIV : <span style="color: #ae81ff;">2</span> +RFACT : Crout +BCAST : 1ringM +DEPTH : <span style="color: #ae81ff;">1</span> +SWAP : Mix <span style="color: #f92672;">(</span>threshold <span style="color: #f92672;">=</span> 64<span style="color: #f92672;">)</span> +L1 : transposed form +U : transposed form +EQUIL : yes +ALIGN : <span style="color: #ae81ff;">8</span> double precision words + +-------------------------------------------------------------------------------- + +- The matrix A is randomly generated <span style="color: #66d9ef;">for</span> each test. +- The following scaled residual check will be computed: + <span style="color: #f92672;">||</span>Ax-b<span style="color: #f92672;">||</span>_oo / <span style="color: #f92672;">(</span> eps * <span style="color: #f92672;">(</span> <span style="color: #f92672;">||</span> x <span style="color: #f92672;">||</span>_oo * <span style="color: #f92672;">||</span> A <span style="color: #f92672;">||</span>_oo + <span style="color: #f92672;">||</span> b <span style="color: #f92672;">||</span>_oo <span style="color: #f92672;">)</span> * N <span style="color: #f92672;">)</span> +- The relative machine precision <span style="color: #f92672;">(</span>eps<span style="color: #f92672;">)</span> is taken to be 1.110223e-16 +- Computational tests pass <span style="color: #66d9ef;">if</span> scaled residuals are less than 16.0 + +<span style="color: #f92672;">================================================================================</span> +T/V N NB P Q Time Gflops +-------------------------------------------------------------------------------- +WR11C2R4 <span style="color: #ae81ff;">7000</span> <span style="color: #ae81ff;">90</span> <span style="color: #ae81ff;">2</span> <span style="color: #ae81ff;">2</span> 133.23 1.717e+00 +-------------------------------------------------------------------------------- +<span style="color: #f92672;">||</span>Ax-b<span style="color: #f92672;">||</span>_oo/<span style="color: #f92672;">(</span>eps*<span style="color: #f92672;">(||</span>A<span style="color: #f92672;">||</span>_oo*<span style="color: #f92672;">||</span>x<span style="color: #f92672;">||</span>_oo+<span style="color: #f92672;">||</span>b<span style="color: #f92672;">||</span>_oo<span style="color: #f92672;">)</span>*N<span style="color: #f92672;">)=</span> 0.0033466 ...... 
PASSED
+<span style="color: #f92672;">================================================================================</span>
+T/V N NB P Q Time Gflops
+--------------------------------------------------------------------------------
+WR11C2R4 <span style="color: #ae81ff;">7000</span> <span style="color: #ae81ff;">192</span> <span style="color: #ae81ff;">2</span> <span style="color: #ae81ff;">2</span> 130.95 1.747e+00
+--------------------------------------------------------------------------------
+<span style="color: #f92672;">||</span>Ax-b<span style="color: #f92672;">||</span>_oo/<span style="color: #f92672;">(</span>eps*<span style="color: #f92672;">(||</span>A<span style="color: #f92672;">||</span>_oo*<span style="color: #f92672;">||</span>x<span style="color: #f92672;">||</span>_oo+<span style="color: #f92672;">||</span>b<span style="color: #f92672;">||</span>_oo<span style="color: #f92672;">)</span>*N<span style="color: #f92672;">)=</span> 0.0034782 ...... PASSED
+<span style="color: #f92672;">================================================================================</span>
+T/V N NB P Q Time Gflops
+--------------------------------------------------------------------------------
+WR11C2R4 <span style="color: #ae81ff;">7000</span> <span style="color: #ae81ff;">110</span> <span style="color: #ae81ff;">2</span> <span style="color: #ae81ff;">2</span> 137.24 1.667e+00
+--------------------------------------------------------------------------------
+<span style="color: #f92672;">||</span>Ax-b<span style="color: #f92672;">||</span>_oo/<span style="color: #f92672;">(</span>eps*<span style="color: #f92672;">(||</span>A<span style="color: #f92672;">||</span>_oo*<span style="color: #f92672;">||</span>x<span style="color: #f92672;">||</span>_oo+<span style="color: #f92672;">||</span>b<span style="color: #f92672;">||</span>_oo<span style="color: #f92672;">)</span>*N<span style="color: #f92672;">)=</span> 0.0034961 ...... PASSED
+<span style="color: #f92672;">================================================================================</span>
+
+Finished <span style="color: #ae81ff;">3</span> tests with the following results:
+ <span style="color: #ae81ff;">3</span> tests completed and passed residual checks,
+ <span style="color: #ae81ff;">0</span> tests completed and failed residual checks,
+ <span style="color: #ae81ff;">0</span> tests skipped because of illegal input values.
+--------------------------------------------------------------------------------</code></pre></div>
+
+<p>During the runs of HPCC (in particular the HPLinpack portion), I observed the
+CPU temperature climb to ~60 degrees Celsius.</p>
+
+<p>I produced a short video showing a run of HPCC along with the Qt CPU temperature
+app that I created.</p>
+
+<p>That wraps up a successful first test drive. What&rsquo;s next? OpenCL seems like
+the next logical step.</p>
+
+
+
+
+
+ Looking forward from the ARM days of old
+
+ 2014-03-13T17:29:42-06:00
+ https://hpc.social/2014/looking-forward-from-the-arm-days-of-old
+ <p>These days we often hear about CPUs based upon ARM cores. They can be found
+in mobile phones, embedded systems, laptops and even servers. Indeed, projects such as <a href="https://www.montblanc-project.eu/">Mont Blanc</a> are investigating the
+use of ARM-based systems for high performance computing (HPC).</p>
+
+<p>Back in the late 1980s, I was a student in high school and a budding
+computer scientist. 
In those days, my view of the personal computer market
+was very North American-centric, until one day I read about a new desktop
+computer from the UK known as the Acorn Archimedes. This system was based upon
+a RISC CPU which was given the name ARM (Acorn RISC Machine). The writeup in
+the local <em>Toronto Computes!</em> newspaper indicated that Olivetti Canada was
+bringing the Acorn Archimedes range to North America. As luck would have it,
+Olivetti was just down the road from me. After a few phone calls, I was
+invited to their offices for some hands-on time with a top-of-the-line
+<em>Acorn Archimedes 440</em>. This was the start of my journey with ARM-based
+systems. The folks at Olivetti were kind enough to let me use the <em>Archie</em> over a
+number of days. During that time, I had a chance to try out a number of
+different software products, including games and productivity software. Overall,
+I was greatly impressed by the Archie and its operating system, RISC OS, with
+its WIMP interface. One game in particular that I remember quite well was
+Zarch, which showed off the 3D graphics capabilities of the system.</p>
+
+<p>The only catch for me was the list price of the system. As I recall it was
+around $2,500 CAD, which for me at the time was prohibitive.</p>
+
+<p>Moving forward to 2014, I&rsquo;ve recently been tinkering with the ARM-based mini
+PC <em>UDOO Quad</em> running Debian Wheezy EABI (hard-float). This happens to
+intersect with another area of interest, Technical Computing.</p>
+
+<p>I&rsquo;ll share more of my experiences with the Udoo Quad in the coming weeks.</p>
+
+
+
+
+
+ Quantum ESPRESSO- Performance Benefits of Vendor-Optimized Libraries
+
+ 2014-02-25T16:42:00-07:00
+ https://hpc.social/2014/quantum-espresso-performance-benefits-of-vendor-optimized-libraries
+ <div class="p1">In my previous post, I presented a lot of different options you can use to build Quantum ESPRESSO which are (admittedly) very confusing. &nbsp;At the end of the day, the set of options that produces the fastest-running executable matters the most, so I went through and benchmarked many of the permutations of compiler/MPI/library options.</div>
+<div class="p1"><br />What this post ultimately illustrates is that&nbsp;<i>you should never use the Netlib reference implementations of BLAS and LAPACK</i>; even <a href="http://www.netlib.org/blas/faq.html#5">Netlib says as much</a>. &nbsp;ScaLAPACK is much less broadly supported by hardware vendors (e.g., the ACML library that shipped with the PGI compiler I used did not include it), but most of the hardware-dependent optimizations are done below the BLACS level and within the MPI library and associated hardware drivers. &nbsp;As such, I was able to use Intel's MKL ScaLAPACK when building with the Intel compiler in the data below, but I had to use Netlib's ScaLAPACK with ACML-optimized BLAS and LAPACK when compiling with PGI.<br /><br />The actual benchmark I used was the <a href="http://www.deisa.eu/science/benchmarking/codes/quantumespresso">DEISA AUSURF112 benchmark</a>&nbsp;problem with only one pool using 64 MPI processes. 
&nbsp;The two testing platforms were<br /><br /><ul><li>SDSC's Gordon supercomputer (four nodes)</li><ul><li>16× 2.6 GHz Intel Xeon E5-2670 (Sandy Bridge) cores</li><li>64 GB DDR3 SDRAM</li><li>Mellanox ConnectX-3 QDR HCAs on PCIe 3.0</li><li>Mellanox Infiniscale IV switch</li></ul><li>SDSC's Trestles supercomputer (two nodes)</li><ul><li>32×&nbsp;2.4 GHz AMD Opteron 6136 (Magny Cours) cores</li><li>64 GB DDR3 SDRAM</li><li>Mellanox ConnectX QDR HCAs on PCIe 2.0</li><li>Voltaire Grid Director 4700 switch</li></ul></ul><br />I don't know the port-to-port latency for the Trestles runs, but the application is bandwidth-bound due to the problem geometry (one pool), and the large number of <span style="font-family: Courier New, Courier, monospace;">MPI_Allreduce</span>s and <span style="font-family: Courier New, Courier, monospace;">MPI_Alltoallv</span>s renders the latency largely irrelevant. &nbsp;More information about the communication patterns of this benchmark is available from the <a href="http://www.hpcadvisorycouncil.com/pdf/QuantumEspresso_Performance_Analysis.pdf">HPC Advisory Council</a>.<br /><br />On both testing systems, the software versions were the same:<br /><ul><li><b>Compilers</b>: Intel 2013.1.117 and PGI 13.2</li><li><b>MPI libraries</b>: MVAPICH2 1.9 and OpenMPI 1.6.5</li><li><b>Vendor FFTs</b>: MKL 11.0.1 and ACML 5.3.0</li><li><b>Vendor BLAS/LAPACK</b>: MKL 11.0.1 and ACML 5.3.0</li><li><b>Vendor ScaLAPACK</b>: MKL 11.0.1 (used Netlib ScaLAPACK 2.0.2 with PGI)</li><li><b>Reference FFTs</b>: FFTW 3.3.3</li><li><b>Reference BLAS/LAPACK</b>: Netlib 3.4.2</li><li><b>Reference ScaLAPACK</b>: Netlib 2.0.2</li></ul><br /><h2>Vendor-optimized Libraries</h2>On Gordon, MKL shows extremely good performance compared to ACML, and this is to be expected given that Intel's MKL is optimized for the AVX capability of Gordon's processors.<br /><br /><div class="separator" style="clear: both; text-align: center;"></div>
+<table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://3.bp.blogspot.com/-9W-NxWoL_-c/UwASBxQ_ZBI/AAAAAAAAKRc/wN5-WgL4y7Q/s1600/Gordon+Comparison.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="271" src="http://3.bp.blogspot.com/-9W-NxWoL_-c/UwASBxQ_ZBI/AAAAAAAAKRc/wN5-WgL4y7Q/s1600/Gordon+Comparison.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Performance with vendor libraries on Gordon</td></tr></tbody></table></div>
+<div class="p1"><br />In addition, the difference in MPI libraries is also quite consistent. &nbsp;Although the point-to-point performance of MVAPICH2 and OpenMPI over the same fabric should be comparable, the two libraries have different implementations of MPI collective operations. 
&nbsp;Quantum ESPRESSO is dominated by costly <span style="font-family: Courier New, Courier, monospace;">MPI_Allreduce</span> and <span style="font-family: Courier New, Courier, monospace;">MPI_Alltoallv</span>, so the level of optimization within the MPI implementations is very apparent.<br /><br />In fact, the PGI and OpenMPI build (which uses the Netlib ScaLAPACK, as opposed to a vendor-supplied ScaLAPACK which MKL provides) would hang on collectives unless the following environment variable was passed to the OpenMPI runtime:<br /><br /><pre>OMPI_MCA_coll_sync_barrier_after=100</pre><br />This switch forces the OpenMPI runtime to sync all processes after every 100 collective operations to prevent certain MPI ranks from racing so far ahead of the rest that a deadlock occurs. &nbsp;OpenMPI does this after every 1,000 collectives by default. &nbsp;Alternatively, HPCAC suggests the following tunings for OpenMPI:<br /><br /><pre>OMPI_MCA_mpi_affinity_alone=1<br />OMPI_MCA_coll_tuned_use_dynamic_rules=1<br />OMPI_MCA_coll_tuned_barrier_algorithm=6<br />OMPI_MCA_coll_tuned_allreduce_algorithm=0</pre><br />These collective tunings also prevented deadlocking of the benchmark, but the performance was no better than simply increasing the implicit barrier frequency with <span style="font-family: Courier New, Courier, monospace;">OMPI_MCA_coll_sync_barrier_</span>*.<br /><br />Trestles, with its AMD processors, does not realize as large a benefit from using MKL:<br /><br /></div>
+<div class="p1"><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://3.bp.blogspot.com/-ijpV8iYr6uw/UwASP0zFqQI/AAAAAAAAKRk/peFbho9NXQ0/s1600/Trestles+Comparison.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="271" src="http://3.bp.blogspot.com/-ijpV8iYr6uw/UwASP0zFqQI/AAAAAAAAKRk/peFbho9NXQ0/s1600/Trestles+Comparison.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Performance with vendor libraries on Trestles</td></tr></tbody></table></div>
+<div class="p1"><br />MKL still outperforms ACML even on AMD processors, but the margin is almost negligible. &nbsp;As with the Gordon case though, the difference in MPI implementations is stark because of OpenMPI's poor collective performance.<br /><br />It is worth noting that PGI with OpenMPI did not work unless&nbsp;<i>both</i> of the following OpenMPI parameters were specified:</div>
+<div class="p1"><br /></div>
+<pre>OMPI_MCA_coll_sync_barrier_after=100<br />OMPI_MCA_coll_sync_barrier_before=100</pre>
+<div class="p1"><br /></div>
+<div class="p1">At smaller processor counts, ScaLAPACK compiled with OpenMPI (both Netlib's and MKL's implementations) performed horrendously. &nbsp;I don't know exactly what the conflict is, but OpenMPI and ScaLAPACK do not seem to play nicely.<br /><br /></div>
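+<div class="p1">As an aside, any of these OMPI_MCA_* settings can equivalently be passed on the mpirun command line through OpenMPI's --mca flags instead of being exported through the environment. &nbsp;A sketch (the executable and input file names here are only illustrative):</div>
+<div class="p1"><br /></div>
+<pre>mpirun --mca coll_sync_barrier_after 100 \<br />&nbsp; &nbsp; &nbsp; &nbsp;--mca coll_sync_barrier_before 100 \<br />&nbsp; &nbsp; &nbsp; &nbsp;-np 64 ./pw.x -in ausurf.in</pre>
+<div class="p1"><br /></div>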
+<h2>Netlib reference implementations</h2>
+<div class="p1">As a fun afterthought, I thought it also might be useful to compare the vendor libraries to Netlib's reference implementations of BLAS and LAPACK. &nbsp;I rebuilt the four compiler+MPI combinations on both systems using Netlib's BLAS, LAPACK, and ScaLAPACK (as well as the stock FFTW library instead of MKL or ACML's versions) to see how badly Netlib's reference really performs, and here are the results:</div>
+<div class="p1"><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://3.bp.blogspot.com/-XrD1PtrJ_iI/UwASdyK5f_I/AAAAAAAAKR4/f13bCkKCte8/s1600/Gordon+Reference.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="271" src="http://3.bp.blogspot.com/-XrD1PtrJ_iI/UwASdyK5f_I/AAAAAAAAKR4/f13bCkKCte8/s1600/Gordon+Reference.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Performance with Netlib reference libraries on Gordon. &nbsp;The build with Intel and MVAPICH2 was not able to run.</td></tr></tbody></table><br />On SDSC's Gordon resource, the OpenMPI builds were between 3× and 4× slower, but the PGI build with MVAPICH2 was only(!) 64% slower. &nbsp;This is a curious result, as I would have expected performance to be dramatically worse across all combinations of compiler and MPI library, since BLAS and LAPACK should really show no performance difference when it comes to the choice of MPI library.<br /><br />The above results suggest that Quantum ESPRESSO makes its heavy use of BLAS and LAPACK through the ScaLAPACK library, and as such, the ScaLAPACK implementation and its performance with each of the MPI libraries is critically important. &nbsp;Of course, even with a good combination of ScaLAPACK and MPI stack, having a vendor-optimized BLAS and LAPACK goes a long way in increasing overall performance by more than 50%.<br /><br />It should also be obvious that the Intel and MVAPICH2 build's performance data is absent. &nbsp;This is because the build with Intel and MVAPICH2 repeatedly failed with this error:<br /><br /><pre>** On entry to DLASCL parameter number 4 had an illegal value</pre><br />This error is the result of DGELSD within LAPACK not converging within the hard-coded criteria. &nbsp;<a href="https://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=529">This problem has been detailed at the LAPACK developers' forums</a>, and the limits have actually been dramatically increased since the postings in the aforementioned forum.<br /><br />Despite that patch though, the problem still manifests in the newest versions of Netlib's reference BLACS/ScaLAPACK implementation, and I suspect that this is really <a href="http://mailman.cse.ohio-state.edu/pipermail/mvapich-discuss/2013-May/004434.html">a fundamental limitation of the BLACS library relying on platform-dependent behavior to produce its results</a>. 
&nbsp;Recall from above that the vendor-supplied implementations of LAPACK do not trigger this error.<br /><br />On Trestles, the results are even worse:<br /><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://4.bp.blogspot.com/-Gu-zuZeCanw/UwASr7WVTAI/AAAAAAAAKR8/FxMmVFKNWoo/s1600/Trestles+Reference.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="271" src="http://4.bp.blogspot.com/-Gu-zuZeCanw/UwASr7WVTAI/AAAAAAAAKR8/FxMmVFKNWoo/s1600/Trestles+Reference.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Performance with Netlib reference libraries on Trestles. &nbsp;Only the build with PGI and MVAPICH2 was able to run.</td></tr></tbody></table>When built with the Intel compiler, both MVAPICH2- and OpenMPI-linked builds trigger the DLASCL error. &nbsp;The PGI and OpenMPI build does not trigger this error, but instead hangs on collectives even with the OpenMPI tunings I reported for the vendor-optimized Trestles PGI+OpenMPI build.<br /><br />Cranking up the implicit barrier frequency beyond 100 might have gotten the test to run, but quite frankly, having to put a barrier before <i>and</i> after every 100th collective is already an extremely aggressive modification to runtime behavior. &nbsp;Ultimately, this data all suggests that you should, in fact, never use the Netlib reference implementations of BLAS and LAPACK.</div>
+<div class="p1"><br /></div>
+<h2>Summary of Data</h2>
+<div class="p1">Here is an overall summary of the test matrix:</div>
+<div class="p1"><br /><table align="center" cellpadding="0" cellspacing="0" class="tr-caption-container" style="margin-left: auto; margin-right: auto; text-align: center;"><tbody><tr><td style="text-align: center;"><a href="http://1.bp.blogspot.com/-a9IQN7AJdeU/UwATBXIp5bI/AAAAAAAAKSE/FFJ8mvqUc3s/s1600/Comparison+All.png" style="margin-left: auto; margin-right: auto;"><img border="0" height="271" src="http://1.bp.blogspot.com/-a9IQN7AJdeU/UwATBXIp5bI/AAAAAAAAKSE/FFJ8mvqUc3s/s1600/Comparison+All.png" width="400" /></a></td></tr><tr><td class="tr-caption" style="text-align: center;">Overall performance comparison for AUSURF112 benchmark</td></tr></tbody></table><br />This benchmark is very sensitive to the performance of collectives, and exactly how collectives are performed is specific to the MPI implementation being used. &nbsp;OpenMPI shows weaker collective performance across the board, and as a result, significantly worse performance.<br /><br />These collective calls are largely made via the ScaLAPACK library though, and since ScaLAPACK is built upon BLAS and LAPACK, it is critical to have all components (BLAS, LAPACK, ScaLAPACK, and the MPI implementation) working together. &nbsp;In all cases tested, <b>Intel's MKL library along with MVAPICH2 provides the best performance</b>. 
&nbsp;As one may guess, ACML also performs well on AMD Opteron processors, but its lack of optimization for AVX instructions prevented it from realizing the full performance possible on Sandy Bridge processors.<br /><br />In addition to performance, there are conclusions to be drawn about <i>application resiliency</i>, or Quantum ESPRESSO's ability to run calculations without hanging or throwing strange errors:<br /><ul><li><b>PGI with MVAPICH2 was the most resilient combination</b>; it worked out of the box with all combinations of BLAS/LAPACK/ScaLAPACK tested</li><li><b>PGI with OpenMPI was the least resilient combination</b>, perhaps because ACML's lack of ScaLAPACK bindings forces the use of Netlib ScaLAPACK. &nbsp;Combining Netlib BLAS/LAPACK/ScaLAPACK with PGI/OpenMPI simply failed on Trestles, and getting Netlib ScaLAPACK to play nicely with either MKL or ACML's BLAS/LAPACK libraries when compiled against PGI and OpenMPI required tuning of the OpenMPI collectives.</li><li>On both test systems, <b>using vendor libraries wherever possible makes Quantum ESPRESSO run more reliably</b>. &nbsp;The only roadblocks encountered when using MKL or ACML arose when they were combined with PGI and OpenMPI, where special collective tunings had to be done.</li></ul><br />At the end of the day, there aren't many big surprises here. &nbsp;There are three take-away lessons:<br /><ol><li>MKL provides very strong optimizations for the Intel x86 architecture, and ACML isn't so bad either. &nbsp;You run into trouble when you start linking against Netlib libraries.</li><li>MVAPICH2 has better collectives than OpenMPI, and this translates into better ScaLAPACK performance. &nbsp;Again, this becomes less true when you start linking against Netlib libraries.</li><li><b>Don't use the Netlib reference implementations of BLAS, LAPACK, or ScaLAPACK</b> because they aren't designed for performance or resiliency. &nbsp;</li><ul><li><b>Using Netlib caused performance to drop by between 60% and 400%</b>, and&nbsp;</li><li><b>only half of the builds that linked against the Netlib reference implementations would even run</b>.</li></ul></ol><div>Friends don't let friends link against Netlib!</div>
+</div>
+
+
+
+
+ Quantum ESPRESSO- Compiling and Choice of Libraries
+
+ 2014-02-25T00:59:00-07:00
+ https://hpc.social/2014/quantum-espresso-compiling-and-choice-of-libraries
+ <div class="p1">We recently upgraded our two big machines at work, and as a result of that upgrade, a number of our users had to rebuild their installation of Quantum ESPRESSO. &nbsp;As it turns out, little quirks in our system conflicted with little quirks in Quantum ESPRESSO after the upgrade and resulted in the regular process of just doing <span style="font-family: Courier New, Courier, monospace;">./configure</span> and make not working out of the box.</div>
+<div class="p1"><br /></div>
+<div class="p1">Since I had been playing with Quantum ESPRESSO for the purpose of <a href="http://glennklockwood.blogspot.com/2013/12/high-performance-virtualization-sr-iov_14.html">benchmarking QDR InfiniBand virtualized with SR-IOV</a>, I also took it upon myself to iron out exactly how to squeeze the best performance out of QE with respect to compilers, MPI stacks, and choice of linear algebra libraries. 
&nbsp;For the sake of posterity (or at least until a new version of QE comes out that makes this all irrelevant), here are my notes.<br /><br />I also wrapped all of these build options into <a href="https://github.com/sdsc/sdsc-user/blob/master/makefiles/espresso/build-espresso.sh">a script that will configure and build optimized versions of Quantum ESPRESSO</a> for various compiler and MPI combinations on the two machines I support at work.</div>
+<div class="p1"><br /></div>
+<h2>BLAS, LAPACK, and ScaLAPACK</h2>
+<div class="p1">Quantum ESPRESSO, like a multitude of other scientific codes, does a lot of linear algebra and uses the BLAS, LAPACK, and ScaLAPACK libraries to this end. &nbsp;I have to shamefully admit that I never fully understood the relationship between these libraries before[1], but figuring out how to build Quantum ESPRESSO to deliver the best performance was a great excuse to sit down and get it straightened out.</div>
+<div class="p1"><br /></div>
+<div class="p1">BLAS, LAPACK, and ScaLAPACK are all libraries (and <i>de facto</i> standard APIs) that provide increasing levels of abstraction to glue applications to underlying hardware. &nbsp;This is the way I see this layering taking place:</div>
+<div class="p1"><br /></div>
+<div class="separator" style="clear: both; text-align: center;"><a href="http://4.bp.blogspot.com/-AMFxB0saoqI/UsfIweiCCPI/AAAAAAAAKLg/qWV-_P_d-V8/s1600/LAPACK+Stack.png" style="margin-left: 1em; margin-right: 1em;"><img border="0" height="218" src="http://4.bp.blogspot.com/-AMFxB0saoqI/UsfIweiCCPI/AAAAAAAAKLg/qWV-_P_d-V8/s400/LAPACK+Stack.png" width="400" /></a></div>
+<div class="p1"><br /></div>
+<div class="p1"><b>BLAS</b> is the lowest-level library and provides subroutines that do basic vector operations. &nbsp;Netlib provides a <a href="http://www.netlib.org/blas/">reference implementation of BLAS written in Fortran</a>, but the big idea behind BLAS is to allow hardware vendors to provide <a href="http://www.netlib.org/blas/faq.html#5">highly tuned versions of the BLAS subroutines</a> that obviate the need for application developers to worry about optimizing their linear algebra for every possible computer architecture on which the application might run. &nbsp;This motivation is also what gave rise to the MPI standard, but unlike MPI, BLAS is not an actual standard.</div>
+<div class="p1"><br /></div>
+<div class="p1"><b>LAPACK</b> builds upon BLAS and provides higher-level matrix operations such as diagonalization (i.e., solving for eigenvectors and eigenvalues) and inversion. &nbsp;BLAS and LAPACK seem to be bundled together when actually implemented (e.g., IBM ESSL and Intel MKL both provide both optimized BLAS and LAPACK), but they provide two distinct layers of abstracting the mathematical complexity away from application developers.</div>
+<div class="p1"><br /></div>
+<div class="p1"><b>ScaLAPACK</b> builds upon LAPACK and provides a set of subroutines (prefixed with the letter P) that are analogous to the subroutines provided by LAPACK. &nbsp;The big difference is that ScaLAPACK uses MPI to parallelize these LAPACK routines, whereas LAPACK itself (and the underlying BLAS) is completely serial (e.g., Netlib's reference distribution) or relies on shared memory for parallelization (e.g., multithreaded).</div>
+<div class="p1"><br /></div>
+<div class="p1">ScaLAPACK is where things get a little hairy because it not only relies on BLAS as an abstraction layer for doing computations, but also on the <b>BLACS</b> library to abstract away the inter-node communications. &nbsp;The MPI standard is supposed to do much of the same thing though, and in fact BLACS now only supports MPI, making it somewhat of an antiquated layer of abstraction. &nbsp;It follows that most vendors seem to optimize their MPI libraries and leave BLACS unchanged relative to the reference distribution.</div>
+<div class="p1"><br /></div>
+<div class="p1">As I'll mention below, BLACS is a growing source of problems with ScaLAPACK. &nbsp;BLACS is <a href="http://mailman.cse.ohio-state.edu/pipermail/mvapich-discuss/2013-May/004434.html">known to have non-deterministic behavior</a> which renders it sensitive to the MPI implementation upon which it is layered, causing ScaLAPACK to not work under similarly non-deterministic conditions.</div>
+<div class="p1"><br /></div>
+<div class="p1"><span style="font-size: xx-small;">[1] I have a compelling excuse though! &nbsp;I got my start in scientific computing doing molecular dynamics simulations, and there just isn't a great deal of linear algebra required to calculate most models. &nbsp;I did work on <a href="http://dx.doi.org/10.1021/jp207181s">an electronegativity-based model that required solving big systems of equations</a>, but we found that there were more efficient ways to tackle the underlying physical problem like <a href="http://dx.doi.org/10.1063/1.2206578">using clever extended Lagrangian methods</a>.</span></div>
+<div class="p1"><br /></div>
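+<div class="p1">One practical aside of my own before moving on: with this many interchangeable layers, it is easy to lose track of which implementation a binary actually linked against. &nbsp;A quick sanity check is to ask the dynamic linker and inspect a library's exported symbols (the binary name and library path below are illustrative):</div>
+<div class="p1"><br /></div>
+<pre>$ ldd ./pw.x | egrep -i 'mkl|acml|blas|lapack|scalapack'<br />$ nm -D /opt/lapack/intel/lib/liblapack.so | grep -i dgesv</pre>
+<div class="p1"><br /></div>
+<div class="p1">The first command shows which shared objects will satisfy the BLAS/LAPACK/ScaLAPACK symbols at runtime; the second confirms that a given library actually exports the routine you expect.</div>
+<div class="p1"><br /></div>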
+<h2>Building Quantum ESPRESSO</h2>
+<div class="p1">Customizing a build of Quantum ESPRESSO isn't completely standard compared to most non-scientific Linux packages, but it's miles ahead of most scientific packages in that it uses autoconf instead of a home-cooked build process.<br /><br /><h3>Choice of Libraries</h3>There are a few key factors to define when building Quantum ESPRESSO. &nbsp;As you may have guessed from the previous section, they are (in no particular order):<br /><ul><li>choice of compiler</li><li>choice of MPI implementation</li><li>choice of BLAS library</li><li>choice of LAPACK library</li><li>choice of ScaLAPACK library</li><li>choice of FFT library</li></ul></div>
+<div class="p1">On most academic systems like SDSC's Gordon and Trestles, there are several options available for each one of these parameters, and figuring out (1) how to actually define your choice for each, and (2) determining which provides the best performance can be a bear.
&nbsp;What's worse is that these choices are often tied together; for example, the best ScaLAPACK implementation might not be compatible with the best FFT library.<br /><br />Gordon and Trestles provide the following options:<br /><br /><br /><table style="margin: 0 auto;"><thead><tr><th></th><th>Options</th></tr></thead><tbody><tr><td>Compiler</td><td>Intel and PGI</td></tr><tr><td>MPI</td><td>MVAPICH2 and OpenMPI</td></tr><tr><td>BLAS</td><td>MKL, ACML, and Netlib Reference</td></tr><tr><td>LAPACK</td><td>MKL, ACML, and Netlib Reference</td></tr><tr><td>ScaLAPACK</td><td>MKL and Netlib Reference</td></tr><tr><td>FFTs</td><td>MKL, ACML, or FFTW3</td></tr></tbody></table><br />There are actually more options than these (e.g., the GNU compilers and the MPICH implementation), but I did not test them.<br /><div><br /></div>
+<h3>Passing Library Choices to the Build Process</h3><div>As of Quantum ESPRESSO 5.0.3, which is what I used here, you can't specify libraries in the autoconf-standard way (e.g., <span style="font-family: Courier New, Courier, monospace;">--with-lapack=/opt/lapack/...</span>). &nbsp;I suspect this is because the actual implementations of these libraries don't follow a standard convention (e.g., LAPACK calls aren't necessarily in a shared object called&nbsp;<span style="font-family: Courier New, Courier, monospace;">liblapack.so</span>), but the QE build process <i>does</i> honor certain environment variables.</div>
+<div><br /></div>
+<div><b>To specify the compiler</b>, you can simply set the <span style="font-family: Courier New, Courier, monospace;">CC</span>, <span style="font-family: Courier New, Courier, monospace;">FC</span>, and <span style="font-family: Courier New, Courier, monospace;">F77</span> environment variables as with any other application that uses autoconf, e.g.,</div>
+<blockquote style="font-family: Courier New, Courier, monospace; font-size: smaller; text-align: left;">export CC=icc<br />export FC=ifort<br />export F77=ifort</blockquote><div>QE will actually pick up any proprietary compiler in your <span style="font-family: Courier New, Courier, monospace;">$PATH</span> before it reverts to the GNU compilers, which is a surprisingly sensible approach. &nbsp;On SDSC's machines, as long as you have the intel or pgi modules loaded, just plain old <span style="font-family: Courier New, Courier, monospace;">./configure</span> will pick it up.<br /><br /><b>The MPI stack</b> will be automatically detected based on whatever <span style="font-family: Courier New, Courier, monospace;">mpif90</span> is in your path. &nbsp;Again, as long as you have a valid MPI module loaded (<span style="font-family: Courier New, Courier, monospace;">openmpi_ib</span> or <span style="font-family: Courier New, Courier, monospace;">mvapich2_ib</span> on Gordon/Trestles), you don't have to do anything special.<br /><br /><b>The BLAS implementation</b> is selected by setting the <span style="font-family: Courier New, Courier, monospace;">BLAS_LIBS</span> environment variable to the appropriate link-time options. 
&nbsp;For example, the Netlib reference BLAS compiled with the Intel compiler is installed in /opt/lapack/intel/lib on SDSC's machines; thus, your <span style="font-family: Courier New, Courier, monospace;">BLAS_LIBS</span> should be passed to configure as<br /><blockquote style="font-family: Courier New, Courier, monospace; font-size: smaller; text-align: left;">export BLAS_LIBS="-L/opt/lapack/intel/lib -lblas"</blockquote>Similarly, <b>the LAPACK implementation</b> can be specified using the <span style="font-family: Courier New, Courier, monospace;">LAPACK_LIBS</span> environment variable. &nbsp;At SDSC, we install the Netlib BLAS and LAPACK in the same directory, so your <span style="font-family: Courier New, Courier, monospace;">LAPACK_LIBS</span> should actually contain the same library path as <span style="font-family: Courier New, Courier, monospace;">BLAS_LIBS</span>:<br /><blockquote style="font-family: Courier New, Courier, monospace; font-size: smaller; text-align: left;">export LAPACK_LIBS="-L/opt/lapack/intel/lib -llapack"</blockquote>We (and many other supercomputing sites) provide a handy dandy environment variable called <span style="font-family: Courier New, Courier, monospace;">$LAPACKHOME</span> when you load this <span style="font-family: Courier New, Courier, monospace;">lapack</span> module. &nbsp;With this environment variable, you can specify the generic (non-compiler-specific) line to configure:<br /><blockquote style="font-family: Courier New, Courier, monospace; font-size: smaller; text-align: left;">export BLAS_LIBS="-L$LAPACKHOME/lib -lblas" <br />export LAPACK_LIBS="-L$LAPACKHOME/lib -llapack"</blockquote>for convenience.<br /><br /><b>The ScaLAPACK libraries</b> are much the same and are passed to autoconf via the <span style="font-family: Courier New, Courier, monospace;">SCALAPACK_LIBS</span> environment variable. &nbsp;To use the Netlib reference on Gordon/Trestles, you can load the <span style="font-family: Courier New, Courier, monospace;">scalapack</span> module and configure:<br /><blockquote style="font-family: Courier New, Courier, monospace; font-size: smaller; text-align: left;">export SCALAPACK_LIBS="-L$SCALAPACKHOME/lib -lscalapack"</blockquote>Finally, <b>the&nbsp;FFT libraries</b> are defined via the <span style="font-family: Courier New, Courier, monospace;">FFT_LIBS</span> environment variable. &nbsp;To use our fftw installation, <span style="font-family: Courier New, Courier, monospace;">module load fftw</span> and configure:<br /><blockquote style="font-family: Courier New, Courier, monospace; font-size: smaller; text-align: left;">export FFT_LIBS="-L$FFTWHOME/lib -lfftw3"</blockquote>This is all well and good, but using the reference implementations for BLAS and LAPACK, as I will show, will result in very poor performance.<br /><br />
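+<div class="p1">Putting those exports together, a complete reference-library configure invocation on our systems looks something like this (a sketch assembled from the pieces above; load the lapack, scalapack, and fftw modules first):</div>
+<div><br /></div>
+<blockquote style="font-family: Courier New, Courier, monospace; font-size: smaller; text-align: left;">./configure \<br />&nbsp; BLAS_LIBS="-L$LAPACKHOME/lib -lblas" \<br />&nbsp; LAPACK_LIBS="-L$LAPACKHOME/lib -llapack" \<br />&nbsp; SCALAPACK_LIBS="-L$SCALAPACKHOME/lib -lscalapack" \<br />&nbsp; FFT_LIBS="-L$FFTWHOME/lib -lfftw3"</blockquote>
+<div><br /></div>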
+<h3>Using Vendor-Optimized Libraries</h3><div><br /></div>
+<h4>Intel</h4>Since none of these libraries are really standardized, vendors are free to bury their API wrappers in whatever libraries they want and support them to whatever extent they want. &nbsp;Intel's compilers come bundled with their Math Kernel Library (MKL), which provides bindings for<br /><ul><li><b>BLAS:</b><br /><span style="font-family: Courier New, Courier, monospace; font-size: smaller;">BLAS_LIBS="-lmkl_intel_lp64 -lmkl_sequential -lmkl_core"</span></li><li><b>LAPACK:</b><br /><span style="font-family: Courier New, Courier, monospace; font-size: smaller;">LAPACK_LIBS</span> can be left as the default since BLAS and LAPACK are buried in the same libraries</li><li><b>ScaLAPACK/BLACS:</b><br /><span style="font-family: Courier New, Courier, monospace; font-size: smaller;">SCALAPACK_LIBS="-lmkl_scalapack_lp64 -lmkl_blacs_openmpi_lp64"</span> for OpenMPI <b>OR</b><br /><span style="font-family: Courier New, Courier, monospace; font-size: smaller;">SCALAPACK_LIBS="-lmkl_scalapack_lp64 -lmkl_blacs_intelmpi_lp64"</span> for MVAPICH2</li><li><b>FFTW</b>:<br /><span style="font-family: Courier New, Courier, monospace; font-size: smaller;">FFT_LIBS="-lmkl_intel_lp64 -lmkl_sequential -lmkl_core"</span> for modern versions of MKL; older versions had the FFTW3 bindings in a separate library</li></ul><div>so your final configure command should look something like<br /><blockquote style="font-family: 'Courier New', Courier, monospace; font-size: smaller;">./configure \<br />&nbsp; CC=icc \<br />&nbsp; CXX=icpc \<br />&nbsp; FC=ifort \<br />&nbsp; F77=ifort \<br />&nbsp; BLAS_LIBS="-lmkl_intel_lp64 -lmkl_sequential -lmkl_core" \<br />&nbsp; SCALAPACK_LIBS="-lmkl_scalapack_lp64 -lmkl_blacs_openmpi_lp64" \<br />&nbsp; FFT_LIBS="-lmkl_intel_lp64 -lmkl_sequential -lmkl_core"</blockquote>when compiling with OpenMPI, or with a slightly modified <span style="font-family: Courier New, Courier, monospace;">SCALAPACK_LIBS</span> line (<span style="font-family: Courier New, Courier, monospace;">-lmkl_blacs_intelmpi_lp64</span>) when compiling with MVAPICH2.<br /><br /><h4>PGI/AMD</h4>PGI's compilers come bundled with the AMD Core Math Library (ACML), which provides bindings for BLAS, LAPACK, and FFTW, but its lack of ScaLAPACK means we still must use Netlib's ScaLAPACK and BLACS libraries. 
&nbsp;Be sure to load the pgi module, your preferred MPI module, and the scalapack module first!<br /><ul><li><b>BLAS:</b><br /><span style="font-family: Courier New, Courier, monospace; font-size: smaller;">BLAS_LIBS="-L$PGIHOME/libso -lacml"</span></li><li><b>LAPACK:</b><br /><span style="font-family: Courier New, Courier, monospace; font-size: smaller;">LAPACK_LIBS</span>&nbsp;can be left as the default since BLAS and LAPACK are buried in the same ACML library</li><li><b>ScaLAPACK/BLACS:</b><br /><span style="font-family: Courier New, Courier, monospace; font-size: smaller;">SCALAPACK_LIBS="-L$SCALAPACKHOME/lib -lscalapack"</span></li><li><b>FFTW</b>:<br /><span style="font-family: Courier New, Courier, monospace; font-size: smaller;">FFT_LIBS="-L$PGIHOME/libso -lacml" </span>even though ACML is included in the <span style="font-family: Courier New, Courier, monospace;">$BLAS_LIBS</span> variable--this is because autoconf may pick up a system fftw library which needs to be superseded by the FFTW bindings in ACML.</li></ul><div>so your final configure command should look something like<br /><blockquote style="font-family: 'Courier New', Courier, monospace; font-size: smaller;">./configure \<br />&nbsp; CC=pgcc \<br />&nbsp; CXX=pgCC \<br />&nbsp; FC=pgf90 \<br />&nbsp; F77=pgf77 \<br />&nbsp; BLAS_LIBS="-L$PGIHOME/libso -lacml" \<br />&nbsp; SCALAPACK_LIBS="-L$SCALAPACKHOME/lib -lscalapack" \<br />&nbsp; FFT_LIBS="-L$PGIHOME/libso -lacml"</blockquote>After doing this, there is one additional bit of manual hacking that must be done! &nbsp;PGI is known to trigger problems in Quantum ESPRESSO's IO library, IOTK, and you will need to compile with the <span style="font-family: Courier New, Courier, monospace;">-D__IOTK_WORKAROUND1</span> switch enabled. &nbsp;This command will hack the necessary line in <span style="font-family: Courier New, Courier, monospace;">make.sys</span>:<br /><blockquote><span style="font-family: Courier New, Courier, monospace; font-size: x-small;">sed -i 's/^DFLAGS\(.*\)$/DFLAGS\1 -D__IOTK_WORKAROUND1/' make.sys</span></blockquote>I owe a lot of gratitude to <a href="http://filippospiga.me/">Filippo Spiga</a> of Cambridge/the Quantum ESPRESSO Foundation for helping me quickly work through some of the issues I encountered in getting all of these builds to work correctly.<br /><br />In my next post, I will show what effect all of these options have on actual application performance.</div>
+</div>
+</div>
+</div>
+
+
+
+
+ Linux perf, libquadmath, and GFortran's Insane Behavior
+
+ 2014-02-12T20:20:00-07:00
+ https://hpc.social/2014/linux-perf-libquadmath-and-gfortran-s-insane-behavior
+ <p><b>Executive Summary</b>: <a href="http://gcc.gnu.org/onlinedocs/libquadmath/">libquadmath</a> was introduced in GFortran 4.6, which fundamentally changed what the <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span> switch does.  Rather than promoting all floating point arithmetic to double precision, it doubles the width of all floating point types, so explicitly typed double precision is converted to quad precision.  This quad precision is orders of magnitude slower since it must be done in software, causing binaries built with <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span> to grind to a halt when built with GFortran 4.6 and newer.  The solution is to add <span style="font-family: Courier New, Courier, monospace;">-fdefault-double-8</span> to undo this implicit doubling of explicit <span style="font-family: Courier New, Courier, monospace;">real*8</span>.<br /><br />
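+A quick way to see this behavior for yourself (a tiny demonstration of my own, not part of the original debugging) is to print the kinds that <span style="font-family: Courier New, Courier, monospace;">real</span> and <span style="font-family: Courier New, Courier, monospace;">double precision</span> variables actually receive:<br /><br /><pre style="font-family: monospace; font-size: smaller; margin-left: 2em;">$ cat kinds.f90<br />program kinds<br />&nbsp; real :: a<br />&nbsp; double precision :: b<br />&nbsp; print *, kind(a), kind(b)<br />end program kinds<br /><br />$ gfortran -fdefault-real-8 kinds.f90 &amp;&amp; ./a.out    # prints 8 and 16 on GFortran 4.6+<br />$ gfortran -fdefault-real-8 -fdefault-double-8 kinds.f90 &amp;&amp; ./a.out    # prints 8 and 8</pre><br />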
+What follows is a case study of sorts in how I discovered this.  Maybe my methodology will be useful for others who are tasked with debugging performance problems.<br /><br /><h2 id="prob">The Problem</h2>A colleague from my past in research science sent me an e-mail this morning with a very typical problem that people run into whenever they try transferring their applications from one machine to another.  He wrote,<br /><blockquote class="tr_bq">&ldquo;I&rsquo;ve been having a problem with compiling on a new workstation that is an HP with the newer gcc/gfortran 4.6.3.  The executable for the code runs very slow.  If I compile the exact same on the cluster or one of the Dell workstations (both have gfortran 4.4.3) it runs very fast on both.  Also, if I transfer the compiled binary from the cluster to the new HP workstation, it runs fast.&rdquo;</blockquote>That is to say,<br /><br /><table style="border-collapse: collapse; border: 1px solid black; margin: 0 auto; text-align: center;"><tbody><tr style="border: 1px solid black;"><th></th><th style="border: 1px solid black;">Run on New <br />Workstation</th><th style="border: 1px solid black;">Run on Old<br />Workstation</th></tr><tr style="border: 1px solid black;"><th style="border: 1px solid black; text-align: right;">Compiled on <br />New Workstation</th><td style="background: #ff7777; border: 1px solid black;">SLOW</td><td style="border: 1px solid black;">?</td></tr><tr><th style="text-align: right;">Compiled on <br />Old Workstation</th><td style="background: #77ff77; border: 1px solid black;">FAST</td><td style="background: #77ff77;">FAST</td></tr></tbody></table><br /><div>The fact that the old binary ran fast on the new machine ruled out some odd hardware or kernel problem and suggested that the issue was somewhere in userland.  Userland issues are always fixable issues, so this alone suggests that there will be a solution to this issue if we dig deep enough.</div></p>
+<div><br /></div>
+<h2 id="arcane">A Little Logic, A Little Arcane Knowledge</h2>
+<div>The difference in performance was probably related to the upgrade from GFortran 4.4 to GFortran 4.6, and just to make sure this was a well-defined problem, I re-built the application and ran the test case on a local machine to ensure that the problem was reproducible on hardware and an OS with which I was familiar. &nbsp;I built with</div>
+<div><ul><li>The GFortran 4.4 that ships with Red Hat 6. &nbsp;My colleague said that his build with GFortran 4.4 ran fine, and I was able to confirm that <b>GFortran 4.4 produced a reliably fast executable</b>.</li><li>The GFortran 4.6 that ships with Ubuntu 12.04 (my colleague's machine). &nbsp;He said that this one ran very slowly, and I could confirm that <b>GFortran 4.6 did, indeed, produce an unusably slow binary</b>.&nbsp;</li><li>The GFortran 4.8 that I built as a "latest-and-greatest" version on my test system. &nbsp;I wanted to verify that there wasn't some bug in 4.6 that was patched out of subsequent releases. 
&nbsp;Unfortunately this was not the case, as <b>GFortran 4.8 also produced a very slow binary</b>.</li></ul><div>The good news was that the problem is reproducible and we have a baseline case where the application <i>does</i> behave as intended. &nbsp;This meant that, in the worst-case scenario, we can do line-by-line comparisons of the assembly code for the working and non-working binaries to see where the problem lies. &nbsp;Thus, we know the problem has a solution.</div>
+<div><br /></div>
+<div>Of course, the bad news was that some change made between GFortran 4.4 and GFortran 4.6 broke this code, and we have to figure out exactly what this change was.</div>
+<div><br /></div>
+<div>This is where arcane knowledge comes in: I know two facts about GCC that suggest this may be, in fact, a problem with GFortran:</div>
+<div><ol><li>GFortran has been known to throw backwards compatibility to the wind and make wild changes to default behavior. &nbsp;For example, g77 and GFortran 4.1 used 8-byte record marker lengths by default, but then <a href="http://gcc.gnu.org/wiki/GFortran/News#gfortran_4.2">switched over to 4-byte markers in GFortran 4.2 to be in line with what every other Fortran compiler does</a>. &nbsp;This meant that data generated by GFortran 4.1 was not compatible with anything else. &nbsp;It wouldn't have surprised me if they did this sort of thing again.</li><li><a href="http://gcc.gnu.org/wiki/GFortran/News#gfortran_4.6">GCC introduced libquadmath in version 4.6</a> which made all GFortran objects built with 4.6 or later pull in libquadmath. &nbsp;This used to cause me problems because Red Hat 5 did not ship with libquadmath, making all binaries dynamically linked against GFortran 4.6 not portable* to RHEL5. &nbsp;Thus, this issue might have something to do with the addition of libquadmath.</li></ol><div><span style="font-size: xx-small;">* I acknowledge that trying to move binaries between machines is pretty crazy in its own right. &nbsp;Explaining why this was an actual issue for me is both uninteresting and beyond the scope of this post.</span><br /><br /></div>
+<h2 id="perfuse">Examining Baseline Performance</h2></div>
+<div>All modern Linux kernels ship with the <a href="https://perf.wiki.kernel.org/">perf subsystem</a> which makes diagnosing performance problems significantly easier than it has been in the past. &nbsp;If you haven't familiarized yourself with it yet, you really need to--all it took for me was a 2-minute demo by&nbsp;<a class="g-profile" href="https://plus.google.com/109775321689856324025" target="_blank">+Peter Kjellström</a>&nbsp;at SC'13 last year to realize Linux perf is serious business. &nbsp;We will simply use it as an alternative to gprof in this case so that we don't have to re-build all this code with instrumentation, but <a href="https://perf.wiki.kernel.org/index.php/Tutorial">perf can also do a lot of things</a> that used to be the exclusive domain of special-purpose libraries like <a href="http://icl.cs.utk.edu/papi/">PAPI</a> and <a href="http://ipm-hpc.org/">IPM</a>.<br /><br />
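+For instance, getting the sort of hardware-counter summary that once required PAPI is a one-liner (an illustrative command, not part of the original session):<br /><br /><pre style="font-family: monospace; font-size: smaller; margin-left: 2em;">$ perf stat -e cycles,instructions,cache-misses ./mdvgg.x</pre><br />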
&nbsp;We will simply use it as an alternative to gprof in this case so that we don't have to re-build all this code with instrumentation, but <a href="https://perf.wiki.kernel.org/index.php/Tutorial">perf can also do a lot of things</a> that used to be the exclusive domain of special-purpose libraries like <a href="http://icl.cs.utk.edu/papi/">PAPI</a> and <a href="http://ipm-hpc.org/">IPM</a>.<br /><br />Running the "good" build of this application through<sup>†</sup> perf establishes our baseline expected behavior:<br /><br /><pre style="font-family: monospace; font-size: smaller; margin-left: 2em;">$ <span style="color: #0b5394;"><b>perf record -o fast.report -g ./mdvgg.x</b></span><br />WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,<br />check /proc/sys/kernel/kptr_restrict.<br /><br />Samples in kernel functions may not be resolved if a suitable vmlinux<br />file is not found in the buildid cache or in the vmlinux path.<br /><br />Samples in kernel modules won't be resolved at all.<br /><br />If some relocation was applied (e.g. kexec) symbols may be misresolved<br />even with a suitable vmlinux or kallsyms file.<br /><br /><span style="color: #999999;"> Pressure list found<br /> taux,y,z: 0.000000 0.000000 0.000000<br /> txx0,tyy0,tzz0: 1013250. 1013250. 1013250.<br /> Wolf beta value: 4.4600E-008<br /><br />***Lennard-Jones parameters, epsilons in ergs *** <br /> 0.00000D+00 0.00000D+00 0.00000D+00 0.00000D+00 0.00000D+00<br />...<br /> average energy per atom: -0.39932E-11 -57.4739kcal/mole<br /> average energy with selfterm: -0.51864E-11 -74.6481kcal/mole</span><br />[ perf record: Woken up 3 times to write data ]<br />[ perf record: Captured and wrote 0.895 MB fast.report (~39121 samples) ]<br /></pre>where<br /><ul><li><span style="font-family: Courier New, Courier, monospace;">-o fast.report</span> dumps the recorded data to a file called <span style="font-family: Courier New, Courier, monospace;">fast.report</span></li><li><span style="font-family: Courier New, Courier, monospace;">-g</span> generates call graphs in addition to the flat profile (this isn't always necessary)</li><li><span style="font-family: Courier New, Courier, monospace;">./mdvgg.x</span> is the application binary we are profiling</li></ul><br />The scary warnings about kernel functions are harmless and a result of this entire debugging process being run as an unprivileged user. &nbsp;Once the job finishes running, viewing the report reveals (with some extraneous data removed for brevity):<br /><br /><pre style="font-family: monospace; font-size: smaller; margin-left: 2em;">$ <span style="color: #0b5394;">perf report -i fast.report --stdio --sort dso -g flat</span><br />...</pre><pre style="font-family: monospace; font-size: smaller; margin-left: 2em;"># Overhead Command Shared Object<br /># ........ ....... 
....................<br />#<br /> 72.13% mdvgg.x mdvgg.x <br /> 61.12%<br /> <b><span style="color: red;">pairs_</span></b><br /> <b><span style="color: blue;">move1_</span></b><br /><br /> 7.00%<br /> listwater_<br /> bulk_<br /> MAIN__<br /> 0x400efd<br />...<br /> 20.99% mdvgg.x libc-2.12.so <br /> 14.09%<br /> <b><span style="color: magenta;">__memset_sse2</span></b><br /> bulk_<br /> MAIN__<br /> 0x400efd<br /><br /> 0.97%<br /> <b><span style="color: magenta;">__memset_sse2</span></b><br /> MAIN__<br /> 0x400efd<br /></pre>where<br /><ul><li><span style="font-family: Courier New, Courier, monospace;">-i fast.report</span> is the file containing our recorded data</li><li><span style="font-family: Courier New, Courier, monospace;">--stdio</span> prevents perf from using the interactive text user interface (I only added this because I can't paste interactions into a blog)</li><li><span style="font-family: Courier New, Courier, monospace;">--sort dso</span> presents the output in a relatively compact way sorted by the shared object in which time was being spent</li><li><span style="font-family: Courier New, Courier, monospace;">-g flat</span> presents a relatively flat profile (we don't need the full call graph)</li></ul><br />Thus, the majority of our runtime is taken up in a subroutine called <span style="color: red; font-family: Courier New, Courier, monospace;"><b>pairs</b></span>, called from <span style="color: blue; font-family: Courier New, Courier, monospace;"><b>move1</b></span>&nbsp;when this application is working normally. &nbsp;A surprising fraction of runtime was also consumed by <span style="color: magenta; font-family: Courier New, Courier, monospace;"><b>memset(3)</b></span>&nbsp;in this case, but this was the result of my test input being so small that most of the actual runtime was spent doing initialization. &nbsp;Even though this is generally not a great way to test application performance, it is acceptable in this case because even initialization takes 20x longer with the "bad" binary built against GFortran 4.6 <span style="font-size: xx-small;">(which is itself very telling: it suggests that something is systematically wrong with the bad binary)</span>. &nbsp;The simplest and shortest possible run required to reproduce the issue should elucidate where the problem lies.<br /><br />Now, profiling the "bad" binary built with GFortran 4.6 should give us a definite place to start looking:<br /><br /><pre style="font-family: monospace; font-size: smaller; margin-left: 2em;">$ <span style="color: #0b5394;"><b>perf record -o slow.report -g ./mdvgg.x</b></span><br />WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,<br />check /proc/sys/kernel/kptr_restrict.<br />...<br /><br />$ <span style="color: #0b5394;"><b>perf report -i slow.report --stdio --sort dso -g flat</b></span><br />...<br /># Overhead Shared Object<br /># ........
....................<br />#<br /> 93.59% libgcc_s.so.1 <br /> 48.69%<br /> <b><span style="color: blue;">__sfp_handle_exceptions</span></b><br /><br /> 13.54%<br /> <b><span style="color: magenta;">__multf3</span></b><br /><br /> 6.89%<br /> <b><span style="color: magenta;">__addtf3</span></b><br /><br /> 6.16%<br /> <b><span style="color: magenta;">__subtf3</span></b><br /><br />...<br /> 3.02% libquadmath.so.0.0.0<br /> 1.62%<br /> __sfp_handle_exceptions<br /><br /> <b><span style="color: red;">2.67% mdvgg.x</span></b> <br /> 1.91%<br /> hcristo_<br /> fcc100_<br /><br />...</pre><br />Well, there's our problem! &nbsp;Only <b><span style="color: red;">2.67%</span></b> of the application runtime is actually being spent running the <span style="font-family: Courier New, Courier, monospace;">mdvgg.x</span> application, and a huge amount of time is being spent in some <span style="color: blue; font-family: Courier New, Courier, monospace;"><b>__sfp_handle_exceptions</b></span> call. &nbsp;What gives?<br /><br />Now I'm not ashamed to say that I routinely turn to Google to figure out what most of this sort of computer nonsense means. &nbsp;Unfortunately, searching for "<span style="font-family: Courier New, Courier, monospace;">__sfp_handle_exceptions</span>" doesn't turn up anything useful, so the only hint we have is that the name of the call suggests that this "bad" build is generating a lot of floating point exceptions (FPEs). <br /><br />The logical next step is to rebuild the application with a lot of FPE trapping (<span style="font-family: Courier New, Courier, monospace;">FCFLAGS+=-ffpe-trap=invalid,zero,overflow,underflow,denormal</span>). &nbsp;This will determine if the code had been generating a ton of floating point exceptions all along but GFortran had just gotten stricter in 4.6. &nbsp;Unfortunately, doing this just leads to more disappointment--the application does not generate any of the common floating point exceptions, meaning that this mysterious <span style="font-family: Courier New, Courier, monospace;">__sfp_handle_exceptions</span> is, in fact, not handling serious floating point exceptions. &nbsp;What else could it be doing?<br /><br /><span style="font-size: xx-small;"><sup>†</sup> Although this particular application was both quick enough to run entirely through perf and serial enough to not require any special considerations with MPI, getting these perf profiles from long-running and highly parallel codes is similarly easy. &nbsp;Instead of running the application through perf (<span style="font-family: Courier New, Courier, monospace;">perf record -o fast.report -g ./mdvgg.x</span>) you can attach perf to an already-running process for a fixed period of time to generate a sample of the overall performance profile. &nbsp;This is achieved by doing <span style="font-family: Courier New, Courier, monospace;">perf record -o fast.report -g -p <i>&lt;pid&gt;</i> sleep 10</span>. &nbsp;Perf attaches to the specified pid and gathers data from it for as long as the <span style="font-family: Courier New, Courier, monospace;">sleep 10</span> runs, then detaches after those ten seconds.</span><br /><br /><h2 id="diagnosis">Quad-Precision: Back to the Intel 80286</h2>Giving up on <span style="font-family: Courier New, Courier, monospace;">__sfp_handle_exceptions</span> and moving on down the performance profile, it appears that suddenly libquadmath (which, as I mentioned above, appeared after our "working" compiler version was released) is soaking up cycles.
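<br /><br />(As a quick aside--and as a sketch of the idea rather than a transcript from this session--the toolchain itself can confirm what the "bad" binary pulls in and where these mystery symbols live:)<br /><br /><pre style="font-family: monospace; font-size: smaller; margin-left: 2em;">$ <span style="color: #0b5394;"><b>ldd ./mdvgg.x | grep quadmath</b></span>    # a GFortran 4.6+ build links libquadmath.so.0<br />$ <span style="color: #0b5394;"><b>nm -D --defined-only /lib64/libgcc_s.so.1 | grep -E 'tf3|sfp'</b></span>    # __addtf3, __multf3, __subtf3, and friends are exported here</pre>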
&nbsp;Furthermore, a quick googling of some of those big offenders like <span style="color: magenta; font-family: Courier New, Courier, monospace;"><b>__multf3</b></span>, <span style="color: magenta; font-family: Courier New, Courier, monospace;"><b>__addtf3</b></span>, and <span style="color: magenta; font-family: Courier New, Courier, monospace;"><b>__subtf3</b></span> reveals that <b>they are software implementations of long-double arithmetic</b>--the application is now doing quad precision arithmetic in this "bad" build whereas it is definitely not doing this in our "good" build.<br /><br />Suddenly everything becomes a little clearer: long-double floating point arithmetic involves numbers stored in 128-bit precision, but 64-bit CPUs <span style="font-size: xx-small;">(or more properly, FPUs)</span> are only capable of handling (you guessed it) 64-bit precision floating point calculations. &nbsp;Thus, to get an application to do calculations in 128-bit precision, a software layer (libquadmath) must emulate 128-bit floating point hardware and actually translate the binary logic into something the 64-bit CPU can understand. &nbsp;This is analogous to getting a 3rd grader to do a large calculation (e.g., 6×8) by breaking it into pieces they know how to solve (e.g., 8+8, 8+8, 8+8), and it is a <i>very</i> slow process. &nbsp;This massive performance loss is why Intel has had a hardware floating point unit in every processor it has designed since the 80386 (ca. 1985).<br /><br />The obvious question is then why GFortran 4.6 has decided to start carrying out all of the calculations in this code in quad precision by default. &nbsp;Surely the GFortran developers didn't think forcing all arithmetic to be done in software was a good idea, right?<br /><br /><h2 id="gf">Redefining Default Behavior</h2>Of course not. <br /><br />The next challenge, then, is to dig through the GFortran 4.6 manual to figure out what the libquadmath integration did to default behavior, or alternatively, what compiler flags started changing the precision of variables and calculations automatically.<br /><br />This is where knowledge of Fortran becomes important, because an unfortunate aspect of F77 (which has carried forward in F90) is its implicit typing. &nbsp;A novice Fortran programmer (like a new graduate student) may think that doing something like<br /><br /><pre style="margin-left: 2em;">implicit real*8(a-h,o-z)<br />value1 = 0.31415926535e+1</pre><br />will store a double-precision (<span style="font-family: Courier New, Courier, monospace;">real*8</span>) value in <span style="font-family: Courier New, Courier, monospace;">value1</span>. &nbsp;This isn't the case, as the "<span style="font-family: Courier New, Courier, monospace;">e+1</span>" instead of "<span style="font-family: Courier New, Courier, monospace;">d+1</span>" tends to render this a single-precision value. &nbsp;This isn't <i>always</i> the case, but let it suffice to say that the details get messy and I've seen different compilers handle this in different ways by default.<br /><br />Anyway, every Fortran compiler has options to override this implicit typing and force all floating point values into double precision. &nbsp;In GFortran, this has traditionally been <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span>; that is, the default data type for real (i.e., single-precision) values is <span style="font-family: Courier New, Courier, monospace;">real*8</span>, or double precision.
&nbsp;In this particular code's makefile, this flag was enabled to override the sloppy coding practices of decades of graduate students and ensure precision wasn't going down the drain because someone used E's instead of D's in 1997.<br /><br />When a simple search for "quadmath" in the GFortran 4.6 manual turns up nothing, searching for <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span> is the next step. &nbsp;Lo and behold, <a href="http://gcc.gnu.org/onlinedocs/gcc-4.6.3/gfortran/Fortran-Dialect-Options.html">this gem appears</a>:<br /><blockquote class="tr_bq"><span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span><br />Set the default real type to an 8 byte wide type. Do nothing if this is already the default. This option also affects the kind of non-double real constants like 1.0, and <b>does promote the default width of DOUBLE PRECISION to 16 bytes if possible</b>, unless <span style="font-family: Courier New, Courier, monospace;">-fdefault-double-8</span> is given, too.&nbsp;</blockquote>Bingo. &nbsp;Any code that previously used <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span> to ensure that all floating point arithmetic was being done in double precision now does <b>all explicitly typed double precision arithmetic as 128-bit quad precision in software as its effective default behavior</b>. &nbsp;What's worse is that this change in behavior <a href="http://gcc.gnu.org/gcc-4.6/changes.html#fortran">isn't even mentioned in the release notes for GFortran 4.6</a>&nbsp;because <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span> has always <i>tried</i> to promote <span style="font-family: Courier New, Courier, monospace;">real*8</span> to <span style="font-family: Courier New, Courier, monospace;">real*16</span> as its intended behavior; it simply never succeeded because GFortran didn't support software quad-precision before libquadmath appeared in 4.6.<br /><br />Quite frankly, defining the behavior of something as straightforward-sounding as <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span> to be so environment-specific is insane. &nbsp;The only use case where this new behavior would even make sense is if a programmer intentionally mixes <span style="font-family: Courier New, Courier, monospace;">real*4</span> and <span style="font-family: Courier New, Courier, monospace;">real*8</span> datatypes within code and wants to see what will happen if all variable widths are doubled uniformly. &nbsp;On the other hand if <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span> was being used to ensure all calculations were done in double-precision (as was the case in this application and at least a few other unrelated scientific codes with which I have worked), performance takes a catastrophic hit simply because a new quad-precision math library is bundled with GCC.<br /><br />It would make more sense if GFortran added a <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-16</span> (a la Intel Fortran's <span style="font-family: Courier New, Courier, monospace;">-real-size 128</span> switch) to promote all floating point to quad precision. 
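<br /><br />To see the promotion (and the eventual fix) concretely, consider the following sketch--a hypothetical test program, not code from the application in question, and it requires GFortran 4.6 or newer since <span style="font-family: Courier New, Courier, monospace;">storage_size</span> is a Fortran 2008 intrinsic:<br /><br /><pre style="font-family: monospace; font-size: smaller; margin-left: 2em;">$ <span style="color: #0b5394;"><b>cat &gt; promote.f90 &lt;&lt;'EOF'</b></span><br />program promote<br />  real :: r<br />  double precision :: d<br />  ! print the number of bytes each type occupies<br />  print *, storage_size(r)/8, storage_size(d)/8<br />end program promote<br />EOF<br />$ <span style="color: #0b5394;"><b>gfortran promote.f90 -o p &amp;&amp; ./p</b></span>                                       # expect 4 and 8<br />$ <span style="color: #0b5394;"><b>gfortran -fdefault-real-8 promote.f90 -o p &amp;&amp; ./p</b></span>                      # expect 8 and 16<br />$ <span style="color: #0b5394;"><b>gfortran -fdefault-real-8 -fdefault-double-8 promote.f90 -o p &amp;&amp; ./p</b></span>   # expect 8 and 8</pre>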
&nbsp;In fact, I find it difficult to make sense of GFortran's choice to make <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span> preserve mixed precision codes as it does; the only case where I can envision this sort of behavior being useful is in codes that implement their own reduced-precision FFTs. &nbsp;I have literally never encountered such a code, though.<br /><br />Ultimately the solution to this problem, for those who are fortunate enough to get to the bottom of it, is to simply add <span style="font-family: Courier New, Courier, monospace;">-fdefault-double-8</span> in addition to <span style="font-family: Courier New, Courier, monospace;">-fdefault-real-8</span>. &nbsp;This was enough to fix the issue my colleague was having, and now his lab is back to crunching away with molecular dynamics at normal speed.</div> + + + + + Scalable Data Analysis in R + + 2014-01-16T00:00:00-07:00 + https://hpc.social/2014/scalable-data-analysis-in-r + <p>R is a great environment for interactive analysis on your desktop, but when your data needs outgrow your personal computer, it’s not clear what to do next.</p> + +<p>I’ve put together material for a day-long tutorial on scalable data analysis in R. It covers:</p> + +<ul> + <li>A brief introduction to R for those coming from a Python background;</li> + <li>The <a href="http://cran.r-project.org/web/packages/bigmemory/index.html">bigmemory</a> package for out-of-core computation on large data matrices, with a simple physical sciences example;</li> + <li>The standard parallel package, including what were the snow and multicore facilities, using <a href="http://stat-computing.org/dataexpo/2009/the-data.html">airline data</a> as an example;</li> + <li>The <a href="http://cran.r-project.org/web/packages/foreach/index.html">foreach</a> package, using airline data and simple stock data;</li> + <li>The <a href="http://cran.r-project.org/web/packages/Rdsm/index.html">Rdsm</a> package for shared memory; and</li> + <li>a brief introduction to the powerful <a href="http://r-pbd.org">pbdR</a> packages for extremely large-scale computation.</li> +</ul> + +<p>The presentation for the material, in R markdown (so including the source code), is in the presentation directory; you can read the resulting presentation <a href="https://github.com/ljdursi/scalable-analysis-R/blob/master/presentation/ScalableDataAnalysis-R.md">as markdown there</a>, or <a href="https://github.com/ljdursi/scalable-analysis-R/blob/master/presentation/ScalableDataAnalysisInR.pdf?raw=true">as a PDF</a>.</p> + +<p>The R code from the slides can be found in the R directory.</p> + +<p>Some data can be found in the data directory; but as you might expect in a workshop on scalable data analysis, the files are quite large! Mostly you can just find scripts for downloading the data; running make in the main directory will pull almost everything down, but a little more work needs to go into automating some of the production of the data products used.</p> + +<p>Suggestions, as always, greatly welcomed.</p> + + + + + IBM Platform HPC V4.1.1.1- Best Practices for Managing NVIDIA GPU devices + + 2013-12-20T18:35:22-07:00 + https://hpc.social/2013/ibm-platform-hpc-v4-1-1-1-best-practices-for-managing-nvidia-gpu-devices + <p><strong>Summary</strong></p> + +<p>IBM Platform HPC V4.1.1.1 is easy-to-use, yet comprehensive technical +computing infrastructure management software.
It includes, as standard, GPU +management capabilities such as monitoring and workload scheduling for +systems equipped with NVIDIA GPU devices.</p> + +<p>At the time of writing, the latest available NVIDIA CUDA version is 5.5. Here +we provide a series of best practices to deploy and manage an IBM Platform HPC +cluster with servers equipped with NVIDIA GPU devices.</p> + +<p><strong>Introduction</strong></p> + +<p>The document serves as a guide to enabling IBM Platform HPC V4.1.1.1 GPU +management capabilities in a cluster equipped with NVIDIA Tesla Kepler GPUs +and with NVIDIA CUDA 5.5. The steps below assume familiarity with IBM Platform +HPC V4.1.1.1 commands and concepts. As part of the procedure, a NVIDIA CUDA +5.5 Kit is prepared using an example template.</p> + +<p>The example cluster defined in the Best Practices below is equipped as follows:</p> + +<ul> +<li>Red Hat Enterprise Linux 6.4 (x86-64)</li> +<li>IBM Platform HPC management node (<em>hpc4111tete</em>)</li> +<li>Compute nodes:</li> +<li>compute000 (NVIDIA Tesla K20c)</li> +<li>compute001 (NVIDIA Tesla K40c)</li> +<li>compute002 (NVIDIA Tesla K40c)</li> +</ul> +<p><strong>A. Create a NVIDIA CUDA Kit</strong></p> + +<p>It is assumed that the IBM Platform HPC V4.1.1.1 management node (hpc4111tete) has been installed and that there are 3 compute nodes equipped with NVIDIA Tesla GPUs that will be provisioned as part of the procedure.</p> + +<p>Here we provide the procedure to download the NVIDIA CUDA RPMs from NVIDIA. +This is achieved by installing the NVIDIA CUDA RPM to configure the CUDA +repository from which the NVIDIA CUDA RPMs will be downloaded for packaging as +a Kit.</p> + +<p>Additional details regarding IBM Platform HPC Kits can be found <a href="http://sourceforge.net/apps/mediawiki/xcat/index.php?title=Using_Software_Kits_in_OS_Images">here</a>.</p> + +<p>The procedure assumes the following:</p> + +<ul> +<li>The IBM Platform HPC management node has access to the Internet.</li> +<li>The procedure has been validated with NVIDIA CUDA 5.5.</li> +<li>All commands are run as user root on the IBM Platform HPC management node, +unless otherwise indicated.</li> +</ul> +<ol> +<li>Install the yum <em>downloadonly</em> plugin on the IBM Platform HPC management +node.</li> +</ol> +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># yum install yum-plugin-downloadonly</span> +Loaded plugins: product-id, refresh-packagekit, security, subscription-manager +This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
+Setting up Install Process +Resolving Dependencies +--&gt; Running transaction check +---&gt; Package yum-plugin-downloadonly.noarch 0:1.1.30-14.el6 will be installed +--&gt; Finished Dependency Resolution + +Dependencies Resolved + +<span style="color: #f92672;">================================================================================</span> + Package Arch Version Repository Size +<span style="color: #f92672;">================================================================================</span> +Installing: + yum-plugin-downloadonly noarch 1.1.30-14.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">20</span> k + +Transaction Summary +<span style="color: #f92672;">================================================================================</span> +Install <span style="color: #ae81ff;">1</span> Package<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> + +Total download size: <span style="color: #ae81ff;">20</span> k +Installed size: <span style="color: #ae81ff;">21</span> k +Is this ok <span style="color: #f92672;">[</span>y/N<span style="color: #f92672;">]</span>: y +Downloading Packages: +yum-plugin-downloadonly-1.1.30-14.el6.noarch.rpm | <span style="color: #ae81ff;">20</span> kB 00:00 +Running rpm_check_debug +Running Transaction Test +Transaction Test Succeeded +Running Transaction + Installing : yum-plugin-downloadonly-1.1.30-14.el6.noarch 1/1 + Verifying : yum-plugin-downloadonly-1.1.30-14.el6.noarch 1/1 + +Installed: + yum-plugin-downloadonly.noarch 0:1.1.30-14.el6 + +Complete!</code></pre></div> + +<ol start="2"> +<li>On the IBM Platform HPC management node, install the NVIDIA CUDA RPM. This +will configure the CUDA repository.</li> +</ol> +<p>Note: The CUDA RPM can be downloaded <a href="https://developer.nvidia.com/cuda-downloads">here</a>.</p> + +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># rpm -ivh ./cuda-repo-rhel6-5.5-0.x86_64.rpm</span> +Preparing... <span style="color: #75715e;">########################################### [100%]</span> + 1:cuda-repo-rhel6 <span style="color: #75715e;">########################################### [100%]</span></code></pre></div> + +<ol start="3"> +<li>NVIDIA CUDA requires packages which are part of Extra Packages for +Enterprise Linux (EPEL), including <em>dkms</em>. On the IBM Platform HPC management +node, we now install the EPEL repository RPM.</li> +</ol> +<p>Note: The EPEL RPM for RHEL 6 family can be downloaded <a href="http://download.fedoraproject.org/pub/epel/6/i386/repoview/epel-release.html">here</a>.</p> + +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># rpm -ivh ./epel-release-6-8.noarch.rpm</span> +warning: ./epel-release-6-8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY +Preparing... <span style="color: #75715e;">########################################### [100%]</span> + 1:epel-release <span style="color: #75715e;">########################################### [100%]</span></code></pre></div> + +<ol start="4"> +<li>Now, the NVIDIA CUDA Toolkit RPMs are downloaded via the OS <em>yum</em> command +to the directory <em>/root/CUDA5.5</em>. 
The RPMs will be part of the NVIDIA CUDA Kit +which is built in the subsequent steps.</li> +</ol> +<p>Note: Details on using the <em>yum --downloadonly</em> option can be found <a href="https://access.redhat.com/site/solutions/10154">here</a>.</p> + +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># yum install --downloadonly --downloaddir=/root/CUDA5.5/ cuda-5-5.x86_64</span> +Loaded plugins: downloadonly, product-id, refresh-packagekit, security, + : subscription-manager +This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register. +epel/metalink | <span style="color: #ae81ff;">15</span> kB 00:00 +epel | 4.2 kB 00:00 +epel/primary_db | 5.7 MB 00:05 +Setting up Install Process +Resolving Dependencies +--&gt; Running transaction check +---&gt; Package cuda-5-5.x86_64 0:5.5-22 will be installed +--&gt; Processing Dependency: cuda-command-line-tools-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: cuda-headers-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: cuda-documentation-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: cuda-samples-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: cuda-visual-tools-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: cuda-core-libs-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: cuda-license-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: cuda-core-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: cuda-misc-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: cuda-extra-libs-5-5 <span style="color: #f92672;">=</span> 5.5-22 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: xorg-x11-drv-nvidia-devel<span style="color: #f92672;">(</span>x86-32<span style="color: #f92672;">)</span> &gt;<span style="color: #f92672;">=</span> 319.00 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: xorg-x11-drv-nvidia-libs<span style="color: #f92672;">(</span>x86-32<span style="color: #f92672;">)</span> &gt;<span style="color: #f92672;">=</span> 319.00 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: xorg-x11-drv-nvidia-devel<span style="color: #f92672;">(</span>x86-64<span style="color: #f92672;">)</span> &gt;<span style="color: #f92672;">=</span> 319.00 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: nvidia-xconfig &gt;<span style="color: #f92672;">=</span> 319.00 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt;
Processing Dependency: cuda-driver &gt;<span style="color: #f92672;">=</span> 319.00 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: nvidia-settings &gt;<span style="color: #f92672;">=</span> 319.00 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: nvidia-modprobe &gt;<span style="color: #f92672;">=</span> 319.00 <span style="color: #66d9ef;">for</span> package: cuda-5-5-5.5-22.x86_64 +--&gt; Running transaction check +---&gt; Package cuda-command-line-tools-5-5.x86_64 0:5.5-22 will be installed +---&gt; Package cuda-core-5-5.x86_64 0:5.5-22 will be installed +---&gt; Package cuda-core-libs-5-5.x86_64 0:5.5-22 will be installed +---&gt; Package cuda-documentation-5-5.x86_64 0:5.5-22 will be installed +---&gt; Package cuda-extra-libs-5-5.x86_64 0:5.5-22 will be installed +---&gt; Package cuda-headers-5-5.x86_64 0:5.5-22 will be installed +---&gt; Package cuda-license-5-5.x86_64 0:5.5-22 will be installed +---&gt; Package cuda-misc-5-5.x86_64 0:5.5-22 will be installed +---&gt; Package cuda-samples-5-5.x86_64 0:5.5-22 will be installed +--&gt; Processing Dependency: mesa-libGLU-devel <span style="color: #66d9ef;">for</span> package: cuda-samples-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: freeglut-devel <span style="color: #66d9ef;">for</span> package: cuda-samples-5-5-5.5-22.x86_64 +--&gt; Processing Dependency: libXi-devel <span style="color: #66d9ef;">for</span> package: cuda-samples-5-5-5.5-22.x86_64 +---&gt; Package cuda-visual-tools-5-5.x86_64 0:5.5-22 will be installed +---&gt; Package nvidia-modprobe.x86_64 0:319.37-1.el6 will be installed +---&gt; Package nvidia-settings.x86_64 0:319.37-30.el6 will be installed +---&gt; Package nvidia-xconfig.x86_64 0:319.37-27.el6 will be installed +---&gt; Package xorg-x11-drv-nvidia.x86_64 1:319.37-2.el6 will be installed +--&gt; Processing Dependency: xorg-x11-drv-nvidia-libs<span style="color: #f92672;">(</span>x86-64<span style="color: #f92672;">)</span> <span style="color: #f92672;">=</span> 1:319.37-2.el6 <span style="color: #66d9ef;">for</span> package: 1:xorg-x11-drv-nvidia-319.37-2.el6.x86_64 +--&gt; Processing Dependency: nvidia-kmod &gt;<span style="color: #f92672;">=</span> 319.37 <span style="color: #66d9ef;">for</span> package: 1:xorg-x11-drv-nvidia-319.37-2.el6.x86_64 +---&gt; Package xorg-x11-drv-nvidia-devel.i686 1:319.37-2.el6 will be installed +---&gt; Package xorg-x11-drv-nvidia-devel.x86_64 1:319.37-2.el6 will be installed +---&gt; Package xorg-x11-drv-nvidia-libs.i686 1:319.37-2.el6 will be installed +--&gt; Processing Dependency: libX11.so.6 <span style="color: #66d9ef;">for</span> package: 1:xorg-x11-drv-nvidia-libs-319.37-2.el6.i686 +--&gt; Processing Dependency: libz.so.1 <span style="color: #66d9ef;">for</span> package: 1:xorg-x11-drv-nvidia-libs-319.37-2.el6.i686 +--&gt; Processing Dependency: libXext.so.6 <span style="color: #66d9ef;">for</span> package: 1:xorg-x11-drv-nvidia-libs-319.37-2.el6.i686 +--&gt; Running transaction check +---&gt; Package freeglut-devel.x86_64 0:2.6.0-1.el6 will be installed +--&gt; Processing Dependency: freeglut <span style="color: #f92672;">=</span> 2.6.0-1.el6 <span style="color: #66d9ef;">for</span> package: freeglut-devel-2.6.0-1.el6.x86_64 +--&gt; Processing Dependency: libGL-devel <span style="color: #66d9ef;">for</span> package: freeglut-devel-2.6.0-1.el6.x86_64 +--&gt; Processing Dependency: libglut.so.3<span style="color: #f92672;">()(</span>64bit<span style="color: 
#f92672;">)</span> <span style="color: #66d9ef;">for</span> package: freeglut-devel-2.6.0-1.el6.x86_64 +---&gt; Package libX11.i686 0:1.5.0-4.el6 will be installed +--&gt; Processing Dependency: libxcb.so.1 <span style="color: #66d9ef;">for</span> package: libX11-1.5.0-4.el6.i686 +---&gt; Package libXext.i686 0:1.3.1-2.el6 will be installed +---&gt; Package libXi-devel.x86_64 0:1.6.1-3.el6 will be installed +---&gt; Package mesa-libGLU-devel.x86_64 0:9.0-0.7.el6 will be installed +---&gt; Package nvidia-kmod.x86_64 1:319.37-1.el6 will be installed +--&gt; Processing Dependency: kernel-devel <span style="color: #66d9ef;">for</span> package: 1:nvidia-kmod-319.37-1.el6.x86_64 +--&gt; Processing Dependency: dkms <span style="color: #66d9ef;">for</span> package: 1:nvidia-kmod-319.37-1.el6.x86_64 +---&gt; Package xorg-x11-drv-nvidia-libs.x86_64 1:319.37-2.el6 will be installed +---&gt; Package zlib.i686 0:1.2.3-29.el6 will be installed +--&gt; Running transaction check +---&gt; Package dkms.noarch 0:2.2.0.3-17.el6 will be installed +---&gt; Package freeglut.x86_64 0:2.6.0-1.el6 will be installed +---&gt; Package kernel-devel.x86_64 0:2.6.32-358.el6 will be installed +---&gt; Package libxcb.i686 0:1.8.1-1.el6 will be installed +--&gt; Processing Dependency: libXau.so.6 <span style="color: #66d9ef;">for</span> package: libxcb-1.8.1-1.el6.i686 +---&gt; Package mesa-libGL-devel.x86_64 0:9.0-0.7.el6 will be installed +--&gt; Processing Dependency: pkgconfig<span style="color: #f92672;">(</span>libdrm<span style="color: #f92672;">)</span> &gt;<span style="color: #f92672;">=</span> 2.4.24 <span style="color: #66d9ef;">for</span> package: mesa-libGL-devel-9.0-0.7.el6.x86_64 +--&gt; Processing Dependency: pkgconfig<span style="color: #f92672;">(</span>xxf86vm<span style="color: #f92672;">)</span> <span style="color: #66d9ef;">for</span> package: mesa-libGL-devel-9.0-0.7.el6.x86_64 +--&gt; Processing Dependency: pkgconfig<span style="color: #f92672;">(</span>xfixes<span style="color: #f92672;">)</span> <span style="color: #66d9ef;">for</span> package: mesa-libGL-devel-9.0-0.7.el6.x86_64 +--&gt; Processing Dependency: pkgconfig<span style="color: #f92672;">(</span>xdamage<span style="color: #f92672;">)</span> <span style="color: #66d9ef;">for</span> package: mesa-libGL-devel-9.0-0.7.el6.x86_64 +--&gt; Running transaction check +---&gt; Package libXau.i686 0:1.0.6-4.el6 will be installed +---&gt; Package libXdamage-devel.x86_64 0:1.1.3-4.el6 will be installed +---&gt; Package libXfixes-devel.x86_64 0:5.0-3.el6 will be installed +---&gt; Package libXxf86vm-devel.x86_64 0:1.1.2-2.el6 will be installed +---&gt; Package libdrm-devel.x86_64 0:2.4.39-1.el6 will be installed +--&gt; Finished Dependency Resolution + +Dependencies Resolved + +<span style="color: #f92672;">================================================================================</span> + Package Arch Version Repository Size +<span style="color: #f92672;">================================================================================</span> +Installing: + cuda-5-5 x86_64 5.5-22 cuda 3.3 k +Installing <span style="color: #66d9ef;">for</span> dependencies: + cuda-command-line-tools-5-5 x86_64 5.5-22 cuda 6.4 M + cuda-core-5-5 x86_64 5.5-22 cuda <span style="color: #ae81ff;">29</span> M + cuda-core-libs-5-5 x86_64 5.5-22 cuda <span style="color: #ae81ff;">230</span> k + cuda-documentation-5-5 x86_64 5.5-22 cuda <span style="color: #ae81ff;">79</span> M + cuda-extra-libs-5-5 x86_64 5.5-22 cuda <span style="color: #ae81ff;">120</span> M + 
cuda-headers-5-5 x86_64 5.5-22 cuda 1.1 M + cuda-license-5-5 x86_64 5.5-22 cuda <span style="color: #ae81ff;">25</span> k + cuda-misc-5-5 x86_64 5.5-22 cuda 1.7 M + cuda-samples-5-5 x86_64 5.5-22 cuda <span style="color: #ae81ff;">150</span> M + cuda-visual-tools-5-5 x86_64 5.5-22 cuda <span style="color: #ae81ff;">268</span> M + dkms noarch 2.2.0.3-17.el6 epel <span style="color: #ae81ff;">74</span> k + freeglut x86_64 2.6.0-1.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">172</span> k + freeglut-devel x86_64 2.6.0-1.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">112</span> k + kernel-devel x86_64 2.6.32-358.el6 xCAT-rhels6.4-path0 8.1 M + libX11 i686 1.5.0-4.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">590</span> k + libXau i686 1.0.6-4.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">24</span> k + libXdamage-devel x86_64 1.1.3-4.el6 xCAT-rhels6.4-path0 9.3 k + libXext i686 1.3.1-2.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">34</span> k + libXfixes-devel x86_64 5.0-3.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">12</span> k + libXi-devel x86_64 1.6.1-3.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">102</span> k + libXxf86vm-devel x86_64 1.1.2-2.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">17</span> k + libdrm-devel x86_64 2.4.39-1.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">77</span> k + libxcb i686 1.8.1-1.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">114</span> k + mesa-libGL-devel x86_64 9.0-0.7.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">507</span> k + mesa-libGLU-devel x86_64 9.0-0.7.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">111</span> k + nvidia-kmod x86_64 1:319.37-1.el6 cuda 4.0 M + nvidia-modprobe x86_64 319.37-1.el6 cuda <span style="color: #ae81ff;">14</span> k + nvidia-settings x86_64 319.37-30.el6 cuda <span style="color: #ae81ff;">847</span> k + nvidia-xconfig x86_64 319.37-27.el6 cuda <span style="color: #ae81ff;">89</span> k + xorg-x11-drv-nvidia x86_64 1:319.37-2.el6 cuda 5.1 M + xorg-x11-drv-nvidia-devel i686 1:319.37-2.el6 cuda <span style="color: #ae81ff;">116</span> k + xorg-x11-drv-nvidia-devel x86_64 1:319.37-2.el6 cuda <span style="color: #ae81ff;">116</span> k + xorg-x11-drv-nvidia-libs i686 1:319.37-2.el6 cuda <span style="color: #ae81ff;">28</span> M + xorg-x11-drv-nvidia-libs x86_64 1:319.37-2.el6 cuda <span style="color: #ae81ff;">28</span> M + zlib i686 1.2.3-29.el6 xCAT-rhels6.4-path0 <span style="color: #ae81ff;">73</span> k + +Transaction Summary +<span style="color: #f92672;">================================================================================</span> +Install <span style="color: #ae81ff;">36</span> Package<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> + +Total download size: <span style="color: #ae81ff;">731</span> M +Installed size: 1.6 G +Is this ok <span style="color: #f92672;">[</span>y/N<span style="color: #f92672;">]</span>: y +Downloading Packages: +<span style="color: #f92672;">(</span>1/36<span style="color: #f92672;">)</span>: cuda-5-5-5.5-22.x86_64.rpm | 3.3 kB 00:00 +<span style="color: #f92672;">(</span>2/36<span style="color: #f92672;">)</span>: cuda-command-line-tools-5-5-5.5-22.x86_64.rpm | 6.4 MB 00:06 +<span style="color: #f92672;">(</span>3/36<span style="color: #f92672;">)</span>: cuda-core-5-5-5.5-22.x86_64.rpm | <span style="color: #ae81ff;">29</span> MB 00:31 +<span style="color: #f92672;">(</span>4/36<span style="color: #f92672;">)</span>: cuda-core-libs-5-5-5.5-22.x86_64.rpm | <span 
style="color: #ae81ff;">230</span> kB 00:00 +<span style="color: #f92672;">(</span>5/36<span style="color: #f92672;">)</span>: cuda-documentation-5-5-5.5-22.x86_64.rpm | <span style="color: #ae81ff;">79</span> MB 01:28 +<span style="color: #f92672;">(</span>6/36<span style="color: #f92672;">)</span>: cuda-extra-libs-5-5-5.5-22.x86_64.rpm | <span style="color: #ae81ff;">120</span> MB 02:17 +<span style="color: #f92672;">(</span>7/36<span style="color: #f92672;">)</span>: cuda-headers-5-5-5.5-22.x86_64.rpm | 1.1 MB 00:01 +<span style="color: #f92672;">(</span>8/36<span style="color: #f92672;">)</span>: cuda-license-5-5-5.5-22.x86_64.rpm | <span style="color: #ae81ff;">25</span> kB 00:00 +<span style="color: #f92672;">(</span>9/36<span style="color: #f92672;">)</span>: cuda-misc-5-5-5.5-22.x86_64.rpm | 1.7 MB 00:01 +<span style="color: #f92672;">(</span>10/36<span style="color: #f92672;">)</span>: cuda-samples-5-5-5.5-22.x86_64.rpm | <span style="color: #ae81ff;">150</span> MB 02:51 +<span style="color: #f92672;">(</span>11/36<span style="color: #f92672;">)</span>: cuda-visual-tools-5-5-5.5-22.x86_64.rpm | <span style="color: #ae81ff;">268</span> MB 05:06 +<span style="color: #f92672;">(</span>12/36<span style="color: #f92672;">)</span>: dkms-2.2.0.3-17.el6.noarch.rpm | <span style="color: #ae81ff;">74</span> kB 00:00 +<span style="color: #f92672;">(</span>13/36<span style="color: #f92672;">)</span>: freeglut-2.6.0-1.el6.x86_64.rpm | <span style="color: #ae81ff;">172</span> kB 00:00 +<span style="color: #f92672;">(</span>14/36<span style="color: #f92672;">)</span>: freeglut-devel-2.6.0-1.el6.x86_64.rpm | <span style="color: #ae81ff;">112</span> kB 00:00 +<span style="color: #f92672;">(</span>15/36<span style="color: #f92672;">)</span>: kernel-devel-2.6.32-358.el6.x86_64.rpm | 8.1 MB 00:00 +<span style="color: #f92672;">(</span>16/36<span style="color: #f92672;">)</span>: libX11-1.5.0-4.el6.i686.rpm | <span style="color: #ae81ff;">590</span> kB 00:00 +<span style="color: #f92672;">(</span>17/36<span style="color: #f92672;">)</span>: libXau-1.0.6-4.el6.i686.rpm | <span style="color: #ae81ff;">24</span> kB 00:00 +<span style="color: #f92672;">(</span>18/36<span style="color: #f92672;">)</span>: libXdamage-devel-1.1.3-4.el6.x86_64.rpm | 9.3 kB 00:00 +<span style="color: #f92672;">(</span>19/36<span style="color: #f92672;">)</span>: libXext-1.3.1-2.el6.i686.rpm | <span style="color: #ae81ff;">34</span> kB 00:00 +<span style="color: #f92672;">(</span>20/36<span style="color: #f92672;">)</span>: libXfixes-devel-5.0-3.el6.x86_64.rpm | <span style="color: #ae81ff;">12</span> kB 00:00 +<span style="color: #f92672;">(</span>21/36<span style="color: #f92672;">)</span>: libXi-devel-1.6.1-3.el6.x86_64.rpm | <span style="color: #ae81ff;">102</span> kB 00:00 +<span style="color: #f92672;">(</span>22/36<span style="color: #f92672;">)</span>: libXxf86vm-devel-1.1.2-2.el6.x86_64.rpm | <span style="color: #ae81ff;">17</span> kB 00:00 +<span style="color: #f92672;">(</span>23/36<span style="color: #f92672;">)</span>: libdrm-devel-2.4.39-1.el6.x86_64.rpm | <span style="color: #ae81ff;">77</span> kB 00:00 +<span style="color: #f92672;">(</span>24/36<span style="color: #f92672;">)</span>: libxcb-1.8.1-1.el6.i686.rpm | <span style="color: #ae81ff;">114</span> kB 00:00 +<span style="color: #f92672;">(</span>25/36<span style="color: #f92672;">)</span>: mesa-libGL-devel-9.0-0.7.el6.x86_64.rpm | <span style="color: #ae81ff;">507</span> kB 00:00 +<span style="color: #f92672;">(</span>26/36<span style="color: 
#f92672;">)</span>: mesa-libGLU-devel-9.0-0.7.el6.x86_64.rpm | <span style="color: #ae81ff;">111</span> kB 00:00 +<span style="color: #f92672;">(</span>27/36<span style="color: #f92672;">)</span>: nvidia-kmod-319.37-1.el6.x86_64.rpm | 4.0 MB 00:13 +<span style="color: #f92672;">(</span>28/36<span style="color: #f92672;">)</span>: nvidia-modprobe-319.37-1.el6.x86_64.rpm | <span style="color: #ae81ff;">14</span> kB 00:00 +<span style="color: #f92672;">(</span>29/36<span style="color: #f92672;">)</span>: nvidia-settings-319.37-30.el6.x86_64.rpm | <span style="color: #ae81ff;">847</span> kB 00:01 +<span style="color: #f92672;">(</span>30/36<span style="color: #f92672;">)</span>: nvidia-xconfig-319.37-27.el6.x86_64.rpm | <span style="color: #ae81ff;">89</span> kB 00:00 +<span style="color: #f92672;">(</span>31/36<span style="color: #f92672;">)</span>: xorg-x11-drv-nvidia-319.37-2.el6.x86_64.rpm | 5.1 MB 00:17 +<span style="color: #f92672;">(</span>32/36<span style="color: #f92672;">)</span>: xorg-x11-drv-nvidia-devel-319.37-2.el6.i686.rpm | <span style="color: #ae81ff;">116</span> kB 00:00 +<span style="color: #f92672;">(</span>33/36<span style="color: #f92672;">)</span>: xorg-x11-drv-nvidia-devel-319.37-2.el6.x86_64.r | <span style="color: #ae81ff;">116</span> kB 00:00 +<span style="color: #f92672;">(</span>34/36<span style="color: #f92672;">)</span>: xorg-x11-drv-nvidia-libs-319.37-2.el6.i686.rpm | <span style="color: #ae81ff;">28</span> MB 00:31 +<span style="color: #f92672;">(</span>35/36<span style="color: #f92672;">)</span>: xorg-x11-drv-nvidia-libs-319.37-2.el6.x86_64.rp | <span style="color: #ae81ff;">28</span> MB 02:15 +<span style="color: #f92672;">(</span>36/36<span style="color: #f92672;">)</span>: zlib-1.2.3-29.el6.i686.rpm | <span style="color: #ae81ff;">73</span> kB 00:00 +-------------------------------------------------------------------------------- +Total <span style="color: #ae81ff;">786</span> kB/s | <span style="color: #ae81ff;">731</span> MB 15:53 + + +exiting because --downloadonly specified</code></pre></div> + +<ol start="5"> +<li>Add <em>dkms</em> as a custom package to the default image profile. All other +dependencies for NVIDIA CUDA are part of the OS distribution (RHEL 6.4).</li> +</ol> +<p>Note that the <em>plcclient.sh</em> CLI is used here to refresh the image profile in +the IBM Platform HPC Web console.</p> + +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># cp /root/CUDA5.5/dkms-2.2.0.3-17.el6.noarch.rpm /install/contrib/rhels6.4/x86_64/</span> +<span style="color: #75715e;"># plcclient.sh -d "pcmimageprofileloader"</span> +Loaders startup successfully.</code></pre></div> + +<ol start="6"> +<li>Now we are ready to create the NVIDIA CUDA 5.5 Kit for IBM Platform HPC.<br /> +The Kit will contain 3 components:</li> +</ol> +<ul> +<li>NVIDIA Display Driver</li> +<li>NVIDIA CUDA 5.5 Toolkit</li> +<li>NVIDIA CUDA 5.5 Samples and Documentation</li> +</ul> +<p>The <em>buildkit</em> CLI is used to create a new kit template. The template will be +used as the basis for the NVIDIA CUDA 5.5 Kit.
The buildkit CLI is executed +within the directory <em>/root</em>.</p> + +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># buildkit create kit-CUDA</span> +Kit template <span style="color: #66d9ef;">for</span> kit-CUDA created in /root/kit-CUDA directory</code></pre></div> + +<ol start="7"> +<li>Copy to <em>/root/kit-CUDA</em> the <em>buildkit.conf</em> file provided in <strong>Appendix A</strong>.<br /> +Note that a backup of the original <em>buildkit.conf</em> is performed.</li> +</ol> +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># mv /root/kit-CUDA/buildkit.conf /root/kit-CUDA/buildkit.conf.bak</span> +<span style="color: #75715e;"># cp buildkit.conf /root/kit-CUDA</span></code></pre></div> + +<ol start="8"> +<li>Copy the NVIDIA RPMs to the kit <em>source_packages</em> directory. Here we +create subdirectories for the RPMs matching each respective component, then +copy the correct RPMs into place.</li> +</ol> +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># mkdir /root/kit-CUDA/source_packages/cuda-samples</span> +<span style="color: #75715e;"># mkdir /root/kit-CUDA/source_packages/cuda-toolkit</span> +<span style="color: #75715e;"># mkdir /root/kit-CUDA/source_packages/nvidia-driver</span> + +<span style="color: #75715e;"># cp /root/CUDA5.5/nvidia-kmod* /root/kit-CUDA/source_packages/nvidia-driver/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/nvidia-modprobe* /root/kit-CUDA/source_packages/nvidia-driver/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/nvidia-settings* /root/kit-CUDA/source_packages/nvidia-driver/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/nvidia-xconfig* /root/kit-CUDA/source_packages/nvidia-driver/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/xorg-x11-drv-nvidia-319.37-2.el6.x86_64* /root/kit-CUDA/source_packages/nvidia-driver/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/xorg-x11-drv-nvidia-devel-319.37-2.el6.x86_64* /root/kit-CUDA/source_packages/nvidia-driver/</span> + +<span style="color: #75715e;"># cp /root/CUDA5.5/cuda-command-line-tools* /root/kit-CUDA/source_packages/cuda-toolkit/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/cuda-core* /root/kit-CUDA/source_packages/cuda-toolkit/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/cuda-extra* /root/kit-CUDA/source_packages/cuda-toolkit/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/cuda-headers* /root/kit-CUDA/source_packages/cuda-toolkit/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/cuda-license* /root/kit-CUDA/source_packages/cuda-toolkit/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/cuda-misc* /root/kit-CUDA/source_packages/cuda-toolkit/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/cuda-visual-tools* /root/kit-CUDA/source_packages/cuda-toolkit/</span> + +<span style="color: #75715e;"># cp /root/CUDA5.5/cuda-documentation* /root/kit-CUDA/source_packages/cuda-samples/</span> +<span style="color: #75715e;"># cp /root/CUDA5.5/cuda-samples* /root/kit-CUDA/source_packages/cuda-samples/</span></code></pre></div> + +<ol start="9"> +<li>Copy in place a script required to create a symbolic link <em>/usr/lib64/nvidia/libnvidia-ml.so</em>, required by the LSF <em>elim</em> script for GPUs. 
The example +script <em>createsymlink.sh</em> can be found in <strong>Appendix B</strong>.</li> +</ol> +<div class="highlight"><pre><code class="language-bash"> <span style="color: #75715e;"># mkdir /root/kit-CUDA/scripts/nvidia</span> +<span style="color: #75715e;"># cp createsymlink.sh /root/kit-CUDA/scripts/nvidia/</span> +<span style="color: #75715e;"># chmod 755 /root/kit-CUDA/scripts/nvidia/createsymlink.sh</span></code></pre></div> + +<ol start="10"> +<li>Build the kit repository and the final kit package.</li> +</ol> +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># cd /root/kit-CUDA</span> +<span style="color: #75715e;"># buildkit buildrepo all</span> +Spawning worker <span style="color: #ae81ff;">0</span> with <span style="color: #ae81ff;">22</span> pkgs +Workers Finished +Gathering worker results + +Saving Primary metadata +Saving file lists metadata +Saving other metadata +Generating sqlite DBs +Sqlite DBs complete + +<span style="color: #75715e;"># buildkit buildtar</span> +Kit tar file /root/kit-CUDA/kit-CUDA-5.5-1.tar.bz2 successfully built.</code></pre></div> + +<p><strong>B. Deploying the NVIDIA CUDA 5.5 Kit</strong></p> + +<p>In the preceding section, a detailed procedure was provided to download NVIDIA +CUDA 5.5 and to package the NVIDIA CUDA software as a kit for deployment in +cluster managed by IBM Platform HPC.</p> + +<p>Here detailed steps are provided to install and deploy the kit. Screenshots +are provided where necessary to illustrate certain operations.</p> + +<ol> +<li>In the IBM Platform HPC Web portal, select <em>Resource &gt; Provisioning Templates &gt;Image Profiles</em> and click on Copy to create a copy of the profile <em>rhels6.4-x86_64-stateful-compute</em>. The new image profile will be used for nodes equipped +with NVIDIA GPUs. The new image profile name is <em>rhels6.4-x86_64-stateful-compute_CUDA</em>.</li> +</ol> +<figure><img src="https://www.gaborsamu.com/images/image_profile.png" /> +</figure> + +<ol start="2"> +<li> +<p>Next, we add the CUDA 5.5 Kit to the Kit Library. In the IBM Platform HPC +Web portal, select <em>Resources &gt; Node Provisioning &gt; Kit Library</em> and click Add. 
Next, browse to the CUDA 5.5 Kit (<em>kit-CUDA-5.5-1.tar.bz2</em>) and add it to the Kit +Library.</p> + +</li> +<li> +<p>The image profile <em>rhels6.4-x86_64-stateful-compute_CUDA</em> is updated as +follows:</p> + +</li> +</ol> +<ul> +<li>select the custom package dkms</li> +<li>select the OS package make</li> +<li>specify Boot Parameters: rdblacklist=nouveau nouveau.modeset=0</li> +<li>enable CUDA Kit components</li> +</ul> +<p>In the IBM Platform HPC Web portal, browse to <em>Resources &gt; Node Provisioning &gt; Provisioning Templates &gt; Image Profiles</em>, select <em>rhels6.4-x86_64-stateful-compute_CUDA</em> and click on Modify.</p> + +<p>Under General, specify the Boot Parameters <em>rdblacklist=nouveau nouveau.modeset=0</em>.</p> + +<figure><img src="https://www.gaborsamu.com/images/modeset.png" /> +</figure> + +<p>Under <em>Packages &gt; Custom Packages</em> select <em>dkms.noarch</em>. +Under <em>Packages &gt; OS Packages</em> select <em>make</em> (note that the Filter can be used here).</p> + +<figure><img src="https://www.gaborsamu.com/images/packages.png" /> +</figure> + +<p>Under Kit Components select:</p> + +<ul> +<li>component-NVIDIA_Driver-5.5-1-rhels-6.4-x86_64</li> +<li>component-CUDA_Samples-5.5-1-rhels-6.4-x86_64</li> +<li>component-CUDA_Toolkit-5.5-1-rhels-6.4-x86_64</li> +</ul> +<p>Note that minimally NVIDIA_Driver and CUDA_Toolkit should be selected.</p> + +<figure><img src="https://www.gaborsamu.com/images/components.png" /> +</figure> + +<ol start="4"> +<li>Next, the nodes equipped with NVIDIA GPUs are provisioned. Here Auto +discovery by PXE boot is used to provision the nodes using the newly created +image profile <em>rhels6.4-x86_64-stateful-compute_CUDA</em>.</li> +</ol> +<p>In the IBM Platform HPC Web console select <em>Resources &gt; Devices &gt; Nodes</em> and +click on the Add button. Here we specify the following provisioning template +properties:</p> + +<ul> +<li>Image profile rhels6.4-x86_64-stateful-compute_CUDA</li> +<li>Network profile default_network_profile</li> +<li>Hardware profile IPMI</li> +</ul> +<figure><img src="https://www.gaborsamu.com/images/provision.png" /> +</figure> + +<p>Then we specify Auto discovery by PXE boot and power on the nodes. <em>compute000-compute002</em> are provisioned including the NVIDIA CUDA 5.5 Kit components:</p> + +<ul> +<li>component-NVIDIA_Driver-5.5-1-rhels-6.4-x86_64</li> +<li>component-CUDA_Samples-5.5-1-rhels-6.4-x86_64</li> +<li>component-CUDA_Toolkit-5.5-1-rhels-6.4-x86_64</li> +</ul> +<figure><img src="https://www.gaborsamu.com/images/provision3.png" /> +</figure> + +<ol start="5"> +<li>After provisioning, check the installation of the NVIDIA Driver and CUDA +stack. <em>xdsh</em> is used here to concurrently execute the NVIDIA CLI <em>nvidia-smi</em> +across the compute nodes.</li> +</ol> +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># xdsh compute00[0-2] nvidia-smi -L</span> +compute000: GPU 0: Tesla K20c <span style="color: #f92672;">(</span>UUID: GPU-46d00ece-a26d-5f8c-c695-23e525a88075<span style="color: #f92672;">)</span> +compute002: GPU 0: Tesla K40c <span style="color: #f92672;">(</span>UUID: GPU-e3ac8955-6a76-12e1-0786-ec336b0b3824<span style="color: #f92672;">)</span> +compute001: GPU 0: Tesla K40c <span style="color: #f92672;">(</span>UUID: GPU-ba2733d8-4473-0a69-af14-e80008568a42<span style="color: #f92672;">)</span></code></pre></div> + +<ol start="6"> +<li>Next, compile and execute the NVIDIA CUDA <em>deviceQuery</em> sample.
Be sure to +check the output for any errors.</li> +</ol> +<div class="highlight"><pre><code class="language-bash"> <span style="color: #75715e;"># xdsh compute00[0-2] -f 1 "cd /usr/local/cuda-5.5/samples/1_Utilities/deviceQuery;make"</span> +compute000: /usr/local/cuda-5.5/bin/nvcc -ccbin g++ -I../../common/inc -m64 -gencode arch<span style="color: #f92672;">=</span>compute_10,code<span style="color: #f92672;">=</span>sm_10 -gencode arch<span style="color: #f92672;">=</span>compute_20,code<span style="color: #f92672;">=</span>sm_20 -gencode arch<span style="color: #f92672;">=</span>compute_30,code<span style="color: #f92672;">=</span>sm_30 -gencode arch<span style="color: #f92672;">=</span>compute_35,code<span style="color: #f92672;">=</span><span style="color: #ae81ff;">\"</span>sm_35,compute_35<span style="color: #ae81ff;">\"</span> -o deviceQuery.o -c deviceQuery.cpp +compute000: /usr/local/cuda-5.5/bin/nvcc -ccbin g++ -m64 -o deviceQuery deviceQuery.o +compute000: mkdir -p ../../bin/x86_64/linux/release +compute000: cp deviceQuery ../../bin/x86_64/linux/release +compute001: /usr/local/cuda-5.5/bin/nvcc -ccbin g++ -I../../common/inc -m64 -gencode arch<span style="color: #f92672;">=</span>compute_10,code<span style="color: #f92672;">=</span>sm_10 -gencode arch<span style="color: #f92672;">=</span>compute_20,code<span style="color: #f92672;">=</span>sm_20 -gencode arch<span style="color: #f92672;">=</span>compute_30,code<span style="color: #f92672;">=</span>sm_30 -gencode arch<span style="color: #f92672;">=</span>compute_35,code<span style="color: #f92672;">=</span><span style="color: #ae81ff;">\"</span>sm_35,compute_35<span style="color: #ae81ff;">\"</span> -o deviceQuery.o -c deviceQuery.cpp +compute001: /usr/local/cuda-5.5/bin/nvcc -ccbin g++ -m64 -o deviceQuery deviceQuery.o +compute001: mkdir -p ../../bin/x86_64/linux/release +compute001: cp deviceQuery ../../bin/x86_64/linux/release +compute002: /usr/local/cuda-5.5/bin/nvcc -ccbin g++ -I../../common/inc -m64 -gencode arch<span style="color: #f92672;">=</span>compute_10,code<span style="color: #f92672;">=</span>sm_10 -gencode arch<span style="color: #f92672;">=</span>compute_20,code<span style="color: #f92672;">=</span>sm_20 -gencode arch<span style="color: #f92672;">=</span>compute_30,code<span style="color: #f92672;">=</span>sm_30 -gencode arch<span style="color: #f92672;">=</span>compute_35,code<span style="color: #f92672;">=</span><span style="color: #ae81ff;">\"</span>sm_35,compute_35<span style="color: #ae81ff;">\"</span> -o deviceQuery.o -c deviceQuery.cpp +compute002: /usr/local/cuda-5.5/bin/nvcc -ccbin g++ -m64 -o deviceQuery deviceQuery.o +compute002: mkdir -p ../../bin/x86_64/linux/release +compute002: cp deviceQuery ../../bin/x86_64/linux/release + +<span style="color: #75715e;"># xdsh compute00[0-2] -f 1 /usr/local/cuda-5.5/samples/1_Utilities/deviceQuery/deviceQuery</span> +compute000: /usr/local/cuda-5.5/samples/1_Utilities/deviceQuery/deviceQuery Starting... 
+compute000: +compute000: CUDA Device Query <span style="color: #f92672;">(</span>Runtime API<span style="color: #f92672;">)</span> version <span style="color: #f92672;">(</span>CUDART static linking<span style="color: #f92672;">)</span> +compute000: +compute000: Detected <span style="color: #ae81ff;">1</span> CUDA Capable device<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span> +compute000: +compute000: Device 0: <span style="color: #e6db74;">"Tesla K20c"</span> +compute000: CUDA Driver Version / Runtime Version 5.5 / 5.5 +compute000: CUDA Capability Major/Minor version number: 3.5 +compute000: Total amount of global memory: <span style="color: #ae81ff;">4800</span> MBytes <span style="color: #f92672;">(</span><span style="color: #ae81ff;">5032706048</span> bytes<span style="color: #f92672;">)</span> +compute000: <span style="color: #f92672;">(</span>13<span style="color: #f92672;">)</span> Multiprocessors, <span style="color: #f92672;">(</span>192<span style="color: #f92672;">)</span> CUDA Cores/MP: <span style="color: #ae81ff;">2496</span> CUDA Cores +compute000: GPU Clock rate: <span style="color: #ae81ff;">706</span> MHz <span style="color: #f92672;">(</span>0.71 GHz<span style="color: #f92672;">)</span> +compute000: Memory Clock rate: <span style="color: #ae81ff;">2600</span> Mhz +compute000: Memory Bus Width: 320-bit +compute000: L2 Cache Size: <span style="color: #ae81ff;">1310720</span> bytes +compute000: Maximum Texture Dimension Size <span style="color: #f92672;">(</span>x,y,z<span style="color: #f92672;">)</span> 1D<span style="color: #f92672;">=(</span>65536<span style="color: #f92672;">)</span>, 2D<span style="color: #f92672;">=(</span>65536, 65536<span style="color: #f92672;">)</span>, 3D<span style="color: #f92672;">=(</span>4096, 4096, 4096<span style="color: #f92672;">)</span> +compute000: Maximum Layered 1D Texture Size, <span style="color: #f92672;">(</span>num<span style="color: #f92672;">)</span> layers 1D<span style="color: #f92672;">=(</span>16384<span style="color: #f92672;">)</span>, <span style="color: #ae81ff;">2048</span> layers +compute000: Maximum Layered 2D Texture Size, <span style="color: #f92672;">(</span>num<span style="color: #f92672;">)</span> layers 2D<span style="color: #f92672;">=(</span>16384, 16384<span style="color: #f92672;">)</span>, <span style="color: #ae81ff;">2048</span> layers +compute000: Total amount of constant memory: <span style="color: #ae81ff;">65536</span> bytes +compute000: Total amount of shared memory per block: <span style="color: #ae81ff;">49152</span> bytes +compute000: Total number of registers available per block: <span style="color: #ae81ff;">65536</span> +compute000: Warp size: <span style="color: #ae81ff;">32</span> +compute000: Maximum number of threads per multiprocessor: <span style="color: #ae81ff;">2048</span> +compute000: Maximum number of threads per block: <span style="color: #ae81ff;">1024</span> +compute000: Max dimension size of a thread block <span style="color: #f92672;">(</span>x,y,z<span style="color: #f92672;">)</span>: <span style="color: #f92672;">(</span>1024, 1024, 64<span style="color: #f92672;">)</span> +compute000: Max dimension size of a grid size <span style="color: #f92672;">(</span>x,y,z<span style="color: #f92672;">)</span>: <span style="color: #f92672;">(</span>2147483647, 65535, 65535<span style="color: #f92672;">)</span> +compute000: Maximum memory pitch: <span style="color: #ae81ff;">2147483647</span> bytes +compute000: Texture alignment: <span style="color: 
#ae81ff;">512</span> bytes
+compute000: Concurrent copy and kernel execution: Yes with <span style="color: #ae81ff;">2</span> copy engine<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span>
+compute000: Run time limit on kernels: No
+compute000: Integrated GPU sharing Host Memory: No
+compute000: Support host page-locked memory mapping: Yes
+compute000: Alignment requirement <span style="color: #66d9ef;">for</span> Surfaces: Yes
+compute000: Device has ECC support: Enabled
+compute000: Device supports Unified Addressing <span style="color: #f92672;">(</span>UVA<span style="color: #f92672;">)</span>: Yes
+compute000: Device PCI Bus ID / PCI location ID: <span style="color: #ae81ff;">10</span> / <span style="color: #ae81ff;">0</span>
+compute000: Compute Mode:
+compute000: &lt; Default <span style="color: #f92672;">(</span>multiple host threads can use ::cudaSetDevice<span style="color: #f92672;">()</span> with device simultaneously<span style="color: #f92672;">)</span> &gt;
+compute000:
+compute000: deviceQuery, CUDA Driver <span style="color: #f92672;">=</span> CUDART, CUDA Driver Version <span style="color: #f92672;">=</span> 5.5, CUDA Runtime Version <span style="color: #f92672;">=</span> 5.5, NumDevs <span style="color: #f92672;">=</span> 1, Device0 <span style="color: #f92672;">=</span> Tesla K20c
+compute000: Result <span style="color: #f92672;">=</span> PASS
+compute001: /usr/local/cuda-5.5/samples/1_Utilities/deviceQuery/deviceQuery Starting...
+....
+
+....
+
+compute002: /usr/local/cuda-5.5/samples/1_Utilities/deviceQuery/deviceQuery Starting...
+....
+
+....
+
+compute002: deviceQuery, CUDA Driver <span style="color: #f92672;">=</span> CUDART, CUDA Driver Version <span style="color: #f92672;">=</span> 5.5, CUDA Runtime Version <span style="color: #f92672;">=</span> 5.5, NumDevs <span style="color: #f92672;">=</span> 1, Device0 <span style="color: #f92672;">=</span> Tesla K40c
+compute002: Result <span style="color: #f92672;">=</span> PASS</code></pre></div>
+
+<p>Up to this point, we have produced a CUDA 5.5 Kit and deployed nodes with
+CUDA 5.5 installed. Furthermore, we have verified the correct installation of the NVIDIA
+driver as well as the CUDA <em>deviceQuery</em> sample.</p>
+
+<p><strong>C. Enable Management of NVIDIA GPU devices</strong></p>
+
+<p>The IBM Platform HPC Administration Guide contains detailed steps on enabling
+NVIDIA GPU monitoring. This can be found in Chapter 10 in the section titled
+<em>Enabling the GPU</em>.</p>
+
+<p>Below, as the user root, we make the necessary updates to the IBM Platform
+HPC (and workload manager) configuration files to enable LSF GPU support.</p>
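+
+<p>For context, the ELIM (External Load Information Manager) referenced below is a small executable in <em>$LSF_SERVERDIR</em> that the LIM starts on each host; it periodically writes the number of resources, followed by name/value pairs, to standard output. IBM Platform HPC ships its own elim binary for NVIDIA GPUs (see Appendix B), so nothing here needs to be written by hand; the sketch below is purely an illustration of the reporting format, covering only the <em>ngpus</em> resource defined in step 1.</p>
+
+<div class="highlight"><pre><code class="language-bash">#!/bin/sh
+# Illustrative sketch of the LSF elim reporting protocol only.
+# IBM Platform HPC provides its own elim for NVIDIA GPUs; do not deploy this.
+while true; do
+    NGPUS=$(nvidia-smi -L 2&gt;/dev/null | wc -l)
+    # Format: &lt;number of resources&gt; &lt;name1&gt; &lt;value1&gt; [&lt;name2&gt; &lt;value2&gt; ...]
+    echo "1 ngpus $NGPUS"
+    sleep 60
+done</code></pre></div>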
+<ol>
+<li>Define the GPU processing resources reported by ELIM in <em>$LSF_ENVDIR/lsf.shared</em>. You can define a new resources section to include the following resource
+definitions:</li>
+</ol>
+<div class="highlight"><pre><code class="language-bash">Begin Resource
+RESOURCENAME TYPE INTERVAL INCREASING CONSUMABLE DESCRIPTION <span style="color: #75715e;"># Keywords</span>
+ngpus Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>Number of GPUs<span style="color: #f92672;">)</span>
+gpushared Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>Number of GPUs in Shared Mode<span style="color: #f92672;">)</span>
+gpuexcl_thrd Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>Number of GPUs in Exclusive thread Mode<span style="color: #f92672;">)</span>
+gpuprohibited Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>Number of GPUs in Prohibited Mode<span style="color: #f92672;">)</span>
+gpuexcl_proc Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>Number of GPUs in Exclusive process Mode<span style="color: #f92672;">)</span>
+gpumode0 Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>Mode of 1st GPU<span style="color: #f92672;">)</span>
+gputemp0 Numeric <span style="color: #ae81ff;">60</span> Y <span style="color: #f92672;">(</span>Temperature of 1st GPU<span style="color: #f92672;">)</span>
+gpuecc0 Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>ECC errors on 1st GPU<span style="color: #f92672;">)</span>
+gpumode1 Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>Mode of 2nd GPU<span style="color: #f92672;">)</span>
+gputemp1 Numeric <span style="color: #ae81ff;">60</span> Y <span style="color: #f92672;">(</span>Temperature of 2nd GPU<span style="color: #f92672;">)</span>
+gpuecc1 Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>ECC errors on 2nd GPU<span style="color: #f92672;">)</span>
+gpumode2 Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>Mode of 3rd GPU<span style="color: #f92672;">)</span>
+gputemp2 Numeric <span style="color: #ae81ff;">60</span> Y <span style="color: #f92672;">(</span>Temperature of 3rd GPU<span style="color: #f92672;">)</span>
+gpuecc2 Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>ECC errors on 3rd GPU<span style="color: #f92672;">)</span>
+gpumode3 Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>Mode of 4th GPU<span style="color: #f92672;">)</span>
+gputemp3 Numeric <span style="color: #ae81ff;">60</span> Y <span style="color: #f92672;">(</span>Temperature of 4th GPU<span style="color: #f92672;">)</span>
+gpuecc3 Numeric <span style="color: #ae81ff;">60</span> N <span style="color: #f92672;">(</span>ECC errors on 4th GPU<span style="color: #f92672;">)</span>
+gpudriver String <span style="color: #ae81ff;">60</span> <span style="color: #f92672;">()</span> <span style="color: #f92672;">(</span>GPU driver version<span style="color: #f92672;">)</span>
+gpumodel0 String <span style="color: #ae81ff;">60</span> <span style="color: #f92672;">()</span> <span style="color: #f92672;">(</span>Model name of 1st GPU<span style="color: #f92672;">)</span>
+gpumodel1 String <span style="color: #ae81ff;">60</span> <span style="color: #f92672;">()</span> <span style="color: #f92672;">(</span>Model name of 2nd
GPU<span style="color: #f92672;">)</span> +gpumodel2 String <span style="color: #ae81ff;">60</span> <span style="color: #f92672;">()</span> <span style="color: #f92672;">(</span>Model name of 3rd GPU<span style="color: #f92672;">)</span> +gpumodel3 String <span style="color: #ae81ff;">60</span> <span style="color: #f92672;">()</span> <span style="color: #f92672;">(</span>Model name of 4th GPU<span style="color: #f92672;">)</span> +End Resource</code></pre></div> + +<ol start="2"> +<li>Define the following resource map to support GPU processing in <em>$LSF_ENVDIR/lsf.cluster.cluster-name</em>, where <em>cluster-name</em> is the name of your cluster.</li> +</ol> +<div class="highlight"><pre><code class="language-bash">Begin ResourceMap +RESOURCENAME LOCATION +ngpus <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpushared <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpuexcl_thrd <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpuprohibited <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpuexcl_proc <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpumode0 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gputemp0 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpuecc0 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpumode1 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gputemp1 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpuecc1 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpumode2 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gputemp2 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpuecc2 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpumode3 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gputemp3 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpuecc3 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpumodel0 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpumodel1 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpumodel2 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpumodel3 <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +gpudriver <span style="color: #f92672;">[</span>default<span style="color: #f92672;">]</span> +End ResourceMap</code></pre></div> + +<ol start="3"> +<li>To configure the newly defined resources, run the following command on the +IBM Platform HPC management node. Here we must restart the LIMs on all hosts.</li> +</ol> +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># lsadmin reconfig</span> + +Checking configuration files ... +No errors found. + +Restart only the master candidate hosts? <span style="color: #f92672;">[</span>y/n<span style="color: #f92672;">]</span> n +Do you really want to restart LIMs on all hosts? <span style="color: #f92672;">[</span>y/n<span style="color: #f92672;">]</span> y +Restart LIM on &lt;hpc4111tete&gt; ...... 
<span style="color: #66d9ef;">done</span>
+Restart LIM on &lt;compute000&gt; ...... <span style="color: #66d9ef;">done</span>
+Restart LIM on &lt;compute001&gt; ...... <span style="color: #66d9ef;">done</span>
+Restart LIM on &lt;compute002&gt; ...... <span style="color: #66d9ef;">done</span></code></pre></div>
+
+<ol start="4">
+<li>Define how NVIDIA CUDA jobs are submitted to LSF in <em>$LSF_ENVDIR/lsbatch/cluster-name/configdir/lsb.applications</em>, where <em>cluster-name</em> is the name of your
+cluster.</li>
+</ol>
+<div class="highlight"><pre><code class="language-bash">Begin Application
+NAME <span style="color: #f92672;">=</span> nvjobsh
+DESCRIPTION <span style="color: #f92672;">=</span> NVIDIA Shared GPU Jobs
+JOB_STARTER <span style="color: #f92672;">=</span> nvjob <span style="color: #e6db74;">"%USRCMD"</span>
+RES_REQ <span style="color: #f92672;">=</span> <span style="color: #66d9ef;">select</span><span style="color: #f92672;">[</span>gpushared&gt;0<span style="color: #f92672;">]</span>
+End Application
+
+Begin Application
+NAME <span style="color: #f92672;">=</span> nvjobex_t
+DESCRIPTION <span style="color: #f92672;">=</span> NVIDIA Exclusive GPU Jobs
+JOB_STARTER <span style="color: #f92672;">=</span> nvjob <span style="color: #e6db74;">"%USRCMD"</span>
+RES_REQ <span style="color: #f92672;">=</span> rusage<span style="color: #f92672;">[</span>gpuexcl_thrd<span style="color: #f92672;">=</span>1<span style="color: #f92672;">]</span>
+End Application
+
+Begin Application
+NAME <span style="color: #f92672;">=</span> nvjobex2_t
+DESCRIPTION <span style="color: #f92672;">=</span> NVIDIA Exclusive GPU Jobs
+JOB_STARTER <span style="color: #f92672;">=</span> nvjob <span style="color: #e6db74;">"%USRCMD"</span>
+RES_REQ <span style="color: #f92672;">=</span> rusage<span style="color: #f92672;">[</span>gpuexcl_thrd<span style="color: #f92672;">=</span>2<span style="color: #f92672;">]</span>
+End Application
+
+Begin Application
+NAME <span style="color: #f92672;">=</span> nvjobex_p
+DESCRIPTION <span style="color: #f92672;">=</span> NVIDIA Exclusive-process GPU Jobs
+JOB_STARTER <span style="color: #f92672;">=</span> nvjob <span style="color: #e6db74;">"%USRCMD"</span>
+RES_REQ <span style="color: #f92672;">=</span> rusage<span style="color: #f92672;">[</span>gpuexcl_proc<span style="color: #f92672;">=</span>1<span style="color: #f92672;">]</span>
+End Application
+
+Begin Application
+NAME <span style="color: #f92672;">=</span> nvjobex2_p
+DESCRIPTION <span style="color: #f92672;">=</span> NVIDIA Exclusive-process GPU Jobs
+JOB_STARTER <span style="color: #f92672;">=</span> nvjob <span style="color: #e6db74;">"%USRCMD"</span>
+RES_REQ <span style="color: #f92672;">=</span> rusage<span style="color: #f92672;">[</span>gpuexcl_proc<span style="color: #f92672;">=</span>2<span style="color: #f92672;">]</span>
+End Application</code></pre></div>
+
+<ol start="5">
+<li>To add the GPU-related application profiles, issue the following command on
+the IBM Platform HPC management node.</li>
+</ol>
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># badmin reconfig</span>
+
+Checking configuration files ...
+
+No errors found.
+
+Reconfiguration initiated</code></pre></div>
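+
+<p>Before enabling the Web portal view in the next step, it can be worth a quick sanity check that the elim values are actually being reported. The commands below are an optional check and not part of the documented procedure; the resource names are the ones defined in <em>lsf.shared</em> above.</p>
+
+<div class="highlight"><pre><code class="language-bash"># Optional check: confirm the GPU resources are defined in the cluster...
+# lsinfo | grep gpu
+# ...and display the ELIM-reported load indices for a GPU node:
+# lsload -l compute000</code></pre></div>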
+<ol start="6">
+<li>To enable monitoring GPU-related metrics in the IBM Platform HPC Web portal,
+modify the <em>$PMC_TOP/gui/conf/pmc.conf</em> configuration file by setting the
+variable <em>ENABLE_GPU_MONITORING</em> equal to <em>Y</em>. To make the change take effect,
+it is necessary to restart the IBM Platform HPC Web portal server.</li>
+</ol>
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># service pmc stop</span>
+<span style="color: #75715e;"># service pmc start</span></code></pre></div>
+
+<ol start="7">
+<li>Within the IBM Platform HPC Web portal, the GPU tab is now present for
+nodes equipped with NVIDIA GPUs. Browse to <em>Resources &gt; Devices &gt; Nodes</em> and
+select the GPU tab.</li>
+</ol>
+<figure><img src="https://www.gaborsamu.com/images/gputab.png" />
+</figure>
+
+<p><strong>D. Job Submission - specifying GPU resources</strong></p>
+
+<p>The IBM Platform HPC Administration Guide contains detailed steps for submitting jobs
+to GPU resources. This can be found in Chapter 10 in the section titled
+<em>Submitting jobs to a cluster by specifying GPU resources</em>. Please refer to the
+IBM Platform HPC Administration Guide for detailed information.</p>
+
+<p>Here we look at a simple example. In Section C, we configured a number of
+application profiles specific to GPU jobs.</p>
+
+<ol>
+<li>The list of application profiles can be displayed using the <em>bapp</em> CLI:</li>
+</ol>
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># bapp</span>
+APP_NAME NJOBS PEND RUN SUSP
+nvjobex2_p <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span>
+nvjobex2_t <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span>
+nvjobex_p <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span>
+nvjobex_t <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span>
+nvjobsh <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span></code></pre></div>
+
+<p>Furthermore, details regarding a specific profile can be obtained as follows:</p>
+
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># bapp -l nvjobsh</span>
+
+APPLICATION NAME: nvjobsh
+ -- NVIDIA Shared GPU Jobs
+
+STATISTICS:
+ NJOBS PEND RUN SSUSP USUSP RSV
+ <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">0</span>
+
+PARAMETERS:
+
+JOB_STARTER: nvjob <span style="color: #e6db74;">"%USRCMD"</span>
+RES_REQ: <span style="color: #66d9ef;">select</span><span style="color: #f92672;">[</span>gpushared&gt;0<span style="color: #f92672;">]</span></code></pre></div>
+
+<ol start="2">
+<li>To submit a job for execution on a GPU resource, the following syntax is
+used. Note here that the <em>deviceQuery</em> application (previously compiled) is
+submitted for execution.
The following command is run as user <em>phpcadmin</em>.<br /> +The <em>bsub -a</em> option is used to specify the application profile at the time of +job submission.</li> +</ol> +<div class="highlight"><pre><code class="language-bash"> $ bsub -I -a gpushared /usr/local/cuda-5.5/samples/1_Utilities/deviceQuery/deviceQuery +Job &lt;208&gt; is submitted to default queue &lt;medium_priority&gt;. +<span style="color: #e6db74;">&lt;&lt;Waiting for dispatch ...&gt;&gt; +</span><span style="color: #e6db74;">&lt;&lt;Starting on compute000&gt;&gt; +</span><span style="color: #e6db74;">/usr/local/cuda-5.5/samples/1_Utilities/deviceQuery/deviceQuery Starting... +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;"> CUDA Device Query (Runtime API) version (CUDART static linking) +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">Detected 1 CUDA Capable device(s) +</span><span style="color: #e6db74;"> +</span><span style="color: #e6db74;">Device 0: "Tesla K20c" +</span><span style="color: #e6db74;"> CUDA Driver Version / Runtime Version 5.5 / 5.5 +</span><span style="color: #e6db74;"> CUDA Capability Major/Minor version number: 3.5 +</span><span style="color: #e6db74;"> Total amount of global memory: 4800 MBytes (5032706048 bytes) +</span><span style="color: #e6db74;"> (13) Multiprocessors, (192) CUDA Cores/MP: 2496 CUDA Cores +</span><span style="color: #e6db74;"> GPU Clock rate: 706 MHz (0.71 GHz) +</span><span style="color: #e6db74;"> Memory Clock rate: 2600 Mhz +</span><span style="color: #e6db74;"> Memory Bus W</span>idth: 320-bit + L2 Cache Size: <span style="color: #ae81ff;">1310720</span> bytes + Maximum Texture Dimension Size <span style="color: #f92672;">(</span>x,y,z<span style="color: #f92672;">)</span> 1D<span style="color: #f92672;">=(</span>65536<span style="color: #f92672;">)</span>, 2D<span style="color: #f92672;">=(</span>65536, 65536<span style="color: #f92672;">)</span>, 3D<span style="color: #f92672;">=(</span>4096, 4096, 4096<span style="color: #f92672;">)</span> + Maximum Layered 1D Texture Size, <span style="color: #f92672;">(</span>num<span style="color: #f92672;">)</span> layers 1D<span style="color: #f92672;">=(</span>16384<span style="color: #f92672;">)</span>, <span style="color: #ae81ff;">2048</span> layers + Maximum Layered 2D Texture Size, <span style="color: #f92672;">(</span>num<span style="color: #f92672;">)</span> layers 2D<span style="color: #f92672;">=(</span>16384, 16384<span style="color: #f92672;">)</span>, <span style="color: #ae81ff;">2048</span> layers + Total amount of constant memory: <span style="color: #ae81ff;">65536</span> bytes + Total amount of shared memory per block: <span style="color: #ae81ff;">49152</span> bytes + Total number of registers available per block: <span style="color: #ae81ff;">65536</span> +.... + +.... 
+ + Compute Mode: + &lt; Default <span style="color: #f92672;">(</span>multiple host threads can use ::cudaSetDevice<span style="color: #f92672;">()</span> with device simultaneously<span style="color: #f92672;">)</span> &gt; + +deviceQuery, CUDA Driver <span style="color: #f92672;">=</span> CUDART, CUDA Driver Version <span style="color: #f92672;">=</span> 5.5, CUDA Runtime Version <span style="color: #f92672;">=</span> 5.5, NumDevs <span style="color: #f92672;">=</span> 1, Device0 <span style="color: #f92672;">=</span> Tesla K20c +Result <span style="color: #f92672;">=</span> PASS</code></pre></div> + +<p><strong>Appendix A: Example NVIDIA CUDA 5.5 Kit template (buildkit.conf)</strong></p> + +<div class="highlight"><pre><code class="language-bash"> <span style="color: #75715e;"># Copyright International Business Machine Corporation, 2012-2013</span> + +<span style="color: #75715e;"># This information contains sample application programs in source language, which</span> +<span style="color: #75715e;"># illustrates programming techniques on various operating platforms. You may copy,</span> +<span style="color: #75715e;"># modify, and distribute these sample programs in any form without payment to IBM,</span> +<span style="color: #75715e;"># for the purposes of developing, using, marketing or distributing application</span> +<span style="color: #75715e;"># programs conforming to the application programming interface for the operating</span> +<span style="color: #75715e;"># platform for which the sample programs are written. These examples have not been</span> +<span style="color: #75715e;"># thoroughly tested under all conditions. IBM, therefore, cannot guarantee or</span> +<span style="color: #75715e;"># imply reliability, serviceability, or function of these programs. The sample</span> +<span style="color: #75715e;"># programs are provided "AS IS", without warranty of any kind. IBM shall not be</span> +<span style="color: #75715e;"># liable for any damages arising out of your use of the sample programs.</span> + +<span style="color: #75715e;"># Each copy or any portion of these sample programs or any derivative work, must</span> +<span style="color: #75715e;"># include a copyright notice as follows:</span> + +<span style="color: #75715e;"># (C) Copyright IBM Corp. 2012-2013.</span> + +<span style="color: #75715e;"># Kit Build File</span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Copyright International Business Machine Corporation, 2012-2013</span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># This example buildkit.conf file may be used to build a NVIDIA CUDA 5.5 Kit</span> +<span style="color: #75715e;"># for Red Hat Enterprise Linux 6.4 (x64). </span> +<span style="color: #75715e;"># The Kit will be comprised of 3 components allowing you to install:</span> +<span style="color: #75715e;"># 1. NVIDIA Display Driver</span> +<span style="color: #75715e;"># 2. NVIDIA CUDA 5.5 Toolkit</span> +<span style="color: #75715e;"># 3. 
NVIDIA CUDA 5.5 Samples and Documentation</span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Refer to the buildkit manpage for further details.</span> +<span style="color: #75715e;">#</span> +kit: + basename<span style="color: #f92672;">=</span>kit-CUDA + description<span style="color: #f92672;">=</span>NVIDIA CUDA 5.5 Kit + version<span style="color: #f92672;">=</span>5.5 + release<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span> + ostype<span style="color: #f92672;">=</span>Linux + kitlicense<span style="color: #f92672;">=</span>Proprietary + + +kitrepo: + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + osbasename<span style="color: #f92672;">=</span>rhels + osmajorversion<span style="color: #f92672;">=</span><span style="color: #ae81ff;">6</span> + osminorversion<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4</span> + osarch<span style="color: #f92672;">=</span>x86_64 + + +kitcomponent: + basename<span style="color: #f92672;">=</span>component-NVIDIA_Driver + description<span style="color: #f92672;">=</span>NVIDIA Display Driver 319.37 + serverroles<span style="color: #f92672;">=</span>mgmt,compute + ospkgdeps<span style="color: #f92672;">=</span>make + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + kitpkgdeps<span style="color: #f92672;">=</span>nvidia-kmod,nvidia-modprobe,nvidia-settings,nvidia-xconfig,xorg-x11-drv-nvidia,xorg-x11-drv-nvidia-devel,xorg-x11-drv-nvidia-libs + postinstall<span style="color: #f92672;">=</span>nvidia/createsymlink.sh + +kitcomponent: + basename<span style="color: #f92672;">=</span>component-CUDA_Toolkit + description<span style="color: #f92672;">=</span>NVIDIA CUDA 5.5 Toolkit 5.5-22 + serverroles<span style="color: #f92672;">=</span>mgmt,compute + ospkgdeps<span style="color: #f92672;">=</span>gcc-c++ + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + kitpkgdeps<span style="color: #f92672;">=</span>cuda-command-line-tools,cuda-core,cuda-core-libs,cuda-extra-libs,cuda-headers,cuda-license,cuda-misc,cuda-visual-tools + +kitcomponent: + basename<span style="color: #f92672;">=</span>component-CUDA_Samples + description<span style="color: #f92672;">=</span>NVIDIA CUDA 5.5 Samples and Documentation 5.5-22 + serverroles<span style="color: #f92672;">=</span>mgmt,compute + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + kitpkgdeps<span style="color: #f92672;">=</span>cuda-documentation,cuda-samples + + +kitpackage: + filename<span style="color: #f92672;">=</span>nvidia-kmod-*.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>nvidia-driver + +kitpackage: + filename<span style="color: #f92672;">=</span>nvidia-modprobe-*.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>nvidia-driver + +kitpackage: + filename<span style="color: #f92672;">=</span>nvidia-settings-*.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>nvidia-driver + +kitpackage: + filename<span 
style="color: #f92672;">=</span>nvidia-xconfig-*.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>nvidia-driver + +kitpackage: + filename<span style="color: #f92672;">=</span>xorg-x11-drv-nvidia-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>nvidia-driver + +kitpackage: + filename<span style="color: #f92672;">=</span>xorg-x11-drv-nvidia-devel-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>nvidia-driver + +kitpackage: + filename<span style="color: #f92672;">=</span>xorg-x11-drv-nvidia-libs-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>nvidia-driver + +kitpackage: + filename<span style="color: #f92672;">=</span>cuda-command-line-tools-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-toolkit + +kitpackage: + filename<span style="color: #f92672;">=</span>cuda-core-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-toolkit + +kitpackage: + filename<span style="color: #f92672;">=</span>cuda-core-libs-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-toolkit + +kitpackage: + filename<span style="color: #f92672;">=</span>cuda-extra-libs-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-toolkit + +kitpackage: + filename<span style="color: #f92672;">=</span>cuda-headers-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-toolkit + +kitpackage: + filename<span style="color: #f92672;">=</span>cuda-license-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-toolkit + +kitpackage: + filename<span style="color: 
#f92672;">=</span>cuda-misc-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-toolkit + +kitpackage: + filename<span style="color: #f92672;">=</span>cuda-visual-tools-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-toolkit + +kitpackage: + filename<span style="color: #f92672;">=</span>cuda-documentation-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-samples + +kitpackage: + filename<span style="color: #f92672;">=</span>cuda-samples-*.x86_64.rpm + kitrepoid<span style="color: #f92672;">=</span>rhels6.4 + <span style="color: #75715e;"># Method 1: Use pre-built RPM package</span> + isexternalpkg<span style="color: #f92672;">=</span>no + rpm_prebuiltdir<span style="color: #f92672;">=</span>cuda-samples</code></pre></div> + +<p><strong>Appendix B: createsymlink.sh</strong></p> + +<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># Copyright International Business Machine Corporation, 2012-2013</span> + +<span style="color: #75715e;"># This information contains sample application programs in source language, which</span> +<span style="color: #75715e;"># illustrates programming techniques on various operating platforms. You may copy,</span> +<span style="color: #75715e;"># modify, and distribute these sample programs in any form without payment to IBM,</span> +<span style="color: #75715e;"># for the purposes of developing, using, marketing or distributing application</span> +<span style="color: #75715e;"># programs conforming to the application programming interface for the operating</span> +<span style="color: #75715e;"># platform for which the sample programs are written. These examples have not been</span> +<span style="color: #75715e;"># thoroughly tested under all conditions. IBM, therefore, cannot guarantee or</span> +<span style="color: #75715e;"># imply reliability, serviceability, or function of these programs. The sample</span> +<span style="color: #75715e;"># programs are provided "AS IS", without warranty of any kind. IBM shall not be</span> +<span style="color: #75715e;"># liable for any damages arising out of your use of the sample programs.</span> + +<span style="color: #75715e;"># Each copy or any portion of these sample programs or any derivative work, must</span> +<span style="color: #75715e;"># include a copyright notice as follows:</span> + +<span style="color: #75715e;"># (C) Copyright IBM Corp. 
2012-2013.</span>
+<span style="color: #75715e;"># createsymlink.sh</span>
+<span style="color: #75715e;"># The script will produce a symbolic link required by the</span>
+<span style="color: #75715e;"># IBM Platform LSF elim for NVIDIA GPUs.</span>
+<span style="color: #75715e;">#</span>
+<span style="color: #75715e;">#!/bin/sh</span>
+LIBNVIDIA<span style="color: #f92672;">=</span><span style="color: #e6db74;">"/usr/lib64/nvidia/libnvidia-ml.so"</span>
+<span style="color: #66d9ef;">if</span> <span style="color: #f92672;">[</span> -a <span style="color: #e6db74;">"</span>$LIBNVIDIA<span style="color: #e6db74;">"</span> <span style="color: #f92672;">]</span>
+<span style="color: #66d9ef;">then</span>
+ln -s /usr/lib64/nvidia/libnvidia-ml.so /usr/lib64/libnvidia-ml.so
+/etc/init.d/lsf stop
+/etc/init.d/lsf start
+<span style="color: #66d9ef;">fi</span>
+exit <span style="color: #ae81ff;">0</span></code></pre></div>
+
+
+
+
+ Supercomputing 2013 (SC13)
+
+ 2013-11-22T16:44:50-07:00
+ https://hpc.social/2013/supercomputing-2013-sc13-
+ <p>Supercomputing 2013 has now come to a close. For those of you who were in
+Denver, we hope that you had the opportunity to visit the IBM booth. Among
+the many live demonstrations running at the IBM booth, there was a demo of
+IBM Platform HPC for System x.</p>
+
+<p>In addition to the demo running live on IBM NeXtScale, there was also a static
+IBM NeXtScale system on display for people to touch and see.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/nextscale_booth2.png" />
+</figure>
+
+<p>The IBM Platform HPC demo featured IBM NeXtScale and the Weather Research
+and Forecasting Model (WRF) application.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/sc2013_demo-1.png" />
+</figure>
+
+<p>Even though SC13 has just wrapped up, I&rsquo;m already looking forward to next year&rsquo;s
+events.</p>
+
+
+
+
+ IBM Platform HPC 4.1.1- Creating a network bridge on compute nodes
+
+ 2013-10-09T19:12:15-06:00
+ https://hpc.social/2013/ibm-platform-hpc-4-1-1-creating-a-network-bridge-on-compute-nodes
+ <p><strong>Applies to</strong></p>
+
+<ul>
+<li>IBM Platform HPC V4.1.1</li>
+<li>IBM Platform Cluster Manager V4.1.1</li>
+</ul>
+<p><strong>Introduction</strong></p>
+
+<p>IBM Platform HPC provides the ability to customise the network configuration
+of compute nodes via Network Profiles. Network Profiles support a custom NIC
+script for each defined interface.</p>
+
+<p>This provides the ability to configure network bonding and bridging. Here we
+provide a detailed example on how to configure a network bridge in a cluster
+managed by IBM Platform HPC.</p>
+
+<p>IBM Platform HPC includes xCAT technology for cluster provisioning. xCAT
+includes a script (<em>/install/postscripts/xHRM</em>) which may be used to
+configure network bridging.
This script is leveraged as a custom network
+script in the example below.</p>
+
+<p><strong>Example</strong></p>
+
+<p>The configuration of the <em>provision</em> network may be viewed in the IBM Platform HPC Web console at: <em>Resources &gt; Node Provisioning &gt; Networks</em>.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/provision_net_wiki.png" />
+</figure>
+
+<p>This configuration may also be viewed using the <em>lsdef</em> CLI.</p>
+
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># lsdef -t network provision</span>
+Object name: provision
+ domain<span style="color: #f92672;">=</span>private.dns.zone
+ dynamicrange<span style="color: #f92672;">=</span>192.0.2.201-192.0.2.254
+ gateway<span style="color: #f92672;">=</span>&lt;xcatmaster&gt;
+ mask<span style="color: #f92672;">=</span>255.255.255.0
+ mgtifname<span style="color: #f92672;">=</span>eth0
+ net<span style="color: #f92672;">=</span>192.0.2.0
+ staticrange<span style="color: #f92672;">=</span>192.0.2.15-192.0.2.49
+ staticrangeincrement<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>
+ tftpserver<span style="color: #f92672;">=</span>192.0.2.50</code></pre></div>
+
+<p>The Network Profile <em>default_network_profile</em>, which includes the <em>provision</em>
+network, may be viewed in the IBM Platform HPC Web console at: <em>Resources &gt;
+Node Provisioning &gt; Provisioning Templates &gt; Network Profiles</em>.</p>
+
+<p>The Network Profile <em>default_network_profile</em> configuration may also be viewed
+using the <em>lsdef</em> CLI.</p>
+
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># lsdef -t group __NetworkProfile_default_network_profile</span>
+Object name: __NetworkProfile_default_network_profile
+ grouptype<span style="color: #f92672;">=</span>static
+ installnic<span style="color: #f92672;">=</span>eth0
+ members<span style="color: #f92672;">=</span>
+ netboot<span style="color: #f92672;">=</span>xnba
+ nichostnamesuffixes.eth0<span style="color: #f92672;">=</span>-eth0
+ nichostnamesuffixes.bmc<span style="color: #f92672;">=</span>-bmc
+ nicnetworks.eth0<span style="color: #f92672;">=</span>provision
+ nicnetworks.bmc<span style="color: #f92672;">=</span>provision
+ nictypes.eth0<span style="color: #f92672;">=</span>Ethernet
+ nictypes.bmc<span style="color: #f92672;">=</span>BMC
+ primarynic<span style="color: #f92672;">=</span>eth0</code></pre></div>
+
+<p>Here, we configure a network bridge <em>br0</em> against <em>eth0</em> for compute nodes
+using a new Network Profile.</p>
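+
+<p>For orientation before walking through the steps, the end state that <em>xHRM</em> produces on each compute node is roughly equivalent to the standard Red Hat <em>ifcfg</em> pair sketched below (a hand-written illustration using an address from this example, not the script&rsquo;s literal output): the physical interface is enslaved to the bridge, and the IP configuration moves to <em>br0</em>.</p>
+
+<div class="highlight"><pre><code class="language-bash"># /etc/sysconfig/network-scripts/ifcfg-eth0 (sketch)
+DEVICE=eth0
+ONBOOT=yes
+BRIDGE=br0
+
+# /etc/sysconfig/network-scripts/ifcfg-br0 (sketch)
+DEVICE=br0
+TYPE=Bridge
+ONBOOT=yes
+BOOTPROTO=static
+IPADDR=192.0.2.20
+NETMASK=255.255.255.0</code></pre></div>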
+<ol>
+<li>Add a new Network Profile with name <em>default_network_profile_bridge</em> via
+the IBM Platform HPC Web console. As an Administrator user, browse to <em>Resources &gt; Node Provisioning &gt; Provisioning Templates &gt; Network Profiles</em> and select
+the button <em>Add</em>.</li>
+</ol>
+<figure><img src="https://www.gaborsamu.com/images/new_profile_wiki.png" />
+</figure>
+
+<p>A total of three devices are required to be added:</p>
+
+<ul>
+<li>
+<p>eth0</p>
+<ul>
+<li>Type: Ethernet</li>
+<li>Network: provision</li>
+</ul>
+</li>
+<li>
+<p>bmc</p>
+<ul>
+<li>Type: BMC</li>
+<li>Network: provision</li>
+</ul>
+</li>
+<li>
+<p>br0</p>
+<ul>
+<li>Type: Customized</li>
+<li>Network: provision</li>
+<li>Configuration Command: xHRM bridgeprereq eth0:br0 (creates network bridge
+br0 against eth0)</li>
+</ul>
+</li>
+</ul>
+<p>The new Network Profile <em>default_network_profile_bridge</em> is shown below.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/new_profile5_wiki.png" />
+</figure>
+
+<ol start="2">
+<li>Now we are ready to provision the nodes using the new Network Profile
+<em>default_network_profile_bridge</em>. To begin the process to add nodes, navigate
+in the IBM Platform HPC Web console to <em>Resources &gt; Devices &gt; Nodes</em> and
+select the button <em>Add</em>. Within the Add Nodes window, optionally select the
+Node Group <em>compute</em> and select <em>Specify Properties</em> for the provisioning
+template. This will allow you to select the newly created network profile
+<em>default_network_profile_bridge</em>. Here the hardware profile <em>IPMI</em> and stateful
+provisioning are used.</li>
+</ol>
+<figure><img src="https://www.gaborsamu.com/images/add_nodes_wiki.png" />
+</figure>
+
+<p>Nodes are added using Auto discovery by PXE boot. Nodes may also be added
+using a node information file.</p>
+
+<p>The nodes are powered on, detected by IBM Platform HPC and provisioned. In
+this example, two nodes <em>compute000</em>, <em>compute001</em> are detected and
+subsequently provisioned.</p>
+
+<ol start="3">
+<li>Once the nodes have been provisioned and completed their initial boot, they
+appear in the IBM Platform HPC Web console (<em>Resources &gt; Devices &gt; Nodes</em>) with
+Status <em>booted</em> and Workload Agent <em>OK</em>.</li>
+</ol>
+<figure><img src="https://www.gaborsamu.com/images/nodes_wiki.png" />
+</figure>
+
+<p>The network bridge is configured on the nodes as expected.
We may see this
+via the IBM Platform HPC Web console by browsing to <em>Resources &gt; Devices &gt;
+Nodes</em> and selecting the <em>Summary</em> tab and scrolling to <em>Other Key Properties</em>.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/nodes2_wiki.png" />
+</figure>
+
+<p>Finally, using the CLI <em>xdsh</em>, we remotely execute ifconfig on node <em>compute001</em> to check the configuration of interface <em>br0</em>.</p>
+
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># xdsh compute001 ifconfig br0</span>
+compute001: br0 Link encap:Ethernet HWaddr 00:1E:67:49:CC:E5
+compute001: inet addr:192.0.2.20 Bcast:192.0.2.255 Mask:255.255.255.0
+compute001: inet6 addr: fe80::b03b:7cff:fe61:c1d4/64 Scope:Link
+compute001: UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
+compute001: RX packets:26273 errors:0 dropped:0 overruns:0 frame:0
+compute001: TX packets:42490 errors:0 dropped:0 overruns:0 carrier:0
+compute001: collisions:0 txqueuelen:0
+compute001: RX bytes:11947435 <span style="color: #f92672;">(</span>11.3 MiB<span style="color: #f92672;">)</span> TX bytes:7827365 <span style="color: #f92672;">(</span>7.4 MiB<span style="color: #f92672;">)</span>
+compute001:</code></pre></div>
+
+<p>As expected, the compute nodes have been provisioned with a network bridge
+<em>br0</em> configured.</p>
+
+
+
+
+ ISC 2013 wrapup- IBM Platform HPC and Intel Xeon Phi
+
+ 2013-07-09T17:58:32-06:00
+ https://hpc.social/2013/isc-2013-wrapup-ibm-platform-hpc-and-intel-xeon-phi
+ <p>This past June at ISC 2013 the IBM booth featured a live demonstration of IBM
+Platform HPC V3.2 managing an IBM iDataplex cluster equipped with Intel Xeon
+Phi coprocessors.</p>
+
+<p>As part of the demonstration, the potential performance gains of running an
+application on Intel Xeon Phi coprocessors were shown by running the visually
+stunning <a href="https://software.intel.com/en-us/articles/embree-photo-realistic-ray-tracing-kernels">Intel Embree</a> <em>crown</em> rendering on Intel Xeon and Intel Xeon
+Phi simultaneously.</p>
+
+<p>IBM Platform HPC provides a unified web-based interface for deployment and
+management of the cluster. Additionally, it includes application submission
+templates to allow administrators the flexibility to create templates to greatly simplify the submission of jobs for their users. A number of templates for
+well-known ISV and open source applications are also included as standard.
+For ISC, a template was created to allow Intel Embree to be easily launched
+through the built-in workload manager for execution on Intel Xeon or
+Intel Xeon Phi coprocessors.</p>
+
+<p>Finally, when the processor-intensive Intel Embree application was running,
+the monitoring and reporting capabilities of IBM Platform HPC provided both
+real-time and historical reporting on the health of each node in the cluster,
+including metrics specific to the Intel Xeon Phi coprocessor such as
+temperature, power consumption and utilization - all through a consistent
+web-based interface.</p>
+
+<p>Enjoy the short video of the demo here.</p>
+
+<div style="padding-bottom: 56.25%; height: 0; overflow: hidden;">
+
+</div>
+
+
+
+
+ IBM Platform HPC V3.2- GPU Management with NVIDIA CUDA 5
+
+ 2013-04-24T18:32:15-06:00
+ https://hpc.social/2013/ibm-platform-hpc-v3-2-gpu-management-with-nvidia-cuda-5
+ <p>IBM Platform HPC V3.2 is easy-to-use, yet comprehensive technical computing
+cluster management software.
It includes, as standard, GPU scheduling, management, and monitoring capabilities for systems equipped with NVIDIA Tesla GPUs.</p>
+
+<p>IBM Platform HPC V3.2 has support out of the box for NVIDIA CUDA 4.1,
+including an NVIDIA CUDA 4.1 software <em>Kit</em>. The Kit allows for simplified
+deployment of software in a clustered environment.</p>
+
+<p>If your cluster is equipped with the latest NVIDIA Tesla hardware based upon
+the Kepler architecture, you may require NVIDIA CUDA 5. Here we discuss the
+steps to install and configure an IBM Platform HPC V3.2 cluster with NVIDIA
+Tesla Kepler hardware.</p>
+
+<p><strong>Definitions</strong></p>
+
+<p>The following capabilities in IBM Platform HPC V3.2 will be used to facilitate
+the deployment of NVIDIA CUDA 5. The steps detailed below will assume
+familiarity with IBM Platform HPC V3.2 tools.</p>
+
+<ul>
+<li><strong>Cluster File Manager (CFM)</strong>: This will be used to automate patching of the
+system boot files to perform the installation of NVIDIA CUDA 5.</li>
+<li><strong>Post-Install script</strong>: This is used to trigger the execution of the system
+startup file on first boot post-provisioning.</li>
+</ul>
+<p><strong>Environment Preparation</strong></p>
+
+<p>It is assumed that the IBM Platform HPC V3.2 head node has been installed and
+that there are compute nodes equipped with NVIDIA Tesla GPUs that will be added
+(provisioned) to the cluster. The specifications of the example environment
+follow:</p>
+
+<ul>
+<li>IBM Platform HPC V3.2 (Red Hat Enterprise Linux 6.2 x64)</li>
+<li>NVIDIA® Tesla® K20c</li>
+<li>NVIDIA CUDA 5 (cuda_5.0.35_linux_64_rhel6.x-1.run)</li>
+</ul>
+<p>Two-node cluster:</p>
+
+<ul>
+<li><em>installer000</em> (Cluster head node)</li>
+<li><em>compute000</em> (Compute node equipped with Tesla K20C)</li>
+</ul>
+<p>Here we fulfil the pre-requisites necessary before provisioning the compute
+node(s) equipped with NVIDIA Tesla.</p>
+
+<ol>
+<li>The Administrator of the cluster must download NVIDIA CUDA 5 and copy it to the
+/shared directory on the IBM Platform HPC head node. This directory is NFS
+mounted by all compute nodes managed by IBM Platform HPC. Note that the
+execute bit must be set on the CUDA package file.</li>
+</ol>
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># cp ./cuda_5.0.35_linux_64_rhel6.x-1.run /shared</span>
+
+<span style="color: #75715e;"># chmod 755 /shared/cuda_5.0.35_linux_64_rhel6.x-1.run </span>
+
+<span style="color: #75715e;"># ls -la /shared/cuda*</span>
+-rwxr-xr-x <span style="color: #ae81ff;">1</span> root root <span style="color: #ae81ff;">702136770</span> Apr <span style="color: #ae81ff;">4</span> 20:59 /shared/cuda_5.0.35_linux_64_rhel6.x-1.run</code></pre></div>
+
+<ol start="2">
+<li>On the IBM Platform HPC head node, create a new nodegroup for nodes
+equipped with NVIDIA Tesla hardware. The new nodegroup template is given the
+name <em>compute-rhel-6.2-x86_64_Tesla</em> and is a copy of the built-in nodegroup
+template <em>compute-rhel-6.2-x86_64</em>.</li>
+</ol>
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># kusu-ngedit -c compute-rhel-6.2-x86_64 -n compute-rhel-6.2-x86_64_Tesla</span>
+Running plugin: /opt/kusu/lib/plugins/cfmsync/getent-data.sh
+….
+….
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/root/.ssh/authorized_keys
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/root/.ssh/id_rsa
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/opt/kusu/etc/logserver.addr
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/opt/lsf/conf/hosts
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/opt/lsf/conf/profile.lsf
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/etc/group.merge
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/etc/hosts.equiv
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/etc/hosts
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/etc/shadow.merge
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/etc/.updatenics
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/etc/passwd.merge
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/etc/fstab.kusuappend
+New file found: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/etc/ssh/ssh_config
+….
+….
+Distributing <span style="color: #ae81ff;">76</span> KBytes to all nodes.
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Sending to 10.1.1.255</code></pre></div>
+
+<ol start="3">
+<li>Configure the CFM framework to patch the <em>/etc/rc.local</em> on a set of compute
+nodes. The following example script will check for the existence of the NVIDIA
+CUDA tool <em>nvidia-smi</em> on a node in <em>/usr/bin</em>. If <em>nvidia-smi</em> is not found
+in <em>/usr/bin</em>, the script will mount the NFS share <em>/depot/shared</em> to <em>/shared</em> and will run the NVIDIA CUDA installation with the option for silent
+(non-interactive) installation. Note that this example will need to be altered
+according to your environment (IPs, CUDA package name, etc).</li>
+</ol>
+<p><strong>Filename: /etc/cfm/compute-rhel-6.2-x86_64_Tesla/etc/rc.local.append</strong>
+<div class="highlight"><pre><code class="language-bash">
+
+<span style="color: #75715e;">#!/bin/sh</span>
+
+<span style="color: #75715e;"># If /usr/bin/nvidia-smi does not exist, then mount /shared filesystem on IBM Platform HPC # head node and run NVIDIA CUDA install with the silent option. </span>
+
+<span style="color: #66d9ef;">if</span> <span style="color: #f92672;">[</span> ! -f /usr/bin/nvidia-smi <span style="color: #f92672;">]</span>
+<span style="color: #66d9ef;">then</span>
+ mkdir /shared
+ mount -t nfs 10.1.1.150:/depot/shared /shared
+ /shared/cuda_5.0.35_linux_64_rhel6.x-1.run -driver -toolkit -silent
+<span style="color: #66d9ef;">fi</span></code></pre></div>
+</p>
+
+<ol start="4">
+<li>Create a post-installation script which will be configured to execute on a
+set of compute nodes. The purpose of the post-installation script is to force
+the execution of the updated <em>/etc/rc.local</em> script during the initial boot of
+a node after provisioning. The following example script is saved as <em>/root/run_rc_local.sh</em> on the IBM Platform HPC head node. Note that this script will be
+specified as a post-installation script in the subsequent steps.</li>
+</ol>
+<p><strong>Filename: /root/run_rc_local.sh</strong></p>
+
+<div class="highlight"><pre><code class="language-bash"> <span style="color: #75715e;">#!/bin/sh -x</span>
+
+/etc/rc.local &gt; /tmp/runrc.log 2&gt;&amp;<span style="color: #ae81ff;">1</span></code></pre></div>
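+
+<p>Once nodes have been provisioned and booted for the first time (in the steps that follow), the effect of this mechanism can be spot-checked remotely. These commands are an optional verification, not part of the documented procedure; <em>/tmp/runrc.log</em> is the log written by <em>run_rc_local.sh</em> above.</p>
+
+<div class="highlight"><pre><code class="language-bash"># Optional post-boot verification on a provisioned node:
+# xdsh compute000 cat /tmp/runrc.log
+# xdsh compute000 "nvidia-smi -q | head"</code></pre></div>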
+<ol start="5">
+<li>On the IBM Platform HPC head node, start <em>kusu-ngedit</em> and edit the
+nodegroup <em>installer-rhel-6.2-x86_64</em>. The following updates are required to
+enable monitoring of GPU devices in the IBM Platform HPC Web console.</li>
+</ol>
+<ul>
+<li>On the Components screen, enable <em>component-platform-lsf-gpu</em> under <em>platform-lsf-gpu</em>.</li>
+<li>(Select Yes to synchronise changes).</li>
+</ul>
+<ol start="6">
+<li>On the IBM Platform HPC head node, start <em>kusu-ngedit</em> and edit the
+nodegroup <em>compute-rhel-6.2-x86_64_Tesla</em>. The following updates are required
+to enable the GPU monitoring agents on nodes, in addition to the required
+OS software packages, and kernel parameters for NVIDIA GPUs.</li>
+</ol>
+<ul>
+<li>On the Boot Time Parameters screen, add the following Kernel Params (at the
+end of the line): <em>rdblacklist=nouveau nouveau.modeset=0</em>.</li>
+<li>On the Components screen, enable <em>component-platform-lsf-gpu</em> under
+<em>platform-lsf-gpu</em>.</li>
+<li>On the Optional Packages screen, enable the following packages:
+<em>kernel-devel, gcc, gcc-c++</em></li>
+<li>On the Custom Scripts screen, add the script <em>/root/run_rc_local.sh</em></li>
+<li>(Select Yes to synchronise changes).</li>
+</ul>
+<ol start="7">
+<li>Update the configuration of the IBM Platform HPC workload manager. This is
+required in order for the NVIDIA CUDA-specific metrics to be taken into account.
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># kusu-addhost -u</span>
+Running plugin: /opt/kusu/lib/plugins/cfmsync/getent-data.sh
+Updating installer<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span>
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Setting up dhcpd service...
+Setting up dhcpd service successfully...
+Setting up NFS export service...
+Running plugin: /opt/kusu/lib/plugins/cfmsync/getent-data.sh
+Distributing <span style="color: #ae81ff;">60</span> KBytes to all nodes.
+Updating installer<span style="color: #f92672;">(</span>s<span style="color: #f92672;">)</span>
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Sending to 10.1.1.255
+Sending to 10.1.1.255</code></pre></div>
+</li>
+</ol>
+<p><strong>Provision nodes equipped with NVIDIA Tesla</strong></p>
+
+<p>With the environment pre-requisites complete, the provisioning of the compute
+nodes equipped with NVIDIA Tesla follows. Provisioning of nodes may be done
+using the IBM Platform HPC Web Console, or via the kusu-addhost CLI/TUI. Here,
+we provision the node using the <em>kusu-addhost</em> CLI with the newly created
+nodegroup template <em>compute-rhel-6.2-x86_64_Tesla</em>.</p>
+
+<p>Note that once nodes are discovered by <em>kusu-addhost</em>, the administrator must
+exit from the listening mode by pressing Control-C. This will complete the
+node discovery process.</p>
+
+<div class="highlight"><pre><code class="language-bash"><span style="color: #75715e;"># kusu-addhost -i eth0 -n compute-rhel-6.2-x86_64_Tesla -b</span>
+
+Scanning syslog <span style="color: #66d9ef;">for</span> PXE requests...
+Discovered Node: compute000
+Mac Address: 00:1e:67:31:45:58
+^C
+Command aborted by user...
+Setting up dhcpd service...
+Setting up dhcpd service successfully...
+Setting up NFS export service...
+Running plugin: /opt/kusu/lib/plugins/cfmsync/getent-data.sh
+Distributing <span style="color: #ae81ff;">84</span> KBytes to all nodes.
+<p><strong>Monitoring nodes equipped with NVIDIA Tesla</strong></p>
+
+<p>After having provisioned all of your GPU-equipped nodes, it is now possible to
+monitor GPU related metrics via the IBM Platform HPC Web Console. Point a
+supported web browser to the IBM Platform HPC head node and log in as a user
+with Administrative privileges. The URL to be used: <em>http://&lt;IBM_Platform_HPC_head_node&gt;</em></p>
+
+<p>The IBM Platform Web Console provides a view of GPU metrics under</p>
+
+<ul>
+<li>Dashboard/Rack View</li>
+</ul>
+<figure><img src="https://www.gaborsamu.com/images/HPC32_Rack_singleGPU.png" />
+</figure>
+
+<p>Within the Dashboard view, hover the mouse pointer over a node equipped with
+NVIDIA Tesla. The popup will display the GPU temperature and ECC errors.</p>
+
+<ul>
+<li>Host List View (GPU Tab)</li>
+</ul>
+<figure><img src="https://www.gaborsamu.com/images/HPC32_GUI_singleGPU.png" />
+</figure>
+
+
+
+
+    Present and Future Computing, Data, and Networks Committee of the Canadian Astronomical Society (CASCA)
+
+    2012-01-13T00:00:00-07:00
+    https://hpc.social/2012/present-and-future-computing-data-and-networks-committee-of-the-canadian-astronomical-society-casca-
+    <p><a href="https://www.dursi.ca/assets/pdfs/CCI_WhitePaper_2012.pdf">This document</a> is a whitepaper I wrote for the <a href="http://casca.ca/?page_id=273">CASCA Computing and Data committee</a> outlining the computing needs for the Canadian astronomy community for the coming several years. It does a fairly decent job of laying out the diverse range of large-scale R&amp;D computing needs for the national community.</p>
+
+<h2 id="executive-summary">Executive Summary</h2>
+
+<p>Advanced research computing resources have never been so essential to the Canadian Astronomy and Astrophysics research community. In the past few years, astronomical researchers have benefited greatly from modern large-scale computing systems; a diverse range of resources, which are a good match to the diverse computing needs of our scientists; and good working relationships with existing providers, allowing flexibility and collaboration between these centres and research groups.</p>
+
+<p>However, CASCA has concerns about the near future of advanced research computing available to its researchers. Here the Computers, Data, and Networks Committee of CASCA present, on behalf of the Society, a summary of the current state of the computing needs, successes, and concerns of our researchers taken from previous consultative summaries and their updates. This is the first step of a process that will continue through the first half of 2013, which will include a comprehensive survey of research computing needs of the Canadian Astronomy and Astrophysics community, and will investigate a variety of strategies for meeting those needs.</p>
+
+<p>Early systems funded by the CFI NPF are already showing their age; in many cases they are out of their maintenance contract and are already starting to fail.
The lack of any clear signs of new investment on the horizon means that even if existing systems were to continue operating perfectly, as other nations continue to invest in new research computing platforms, our researchers, using stagnant computing hardware, will not only fall behind our international competitors as data volumes continue to increase, but also be unable to make full use of prior investments.</p>
+
+<p>When new funding does become available, the Canadian astronomy community would like to see changes in emphasis taken as lessons learned from the CFI NPF procurement. Previous investment focused largely on computing hardware. While this addressed a real and pressing need resulting from years of underinvestment, the research endeavor requires a more holistic approach. Computing hardware investments must be balanced with similar investments in storage, highly qualified personnel, software development, and networking to maximize results.</p>
+
+<p>In this report, we recommend an urgent search for new and sustainable sources of funding for advanced research computing; an increased focus on personnel, software development, and storage; maintaining a diverse range of systems; enabling major longer-term projects by committing resources for longer than the current one-year allocation window of the RAC process; and continuing to enable close working relationships between research groups and computing providers, preferably as close to the researchers as possible. In addition, we recommend that CCI’s board, through the proposed Researcher Advisory Committee or otherwise, establish a direct relationship with CASCA (and similar professional groups), via persons charged with representing the needs of these research communities in planning for Compute Canada.</p>
+
+
+
+
+    Stopping your program at the first NaN
+
+    2012-01-12T00:00:00-07:00
+    https://hpc.social/2012/stopping-your-program-at-the-first-nan
+    <p>If you know that somewhere in your program, there lurks a catastrophic numerical bug that puts NaNs or Infs into your results and you want to know where it first happens, the search can be a little frustrating. However, as before, the IEEE standard can help you; these illegal events (divide by zero, underflow or overflow, or invalid operations which cause NaNs) can be made to trigger exceptions, which will stop your code right at the point where it happens; then if you run your code through a debugger, you can find the very line where it happens.</p>
+
+<p>We’ll discuss using the gnu compilers here; other compiler suites have similar options.</p>
+
+<p>Let’s take a look at the following Fortran code:</p>
+
+<pre><code>program nantest
+    real :: a, b, c
+
+    a = 1.
+    b = 2.
+
+    c = a/b
+    print *, c,a,b
+
+    a = 0.
+    b = 0.
+
+    c = a/b
+    print *, c,a,b
+
+    a = 2.
+    b = 1.
+
+    c = a/b
+    print *,c,a,b
+end program nantest
+</code></pre>
+
+<p>If we compile this code with <code>-ffpe-trap=invalid</code> (I usually add <code>,zero,overflow</code>, and even <code>underflow</code> if I think that’s causing me a problem in intermediate results), then the debugger can tell us the line where it all goes wrong:</p>
+
+<pre><code class="language-bash">$ gfortran -o nantest nantest.f90 -ffpe-trap=invalid,zero,overflow -g -static
+$ gdb nantest
+[...]
+(gdb) run
+Starting program: /scratch/ljdursi/Testing/fortran/nantest
+  0.50000000       1.0000000       2.0000000
+
+Program received signal SIGFPE, Arithmetic exception.
+0x0000000000400384 in nantest () at nantest.f90:13
+13          c = a/b
+Current language:  auto; currently fortran
+</code></pre>
+
+<p>With the intel fortran compiler (ifort), using the option <code>-fpe0</code> will do the same thing.</p>
+
+<p>It’s a little trickier with C code; we have to actually insert a call to <code>feenableexcept()</code>, which enables floating point exceptions. It’s declared in fenv.h, but as a GNU extension, so we also define <code>_GNU_SOURCE</code> before the includes:</p>
+
+<pre><code class="language-c">#define _GNU_SOURCE   /* feenableexcept() is a GNU extension */
+#include &lt;stdio.h&gt;
+#include &lt;fenv.h&gt;
+
+int main(int argc, char **argv) {
+    float a, b, c;
+    feenableexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW);
+
+    a = 1.;
+    b = 2.;
+
+    c = a/b;
+    printf("%f %f %f\n", a, b, c);
+
+    a = 0.;
+    b = 0.;
+
+    c = a/b;
+    printf("%f %f %f\n", a, b, c);
+
+    a = 2.;
+    b = 1.;
+
+    c = a/b;
+    printf("%f %f %f\n", a, b, c);
+
+    return 0;
+}
+</code></pre>
+<p>but the effect is the same:</p>
+
+<pre><code class="language-bash">$ gcc -o nantest nantest.c -lm -g
+$ gdb ./nantest
+[...]
+(gdb) run
+Starting program: /scratch/s/scinet/ljdursi/Testing/exception/nantest
+1.000000 2.000000 0.500000
+
+Program received signal SIGFPE, Arithmetic exception.
+0x00000000004005d0 in main (argc=1, argv=0x7fffffffe4b8) at nantest.c:17
+17	    c = a/b;
+</code></pre>
+
+<p>Either way, you have a much better handle on where the errors are occurring.</p>
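+
+<p>As an aside: if halting at the very first exception is too drastic (say, some
+NaNs are expected and handled), standard C99 also lets you poll the exception
+flags at checkpoints of your own choosing, with no GNU extensions required.
+A minimal sketch of that approach, using the same 0/0 case as above:</p>
+
+<pre><code class="language-c">#include &lt;stdio.h&gt;
+#include &lt;fenv.h&gt;
+
+int main(void) {
+    float a = 0., b = 0., c;
+
+    feclearexcept(FE_ALL_EXCEPT);   /* start from a clean slate */
+    c = a/b;                        /* 0/0 raises FE_INVALID */
+
+    if (fetestexcept(FE_INVALID))
+        fprintf(stderr, "an invalid operation occurred; c = %f\n", c);
+    return 0;
+}
+</code></pre>
+
+<p>Compiled as before (with optimization off, so the division isn’t folded away
+at compile time), this reports the invalid operation without stopping the run.</p>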
+
+
+
+
+    Testing Roundoff
+
+    2011-11-23T00:00:00-07:00
+    https://hpc.social/2011/testing-roundoff
+    <p>A <a href="http://www.cs.berkeley.edu/~wkahan/Stnfrd50.pdf">talk</a> has been circulating (HT: Hacker News) from a conference celebrating <a href="http://compmath50.stanford.edu/">50 years of scientific computing at Stanford</a> where the author, William Kahan, discusses an old and sadly disused trick for testing the numerical stability of the implementation of an algorithm that should work with any C99 or Fortran 2003 compiler without changing the underlying code. It’s definitely a tool that’s worth having in your toolbox, so it’s worth mentioning here.</p>
+
+<p>We’ll consider a simple numerical problem; imagine a projectile launched from height $h = 0$ with velocity $v_0=5000 \mathrm{m s}^{-1}$, and subject to the Earth’s gravitational acceleration, $g = 9.81 \mathrm{m} \mathrm{s}^{-2}$. We’re going to ask for the (first) time at which the projectile reaches a height $h$.</p>
+
+<p>This is going to be an application of our friend the quadratic equation:</p>
+
+<p>\[ r = \frac{-b \pm \sqrt{b^2 - 4 a c}}{2 a} \]</p>
+
+<p>Now, because of the repeated subtraction, a naive implementation of this equation is known to undergo catastrophic cancellation near $b^2=4 a c$, or where the discriminant is much less than \(b^2\) — in our case, near the ends and the peak of the projectile’s trajectory. We’re going to demonstrate that below.</p>
+
+<p>Now, before we show that such sensitivity can happen, we should ask — why would we care? If we test our code and know it gives “good enough” answers under the conditions that matter to us, does it really matter what could happen in other circumstances? The answer, of course, is yes. There are a lot of things we could want to do — increase the agressiveness of compiler optimizations when compiling our code, for instance — which will have the effect of numerically perturbing our computation; and we need to know if those small perturbations will have small, or large, effects on our answers.</p>
+
+<p>It turns out that IEEE 754, the standard for floating point numbers, can give us some help with this. (Everyone who does numerical work should know at least a little bit about the floating point standard, or at least the issues involved with floating point numbers. <a href="http://docs.oracle.com/cd/E19957-01/806-3568/ncg_goldberg.html">What every computer scientist should know about floating point</a>, particularly the first few sections, is an essential guide). The floating point standard - which almost all widely-used computing hardware should support - allows you to set certain properties of the mathematics “on the fly”. One particularly useful feature is the ability to set how the last digit of all floating point operations is rounded - to nearest (the default), to zero (eg, always truncate), to positive infinity (eg, always round up) or to negative infinity (always round down). In the C99 standard, this is implemented in the “fenv.h” header and the math library; in Fortran2003, this is part of the intrinsic IEEE_ARITHMETIC module, where you can call IEEE_SET_ROUNDING_MODE.</p>
+
+<p>By changing the rounding, you are perturbing every floating point operation in your calculation. If this perturbation results in significant changes in your result, then your calculation is very fragile, and you may have to look into re-writing the calculation, using another algorithm, or resorting to using higher precision for that calculation (which will push the perturbations to less significant decimal places). If not, then you have some evidence that your calculation is robust to perturbations, at least in the last bit.</p>
+
+<p>Below we have an example of how you’d do this in C. We have a simple routine which uses the obvious implementation of the quadratic equation to calculate the time when the projectile is at one meter, and we perform this calculation with all available rounding modes:</p>
+
+<pre><code class="language-c">#include &lt;stdio.h&gt;
+#include &lt;math.h&gt;
+#include &lt;fenv.h&gt;
+
+const int NOSOLN=-1;
+const int SOLN = 0;
+
+int time(const float vo, const float g, const float ho, float *time) {
+    float disc = (vo*vo - 2.*g*ho);
+
+    if (disc &lt; 0) return NOSOLN;
+
+    disc = sqrt(disc);
+    float root1 = (vo + disc)/g;
+    float root2 = (vo - disc)/g;
+
+    if ((root2 &gt;= 0.) &amp;&amp; root2 &lt; root1)
+        *time = root2;
+    else
+        *time = root1;
+
+    return SOLN;
+}
+
+
+int main(int argc, char **argv) {
+
+    const float g =9.81;
+    const float vo=5000.;
+    const float ho=1.;
+
+    int nroundings=4;
+    int roundings[]={FE_TONEAREST, FE_UPWARD, FE_DOWNWARD, FE_TOWARDZERO};
+    char *names[]  ={"To nearest", "To +inf", "To -inf", "To zero"};
+
+    for (int r=0; r&lt;nroundings; r++) {
+        int status = fesetround(roundings[r]);
+        if (status) {
+            fprintf(stderr,"Could not set rounding to '%s'.\n", names[r]);
+        } else {
+            float soln;
+            time(vo, g, ho, &amp;soln);
+            printf("%s: %f\n", names[r], soln);
+        }
+    }
+
+    return 0;
+}
+</code></pre>
+
+<p>We compile the code with gcc (any C99 compiler should work):</p>
+
+<pre><code class="language-bash">$ gcc -O0 -Wall -std=c99 quadratic.c -o quadratic -lm
+</code></pre>
+<p>Note that we need to explicitly link in the math library, and to turn off optimization (so that the compiler doesn’t replace the repeated calls to time() with a single call). Running this, we find:</p>
+
+<pre><code>$ ./quadratic
+To nearest: 0.000199
+To +inf: 0.000149
+To -inf: 0.000249
+To zero: 0.000249
+</code></pre>
+
+<p>Changing the rounding modes changes the result by 50%! This shows that our current implementation - which is not giving obviously wrong answers - is extremely fragile in the presence of numerical noise, and we should exercise extreme caution with compiler flags, etc. (How to re-write the expression to be more robust to small changes is a topic for another day.)</p>
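+
+<p>For the curious, the usual cure (given here only as a sketch; the fuller
+discussion remains that topic for another day) is to avoid the cancelling
+subtraction entirely: compute the larger-magnitude root first, then recover
+the other from the product of the roots, $c/a$:</p>
+
+<pre><code class="language-c">#include &lt;math.h&gt;
+
+/* Sketch: assumes a != 0 and a non-negative discriminant.
+ * q takes the sign of b, so b + copysignf(disc, b) never cancels. */
+void stable_roots(float a, float b, float c, float *r1, float *r2) {
+    float disc = sqrtf(b*b - 4.f*a*c);
+    float q = -0.5f*(b + copysignf(disc, b));
+    *r1 = q/a;      /* the larger-magnitude root */
+    *r2 = c/q;      /* since r1*r2 = c/a */
+}
+</code></pre>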
+
+
+
+
+    Codes as Instruments- Community Applications and Simulation Software for the Hardware Architectures of the Next Decade
+
+    2010-06-17T01:00:00-06:00
+    https://hpc.social/2010/codes-as-instruments-community-applications-and-simulation-software-for-the-hardware-architectures-of-the-next-decade
+    <p>It is becoming increasingly problematic that, even as computing and data become more and more fundamental to research, and the complexity and diversity of computing technologies out there grows, getting stable funding for developing high-quality research software remains so difficult.</p>
+
+<p>In <a href="https://www.dursi.ca/assets/pdfs/CAI.pdf">this whitepaper</a> for the <a href="http://www.casca.ca/lrp2010/">CASCA 2010 Long Range Plan</a>, my colleague <a href="http://www.astro.uvic.ca/~fherwig/">Falk Herwig</a> and I lay out the case for increased funding of R&amp;D software development by professional research software developers. We make a couple of points which I genuinely believe to be strong:</p>
+
+<p>First, increased benefits. A successful community code can support an enormous body of research. By the (admittedly somewhat crude) count we use in this paper, the top six research codes in Astronomy accounted for approximately 50% of the computational astronomy publications over the period of study, and the top three - <a href="http://www.nublado.org/">Cloudy</a>, <a href="http://www.mpa-garching.mpg.de/galform/gadget/">Gadget</a>, and <a href="http://www.flash.uchicago.edu/site/">FLASH</a>, which I was part of - accounted for nearly 40%. That is an enormous amount of R&amp;D effort enabled by those projects.</p>
+
+<p>Second, reduced costs. We cite from the growing research software development literature to demonstrate the high (and growing) challenges of engineering these codes in a scientist’s spare time, and the high cost of software defects. By having a small cadre of professional research software development personnel, better quality software can be developed more efficiently.</p>
+
+<p>Finally, a word about the title - this is an analogy due to Falk, and while it’s been controversial, I think there’s a lot of truth to it. Astronomy has always relied heavily on, for instance, telescopes - but a telescope is only part of an observational facility. A big photon-gathering dish is only as useful as the scientific instrument that’s placed at its focus to make sense of those photons. Similarly, a huge computer by itself has no scientific value without software to run on it. Unless our community invests in computational instruments with the same level of seriousness as observational instruments, our ability to make use of these facilities is going to be needlessly limited.</p>
+
+<h3 id="abstract">Abstract</h3>
+
+<p>Modern astronomical research requires increasingly sophisticated computing facilities and software tools. Computational tools have become the fundamental tools to turn observational raw data into scientific insight. Complex multi-physics simulation codes have developed into tools for numerical experiments that provide scientific insight beyond classical theory. Canadian researchers need an environment for development and maintenance of these critical tools.
In particular, the drastically enhanced complexity of deeply heterogeneous hardware architectures poses a real challenge to using present and future HPC facilities.</p>
+
+<p>Without a national program in astrophysical simulation science and astronomy application code development, we are becoming vulnerable with respect to our ability to maximise the scientific return from existing and planned investments in astronomy. In addition, there are significant industrial/commercial HQP needs that a simulation and application code program could start to address, if it is properly aligned with academic training opportunities.</p>
+
+<p>We outline the requirements for a framework for developing Canadian astronomical application and simulation codes — and code builders. In the US decadal plan process, voices are calling for similar emphasis on developing infrastructure and incentives for open community codes (Weiner et al. 2009). We propose funding several small interdisciplinary teams of postdocs, graduate students, and staff, housed in departments at Universities that have or are about to make a commitment in a relevant area (e.g. applied math, computational physics, modeling science). These teams can, while training astronomical and computational HQP, focus on building tools that have been deemed to be high priorities by the astronomical and astrophysical communities in order to make the best scientific use of our new computational facilities.</p>
+
+
+
+
+    Canadian Astronomical Computing, Data And Network Facilities- A White Paper for the 2010 Long Range Plan
+
+    2010-05-01T01:00:00-06:00
+    https://hpc.social/2010/canadian-astronomical-computing-data-and-network-facilities-a-white-paper-for-the-2010-long-range-plan
+    <p>In <a href="https://www.dursi.ca/assets/pdfs/CDandN_WP.pdf">this whitepaper</a> for the <a href="http://www.casca.ca/lrp2010/">CASCA 2010 Long Range Plan</a>, I and the rest of the Computing, Data, and Network committee of CASCA lay out the state of the ecosystem for computation in support of Canadian astronomy, and suggest a path forward for the time period of the 2010-2020 long range plan.</p>
+
+<h3 id="abstract">Abstract</h3>
+
+<p>Significant investment in new large, expensive astronomical observing facilities spanning a substantial portion of the electromagnetic spectrum was a dominant theme of LRP2000 and continues to be necessary for Canadian astronomy to maintain its world position. These developments are generating increasingly large volumes of data. Such investments only make sense if they are balanced by strong infrastructure support to ensure that data acquired with these facilities can be readily accessed and analyzed by observers, and that theoreticians have the tools available to simulate and understand their context.
This will require continuing investment in computational facilities to store and analyze the data, networks to ensure useful access to the data and products by Canadian researchers, and personnel to help Canadian researchers make use of these tools.</p>
+
+<p>In addition, large parallel simulations have become an essential tool for astrophysical theory, and Canadian Astronomy has world-leading simulators and developers who rely on world-class High Performance Computing facilities being maintained in Canada to do their research effectively.</p>
+
+<p>We recommend that Compute Canada be funded at $72M/yr to bring HPC funding per capita in line with G8 norms; that part of every Compute Canada technology renewal include a Top-20 class computing facility; that NSERC and other funding agencies begin supporting software development as an integral component of scientific research; that the staff funding for consortia be tripled, including local access to technical analyst staff; and that the last-mile bottleneck of campus networking less than 10 Gb/s be addressed where it is impacting researchers, with particular urgency for the current 1 Gb/s connection at the CADC.</p>
+
+
+
+
+ diff --git a/feed.json b/feed.json
+new file mode 100644
+index 0000000..bb5603e
+--- /dev/null
++++ b/feed.json
+@@ -0,0 +1,704 @@
+{
+  "version": "https://jsonfeed.org/version/1",
+  "title": "hpc.social - Aggregated Personal Blog",
+  "home_page_url": "https://hpc.social/personal-blog/",
+  "feed_url": "https://hpc.social/personal-blog/feed.json",
+  "description": "Shared personal experiences and stories",
+  "icon": "https://hpc.social/personal-blog/assets/images/apple-touch-icon.png",
+  "favicon": "https://hpc.social/personal-blog/assets/images/favicon.png",
+  "expired": false,
+
+  "author": {
+    "name": "hpc.social",
+    "url": null,
+    "avatar": null
+  },
+
+"items": [
+
+  {
+    "id": "https://hpc.social/personal-blog/2023/lsf-client-on-macos-submitting-from-your-laptop/",
+    "title": "LSF client on macOS - submitting from your laptop",
+    "summary": null,
+    "content_text": "In traditional HPC environments, login nodes are typically used as an access point for users to submit and manage jobs. Although login nodes are still used today, HPC environments are increasingly being used by a broad class of users with domain expertise and not necessarily IT experts. In other words, such users may be more comfortable using their native desktop environment rather than the CLI. Given these factors, in the commercial HPC space, organizations are always looking for ways to lower the bar to access and interact with HPC environments. Spectrum LSF provides many ways to submit and manage jobs in an HPC cluster. For power users, the rich CLI functionality exists. There is also an available web-based interface for job submission and management which provides customizable application templates to greatly simplify job submission, while hiding the complexity of the underlying infrastructure. A RESTful API is also available to users of IBM Spectrum LSF Application Center or IBM Spectrum LSF Suites, which enables organizations to access the HPC environment via web services. I’ve written previously in detail about the LSF web-based interface in the blog The Easy HPC Button. Here, we’ll take a closer look at the available LSF client for macOS that uses the RESTful API. First, a bit about LSF clients. LSF clients can access resources on LSF server hosts without running the LSF daemons.
LSF clients don’t require a softwarelicense and from clients, users can run all of the familiar LSF commands. Additionally, LSF clients aresubmit only, and don’t execute jobs.Note: The macOS LSF client uses the LSF RESTful API. This means that it will function in environmentsrunning LSF Standard Edition with LSF Application Center or LSF Suites.ConfigurationThe configuration used for the example below is as follows:HostnameOSDetailkilencCentOS Stream 8.4LSF Suite for HPC v10.2.0.13My-Macbook-AirmacOS Ventura 13.2.1 (Apple M1)LSF clientOn the Spectrum LSF Suite for HPC management host (kilenc), add the following variables to the Parametersection in the file lsf.cluster.name. The FLOAT_CLIENTS variable determines how many floating clients canjoin the LSF cluster, The FLOAT_CLIENTS_ADDR_RANGE specifies the allowable IP addresses. In this case, theclient system is on a 192.168.x.x network.Begin ParametersFLOAT_CLIENTS=2FLOAT_CLIENTS_ADDR_RANGE=192.*End ParametersTo make the changes take effect, issue the following commands as the LSF administrator:lsadmin reconfigbadmin reconfigObtain the tarball pacdesktop_client10.2.0.13_macos-x86_64.tar. For users with an LSF entitlement this package is available onIBM Fix Central. Note that this package will work on systems with Apple M1 silicon through emulation.Open a Terminal on the macOS client system, copy the tarball to the $HOME/Desktop directory of user lsfuser and uncompress the tarball.lsfuser@My-MacBook-Air Desktop % pwd/Users/lsfuser/Desktoplsfuser@My-MacBook-Air Desktop % ls -la pacdesktop_client10.2.0.13_macos-x86_64.tar-rw-r--r--@ 1 lsfuser staff 18452480 27 Feb 17:12 pacdesktop_client10.2.0.13_macos-x86_64.tarlsfuser@My-MacBook-Air Desktop % tar -xvf pacdesktop_client10.2.0.13_macos-x86_64.tarx LSF_Desktop_Client/x LSF_Desktop_Client/bappx LSF_Desktop_Client/btopx LSF_Desktop_Client/bwaitx LSF_Desktop_Client/lseligiblex LSF_Desktop_Client/bslax LSF_Desktop_Client/blparamsx LSF_Desktop_Client/bhpartx LSF_Desktop_Client/bclustersx LSF_Desktop_Client/blstartupx LSF_Desktop_Client/lsacctx LSF_Desktop_Client/bsubx LSF_Desktop_Client/bugroupx LSF_Desktop_Client/bpeekx LSF_Desktop_Client/bacctx LSF_Desktop_Client/brequeuex LSF_Desktop_Client/bjgroupx LSF_Desktop_Client/bslotsx LSF_Desktop_Client/lsrunx LSF_Desktop_Client/bjobsx LSF_Desktop_Client/lshostsx LSF_Desktop_Client/lsloadx LSF_Desktop_Client/brlainfox LSF_Desktop_Client/bresourcesx LSF_Desktop_Client/bladminx LSF_Desktop_Client/bstatusx LSF_Desktop_Client/bmodx LSF_Desktop_Client/bpostx LSF_Desktop_Client/lsidx LSF_Desktop_Client/bentagsx LSF_Desktop_Client/chx LSF_Desktop_Client/bchkpntx LSF_Desktop_Client/bparamsx LSF_Desktop_Client/bjdepinfox LSF_Desktop_Client/bgmodx LSF_Desktop_Client/brestartx LSF_Desktop_Client/lsltasksx LSF_Desktop_Client/blusersx LSF_Desktop_Client/paclogonx LSF_Desktop_Client/regnotifyx LSF_Desktop_Client/cacert.pemx LSF_Desktop_Client/bresumex LSF_Desktop_Client/blstatx LSF_Desktop_Client/bhistx LSF_Desktop_Client/bqueuesx LSF_Desktop_Client/bltasksx LSF_Desktop_Client/bresizex LSF_Desktop_Client/blcollectx LSF_Desktop_Client/lsacctmrgx LSF_Desktop_Client/bgaddx LSF_Desktop_Client/bmigx LSF_Desktop_Client/bstopx LSF_Desktop_Client/bswitchx LSF_Desktop_Client/blhostsx LSF_Desktop_Client/blcstatx LSF_Desktop_Client/brsvsx LSF_Desktop_Client/brunx LSF_Desktop_Client/blinfox LSF_Desktop_Client/lsgrunx LSF_Desktop_Client/busersx LSF_Desktop_Client/lsloadadjx LSF_Desktop_Client/blkillx LSF_Desktop_Client/bbotx LSF_Desktop_Client/lsclustersx LSF_Desktop_Client/bconfx 
LSF_Desktop_Client/lsinfox LSF_Desktop_Client/lsmakex LSF_Desktop_Client/blimitsx LSF_Desktop_Client/bmgroupx LSF_Desktop_Client/breadx LSF_Desktop_Client/bkillx LSF_Desktop_Client/lstcshx LSF_Desktop_Client/lsrtasksx LSF_Desktop_Client/README.TXTx LSF_Desktop_Client/lsplacex LSF_Desktop_Client/bhostsx LSF_Desktop_Client/paclogoutx LSF_Desktop_Client/bgdelFollowing the directions in the file README.TXT, set the environment variable LSF_DESKTOP_CLIENT=yes, and set the PATH variable accordingly.lsfuser@My-MacBook-Air LSF_Desktop_Client % export LSF_DESKTOP_CLIENT=yeslsfuser@My-MacBook-Air LSF_Desktop_Client % export PATH=`pwd`:$PATHNext, it’s necessary to run the paclogon command to connect to the LSF Application Center (or LSF Suite installation). Here we point to the LSF server kilenc on port 8080.lsfuser@My-MacBook-Air LSF_Desktop_Client % paclogonLog on to IBM Spectrum LSF Application CenterUser account: lsfuserEnter password: Specify the URL to connect to IBM Spectrum LSF Application Center. Format: http://host_name:port_number/platform or https://host_name:port_number/platformURL: http://kilenc:8080/platformYou have successfully logged on to IBM Spectrum LSF Application Center.After successfully logging in using the paclogon command, it should be possible to run LSF “base” commands from the macOS terminal including lsid, lsload, lshosts.lsfuser@My-MacBook-Air LSF_Desktop_Client % lsidIBM Spectrum LSF 10.1.0.13, Apr 15 2022Suite Edition: IBM Spectrum LSF Suite for HPC 10.2.0.13Copyright International Business Machines Corp. 1992, 2016.US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp.My cluster name is KlaszterMy master name is kilenclsfuser@My-MacBook-Air LSF_Desktop_Client % lshosts -wHOST_NAME type model cpuf ncpus maxmem maxswp server RESOURCESkilenc LINUXPPC64LE POWER9 25.0 32 30.7G 15.8G Yes (mg docker)lsfuser@My-MacBook-Air LSF_Desktop_Client % lsload -wHOST_NAME status r15s r1m r15m ut pg ls it tmp swp memkilenc ok 0.8 2.1 2.4 7% 0.0 0 1156 551M 15.6G 10GNext, run the LSF batch commands bqueues and bhosts.lsfuser@My-MacBook-Air LSF_Desktop_Client % bqueuesQUEUE_NAME PRIO STATUS MAX JL/U JL/P JL/H NJOBS PEND RUN SUSP admin 50 Open:Active - - - - 0 0 0 0owners 43 Open:Active - - - - 0 0 0 0priority 43 Open:Active - - - - 75835 75803 32 0night 40 Open:Inact - - - - 0 0 0 0short 35 Open:Active - - - - 0 0 0 0dataq 33 Open:Active - - - - 0 0 0 0normal 30 Open:Active - - - - 0 0 0 0interactive 30 Open:Active - - - - 0 0 0 0sendq 30 Open:Active - - - - 0 0 0 0idle 20 Open:Active - - - - 0 0 0 0lsfuser@My-MacBook-Air LSF_Desktop_Client % bhostsHOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV kilenc ok - 32 19 19 0 0 0Running the bjobs will result in a warning message appearing on macOS stating: “bjobs” cannot be opened because the developer cannot be verified.To remedy the issue observed in step 9, click cancel on the warning message and browse to System Settings -> Privacy & Security -> Security Settings. In the Security Settings view,you’ll see the message: “bjobs” was blocked from use because it is not from an identified developer. To allow the bjobs command to execute, click on the Allow Anyway button. You willthen be promped to authenticate to make the change take effect.Run the LSF bjobs command again. You will now receive a new warning error popup indicating: macOS cannot verify the developer of “bjobs”. Are you sure you want to open it?. 
To proceed, click on the Open button. The bjobs command will then run to completion as expected. Subsequent executions of bjobs will run without any system warnings. Finally, to submit a job, run the bsub command. Here we try to submit a simple sleep job (i.e. bsub -q normal sleep 3600). As was the case with the bjobs command, the bsub command is also blocked. Here, repeat steps 10 and 11 as described above but for the bsub command. Once the steps have been completed, repeat the bsub job submission command. lsfuser@My-MacBook-Air LSF_Desktop_Client % bsub -q normal sleep 3600 Job <617551> is submitted to queue <normal>.",
    "content_html": "

    In traditional HPC environments, login nodes are typically used as an access point for users to submit and manage jobs. Although login nodes are still used today, HPC environments are increasingly being used by a broad class of users with domain expertise who are not necessarily IT experts. In other words, such users may be more comfortable using their native desktop environment rather than the CLI. Given these factors, in the commercial HPC space, organizations are always looking for ways to lower the bar to access and interact with HPC environments.

    Spectrum LSF provides many ways to submit and manage jobs in an HPC cluster. For power users, the rich CLI functionality exists. There is also an available web-based interface for job submission and management which provides customizable application templates to greatly simplify job submission, while hiding the complexity of the underlying infrastructure. A RESTful API is also available to users of IBM Spectrum LSF Application Center or IBM Spectrum LSF Suites, which enables organizations to access the HPC environment via web services.

    I’ve written previously in detail about the LSF web-based interface in the blog The Easy HPC Button. Here, we’ll take a closer look at the available LSF client for macOS that uses the RESTful API. First, a bit about LSF clients. LSF clients can access resources on LSF server hosts without running the LSF daemons. LSF clients don’t require a software license and, from clients, users can run all of the familiar LSF commands. Additionally, LSF clients are submit-only, and don’t execute jobs.

    Note: The macOS LSF client uses the LSF RESTful API. This means that it will function in environments running LSF Standard Edition with LSF Application Center or LSF Suites.

    Configuration

    The configuration used for the example below is as follows:

    Hostname | OS | Detail
    kilenc | CentOS Stream 8.4 | LSF Suite for HPC v10.2.0.13
    My-Macbook-Air | macOS Ventura 13.2.1 (Apple M1) | LSF client
    1. On the Spectrum LSF Suite for HPC management host (kilenc), add the following variables to the Parameters section in the file lsf.cluster.name. The FLOAT_CLIENTS variable determines how many floating clients can join the LSF cluster; the FLOAT_CLIENTS_ADDR_RANGE variable specifies the allowable IP addresses. In this case, the client system is on a 192.168.x.x network.
    Begin Parameters
    FLOAT_CLIENTS=2
    FLOAT_CLIENTS_ADDR_RANGE=192.*
    End Parameters
    2. To make the changes take effect, issue the following commands as the LSF administrator:
    lsadmin reconfig
    badmin reconfig
    3. Obtain the tarball pacdesktop_client10.2.0.13_macos-x86_64.tar. For users with an LSF entitlement, this package is available on IBM Fix Central. Note that this package will work on systems with Apple M1 silicon through emulation.

    4. Open a Terminal on the macOS client system, copy the tarball to the $HOME/Desktop directory of user lsfuser and uncompress the tarball.

    lsfuser@My-MacBook-Air Desktop % pwd/Users/lsfuser/Desktoplsfuser@My-MacBook-Air Desktop % ls -la pacdesktop_client10.2.0.13_macos-x86_64.tar-rw-r--r--@ 1 lsfuser  staff  18452480 27 Feb 17:12 pacdesktop_client10.2.0.13_macos-x86_64.tarlsfuser@My-MacBook-Air Desktop % tar -xvf pacdesktop_client10.2.0.13_macos-x86_64.tarx LSF_Desktop_Client/x LSF_Desktop_Client/bappx LSF_Desktop_Client/btopx LSF_Desktop_Client/bwaitx LSF_Desktop_Client/lseligiblex LSF_Desktop_Client/bslax LSF_Desktop_Client/blparamsx LSF_Desktop_Client/bhpartx LSF_Desktop_Client/bclustersx LSF_Desktop_Client/blstartupx LSF_Desktop_Client/lsacctx LSF_Desktop_Client/bsubx LSF_Desktop_Client/bugroupx LSF_Desktop_Client/bpeekx LSF_Desktop_Client/bacctx LSF_Desktop_Client/brequeuex LSF_Desktop_Client/bjgroupx LSF_Desktop_Client/bslotsx LSF_Desktop_Client/lsrunx LSF_Desktop_Client/bjobsx LSF_Desktop_Client/lshostsx LSF_Desktop_Client/lsloadx LSF_Desktop_Client/brlainfox LSF_Desktop_Client/bresourcesx LSF_Desktop_Client/bladminx LSF_Desktop_Client/bstatusx LSF_Desktop_Client/bmodx LSF_Desktop_Client/bpostx LSF_Desktop_Client/lsidx LSF_Desktop_Client/bentagsx LSF_Desktop_Client/chx LSF_Desktop_Client/bchkpntx LSF_Desktop_Client/bparamsx LSF_Desktop_Client/bjdepinfox LSF_Desktop_Client/bgmodx LSF_Desktop_Client/brestartx LSF_Desktop_Client/lsltasksx LSF_Desktop_Client/blusersx LSF_Desktop_Client/paclogonx LSF_Desktop_Client/regnotifyx LSF_Desktop_Client/cacert.pemx LSF_Desktop_Client/bresumex LSF_Desktop_Client/blstatx LSF_Desktop_Client/bhistx LSF_Desktop_Client/bqueuesx LSF_Desktop_Client/bltasksx LSF_Desktop_Client/bresizex LSF_Desktop_Client/blcollectx LSF_Desktop_Client/lsacctmrgx LSF_Desktop_Client/bgaddx LSF_Desktop_Client/bmigx LSF_Desktop_Client/bstopx LSF_Desktop_Client/bswitchx LSF_Desktop_Client/blhostsx LSF_Desktop_Client/blcstatx LSF_Desktop_Client/brsvsx LSF_Desktop_Client/brunx LSF_Desktop_Client/blinfox LSF_Desktop_Client/lsgrunx LSF_Desktop_Client/busersx LSF_Desktop_Client/lsloadadjx LSF_Desktop_Client/blkillx LSF_Desktop_Client/bbotx LSF_Desktop_Client/lsclustersx LSF_Desktop_Client/bconfx LSF_Desktop_Client/lsinfox LSF_Desktop_Client/lsmakex LSF_Desktop_Client/blimitsx LSF_Desktop_Client/bmgroupx LSF_Desktop_Client/breadx LSF_Desktop_Client/bkillx LSF_Desktop_Client/lstcshx LSF_Desktop_Client/lsrtasksx LSF_Desktop_Client/README.TXTx LSF_Desktop_Client/lsplacex LSF_Desktop_Client/bhostsx LSF_Desktop_Client/paclogoutx LSF_Desktop_Client/bgdel
    5. Following the directions in the file README.TXT, set the environment variable LSF_DESKTOP_CLIENT=yes, and set the PATH variable accordingly.
    lsfuser@My-MacBook-Air LSF_Desktop_Client % export LSF_DESKTOP_CLIENT=yes
    lsfuser@My-MacBook-Air LSF_Desktop_Client % export PATH=`pwd`:$PATH
    6. Next, it’s necessary to run the paclogon command to connect to the LSF Application Center (or LSF Suite installation). Here we point to the LSF server kilenc on port 8080.
    lsfuser@My-MacBook-Air LSF_Desktop_Client % paclogon
    Log on to IBM Spectrum LSF Application Center
    User account: lsfuser
    Enter password:
    Specify the URL to connect to IBM Spectrum LSF Application Center. Format: http://host_name:port_number/platform or https://host_name:port_number/platform
    URL: http://kilenc:8080/platform
    You have successfully logged on to IBM Spectrum LSF Application Center.
    7. After successfully logging in using the paclogon command, it should be possible to run LSF “base” commands from the macOS terminal, including lsid, lsload and lshosts.
    lsfuser@My-MacBook-Air LSF_Desktop_Client % lsid
    IBM Spectrum LSF 10.1.0.13, Apr 15 2022
    Suite Edition: IBM Spectrum LSF Suite for HPC 10.2.0.13
    Copyright International Business Machines Corp. 1992, 2016.
    US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
    My cluster name is Klaszter
    My master name is kilenc
    lsfuser@My-MacBook-Air LSF_Desktop_Client % lshosts -w
    HOST_NAME                       type       model  cpuf ncpus maxmem maxswp server RESOURCES
    kilenc                    LINUXPPC64LE      POWER9  25.0    32  30.7G  15.8G    Yes (mg docker)
    lsfuser@My-MacBook-Air LSF_Desktop_Client % lsload -w
    HOST_NAME               status  r15s   r1m  r15m   ut    pg  ls    it   tmp   swp   mem
    kilenc                      ok   0.8   2.1   2.4   7%   0.0   0  1156  551M 15.6G   10G
    8. Next, run the LSF batch commands bqueues and bhosts.
    lsfuser@My-MacBook-Air LSF_Desktop_Client % bqueues
    QUEUE_NAME      PRIO STATUS          MAX JL/U JL/P JL/H NJOBS  PEND   RUN  SUSP
    admin            50  Open:Active       -    -    -    -     0     0     0     0
    owners           43  Open:Active       -    -    -    -     0     0     0     0
    priority         43  Open:Active       -    -    -    - 75835 75803    32     0
    night            40  Open:Inact        -    -    -    -     0     0     0     0
    short            35  Open:Active       -    -    -    -     0     0     0     0
    dataq            33  Open:Active       -    -    -    -     0     0     0     0
    normal           30  Open:Active       -    -    -    -     0     0     0     0
    interactive      30  Open:Active       -    -    -    -     0     0     0     0
    sendq            30  Open:Active       -    -    -    -     0     0     0     0
    idle             20  Open:Active       -    -    -    -     0     0     0     0
    lsfuser@My-MacBook-Air LSF_Desktop_Client % bhosts
    HOST_NAME          STATUS       JL/U    MAX  NJOBS    RUN  SSUSP  USUSP    RSV
    kilenc             ok              -     32     19     19      0      0      0
    9. Running the bjobs command will result in a warning message appearing on macOS stating: “bjobs” cannot be opened because the developer cannot be verified.
    10. To remedy the issue observed in step 9, click Cancel on the warning message and browse to System Settings -> Privacy & Security -> Security Settings. In the Security Settings view, you’ll see the message: “bjobs” was blocked from use because it is not from an identified developer. To allow the bjobs command to execute, click on the Allow Anyway button. You will then be prompted to authenticate to make the change take effect.

    11. Run the LSF bjobs command again. You will now receive a new warning popup indicating: macOS cannot verify the developer of “bjobs”. Are you sure you want to open it? To proceed, click on the Open button. The bjobs command will then run to completion as expected. Subsequent executions of bjobs will run without any system warnings.
    12. Finally, to submit a job, run the bsub command. Here we try to submit a simple sleep job (i.e. bsub -q normal sleep 3600). As was the case with the bjobs command, the bsub command is also blocked. Here, repeat steps 10 and 11 as described above but for the bsub command. Once the steps have been completed, repeat the bsub job submission command.
    lsfuser@My-MacBook-Air LSF_Desktop_Client % bsub -q normal sleep 3600
    Job <617551> is submitted to queue <normal>.
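    With the client working end to end, the submitted job can be tracked from the same Terminal session using the usual LSF commands shipped in the client tarball (output omitted here; the job ID comes from the bsub response above):
    lsfuser@My-MacBook-Air LSF_Desktop_Client % bjobs 617551
    lsfuser@My-MacBook-Air LSF_Desktop_Client % bpeek 617551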
    ", + "url": "https://hpc.social/personal-blog/2023/lsf-client-on-macos-submitting-from-your-laptop/", + + + + + + "date_published": "2023-03-01T19:10:58-07:00", + "date_modified": "2023-03-01T19:10:58-07:00", + + "author": "Ramblings of a supercomputing enthusiast." + + }, + + { + "id": "https://hpc.social/personal-blog/2023/monitoring-ibm-spectrum-lsf-with-the-tig-stack/", + "title": "Monitoring .-.. ... ..-. (IBM Spectrum LSF) with the TIG stack", + "summary": null, + "content_text": "Much like dashboards in automobiles, dashboards in the context of HPC infrastructure are crucial to get an understanding of what’s happening under the hood of your HPC cluster - ata glance. During my IT career, I’ve used a myriad of monitoring solutions ranging from SNMP and Ganglia, to the ELK (Elasticsearch, Logstash, Kibana) stack. For example, I’ve recentlywritten an overview on how it is possible to visualize IBM Spectrum LSF (LSF) data in Grafana. LSF is an HPC job scheduler which brings to the table three decades of experience inworkload and resource management.For this blog, I decided to take this to the next level by monitoring IBM Spectrum LSF with the well known TIG (Telegraf, InfluxDB, Grafana) stack. This article is not meant to be adebate on the advantages of one monitoring stack over another. Rather, the focus is to demonstrate what is feasible in terms of monitoring Spectrum LSF clusters with the TIG stack,given the many available ways to query LSF for key information using CLI commands.The JourneyThere already exists many write-ups on how to deploy the TIG stack to monitor systems. This isn’t meant to be a guide on setting up the TIG stack. Rather, it’s assumed that the readeralready has some familiarity with the TIG stack. If not, then [insert your favourite search engine] is your friend.On my home network, I decided to setup a VM running on my trusty Traverse Ten64 running Fedora where InfluxDB was installed. The idea was to run InfluxDB on a system that is guaranteedto be always on in my home environment and that is energy efficient. Installing telegraf on all of the LSF cluster servers (x3) proved to be straight forward. Note that in all cases, I used the OSsupplied versions of InfluxDB, Telegraf. Finally, I already had a Grafana server running on a server in my network.Out of the box, Telegraf has the ability to monitor numerous system metrics. Furthermore, there exists literally hundreds of plugins for Telegraf to monitor a wide variety of devices,services and software. A search however, didn’t reveal the existence of any plugin to monitor LSF. So it was time to get creative.What to monitor?A bit of research revealed that InfluxDB supports what is known as “line protocol”. This is a well defined text-based format for writing data to InfluxDB. I used the followingreference on “line protocol” to guide me. Using line protocol it would be ultimately possible towrite a plugin for Telegraf to effecively scrape information from Spectrum LSF and output in line protocol format for writing to InfluxDB.Before I could begin writing the plugin, the key was to determine what information from Spectrum LSF would be useful to display in the dashboard, and how that information could beextracted. For this I followed the KISS principle to keep things as simple as possible. The key metrics I decided to report on were servers, queues and jobs (oh my!), as well as processinformation for the LSF scheduler daemons. 
Refer to the following table for details:

Metric(s) | Command
LSF scheduler performance metrics | badmin perfmon view -json
LSF available servers, CPUs, cores, slots | badmin showstatus
LSF server by status (total number Ok, closed, unreachable, unavailable) | badmin showstatus
LSF job statistics (total number running, suspended, pending) | badmin showstatus
LSF queue statistics (per queue, total number of jobs running, suspended, pending) | bqueues -json -o queue_name:12 njobs pend run susp rsv ususp ssusp
LSF mbatchd process metrics | (Telegraf - inputs.procstat)
LSF mbschd process metrics | (Telegraf - inputs.procstat)
LSF management lim process metrics | (Telegraf - inputs.procstat)

Scrapin' fun

The above metrics would give a good idea of the state of the Spectrum LSF cluster at a glance. With the list of metrics prepared, the next step was to create a plugin script which would scrape data from the noted commands. Both bqueues and badmin perfmon view support output in JSON format with the appropriate flags specified. However, badmin showstatus does not support output in JSON format. This meant that for badmin showstatus it was necessary to scrape data assuming hard-coded field positions in the output.

A copy of the Telegraf plugin for Spectrum LSF is provided below. This is just an example and is provided “as is” for testing purposes. Your mileage may vary.

Example lsf_telegraf_agent.py script:

#!/usr/bin/python3.8
#
# v0.9
# Sample inputs.exec script for Telegraf which outputs metrics from an IBM Spectrum LSF management server
# in InfluxDB Line Protocol input format.
#
# NOTE: It is required to set the lsf_envfile variable to point to the LSF profile.lsf file
# for the LSF installation.
#
# Gabor Samu
# January 4, 2023
#

import os
import json
import time
import subprocess
import sys
from pathlib import Path

#
# Variable declarations
# **NOTE: lsf_envfile needs to be set to point to the profile.lsf file for the LSF installation.
#
lsf_envfile = \"/opt/ibm/lsfsuite/lsf/conf/profile.lsf\"

#
# Source the Spectrum LSF profile.
# Check for the existence of lsf_envfile (profile.lsf) and source the environment.
# If the specified file does not exist, then exit.
#
path = Path(lsf_envfile)
if path.is_file():
    lsf_env = (f'env -i sh -c \"source {lsf_envfile} && env\"')
    for line in subprocess.getoutput(lsf_env).split(\"\\n\"):
        key, value = line.split(\"=\")
        os.environ[key]= value
else:
    sys.exit(f'The file {lsf_envfile} does not exist.')

#
# Get the time in nanoseconds since the epoch.
# This is required as part of the InfluxDB line protocol reference.
# Only supported on Python 3.7+
#
time_nanosec = time.time_ns()

#
# Here we set the LSF environment variable LSB_NTRIES. This will be used to determine the
# number of retries before failure of a LSF batch command. This is used to cover the case
# when the LSF mbatchd is not running.
#
os.environ[\"LSB_NTRIES\"] = \"2\"

#
# Check if LSF performance metric monitoring is enabled. This is done by running
# 'badmin perfmon view'. If badmin is not found, then exit.
#
# Check the return status from 'badmin perfmon view' and take the appropriate action:
# - If return status is 7, it means that performance monitoring is not enabled. The script
#   will enable LSF performance metric monitoring by running 'badmin perfmon start'.
#   Note that a 70 second sleep is required before LSF metrics will be available.
# - If return status is 65, it means that the badmin command reported that the
#   LSF batch system is down. This is a fatal error which will cause the script
#   to exit.
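#
# For reference, each metric printed by this script is one InfluxDB line
# protocol record of the form (illustrative values, not real output):
#   measurement,tag=value field=<integer>i <timestamp_ns>
# e.g. lsf_jobs,state=running value=12i 1674246976000000000
# The trailing i marks an integer field; the timestamp is nanoseconds since the epoch.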
#
lsf_path = os.environ['LSF_BINDIR']
badmin_path = lsf_path + \"/badmin\"
bqueues_path = lsf_path + \"/bqueues\"

path = Path(badmin_path)
if path.is_file():
    cmd = [badmin_path, 'perfmon', 'view']
    p = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    while p.poll() is None:
        time.sleep(0.1)
    return_code = p.returncode
    if return_code == 7:
        cmd = [badmin_path, 'perfmon', 'start']
        p = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        while p.poll() is None:
            time.sleep(0.1)
        return_code = p.returncode
        time.sleep(70)
    elif return_code == 65:
        sys.exit(f'The LSF batch system is down.')
else:
    sys.exit(f'{badmin_path} does not exist.')

#
# Run badmin with the \"perfmon view\" keywords and the -json option to produce JSON output.
# We assume here that the LSF batch system is responsive (a check was done above); if
# the mbatchd is very busy there is a possibility that it may not be responsive here. This
# case is not considered; the LSB_NTRIES setting will determine how many tries are made before
# badmin gives up the ghost.
#
cmd = [badmin_path, 'perfmon', 'view', '-json']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
stdout, stderr = p.communicate()

#
# Guard for the case that the performance monitor has just been enabled, but is not
# producing any data as the first sample period has not elapsed.
#
if stdout == \"\":
    sys.exit(f'Output from badmin perfmon view -json is empty.')
else:
    data = json.loads(stdout)

#
# Run badmin showstatus
# Next, run the command 'badmin showstatus' and capture the output. Note that badmin showstatus
# does not produce JSON output. So here we must do some scraping of the output.
# The output from 'badmin showstatus' is placed into the array 'showstatus'. The hard-coded
# positions in the output of 'badmin showstatus' are assumed when building the output
# strings below. Should the format of the output of 'badmin showstatus' change, this will
# need to be updated.

cmd = [badmin_path, 'showstatus']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
stdout, stderr = p.communicate()

# Convert badmin showstatus output into an array
showstatus = stdout.split()

#
# Run bqueues
#
cmd = [bqueues_path, '-json', '-o', 'queue_name:12 njobs pend run susp rsv ususp ssusp']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
stdout, stderr = p.communicate()
data_queues = json.loads(stdout)

#
# At this stage, we've captured the output from 'badmin perfmon view -json' and
# 'badmin showstatus'. We're now ready to print to standard output the metric
# strings in InfluxDB line protocol format.
#
# Details about the line protocol format can be found here:
# https://docs.influxdata.com/influxdb/v2.6/reference/syntax/line-protocol/
#

#
# LSF server status
#
print(\"lsf_servers,\",\"status=total\",\" value=\",showstatus[21],\"i \",time_nanosec,sep='')
print(\"lsf_servers,\",\"status=ok\",\" value=\",showstatus[23],\"i \",time_nanosec,sep='')
print(\"lsf_servers,\",\"status=closed\",\" value=\",showstatus[25],\"i \",time_nanosec,sep='')
print(\"lsf_servers,\",\"status=unreachable\",\" value=\",showstatus[27],\"i \",time_nanosec,sep='')
print(\"lsf_servers,\",\"status=unavailable\",\" value=\",showstatus[29],\"i \",time_nanosec,sep='')

#
# LSF job status
#
print(\"lsf_jobs,\",\"state=total\",\" value=\",showstatus[33],\"i \",time_nanosec,sep='')
print(\"lsf_jobs,\",\"state=running\",\" value=\",showstatus[35],\"i \",time_nanosec,sep='')
print(\"lsf_jobs,\",\"state=suspended\",\" value=\",showstatus[37],\"i \",time_nanosec,sep='')
print(\"lsf_jobs,\",\"state=pending\",\" value=\",showstatus[39],\"i \",time_nanosec,sep='')
print(\"lsf_jobs,\",\"state=finished\",\" value=\",showstatus[41],\"i \",time_nanosec,sep='')

#
# LSF user stats
#
print(\"lsf_users,\",\"state=numusers\",\" value=\",showstatus[45],\"i \",time_nanosec,sep='')
print(\"lsf_users,\",\"state=numgroups\",\" value=\",showstatus[50],\"i \",time_nanosec,sep='')
print(\"lsf_users,\",\"state=numactive\",\" value=\",showstatus[55],\"i \",time_nanosec,sep='')

#
# LSF hosts stats
# First we split out the current and peak values for clients, servers, cpus, cores, and slots.
# The current and peak values are separated by the \"/\" delimiter.
#
clientssplit = showstatus[9].split(\"/\")
serverssplit = showstatus[11].split(\"/\")
cpussplit = showstatus[13].split(\"/\")
coressplit = showstatus[15].split(\"/\")
slotssplit = showstatus[17].split(\"/\")

print(\"lsf_hosts,\",\"state=clients\",\" current=\",clientssplit[0],\"i,\",\"peak=\",clientssplit[1],\"i \",time_nanosec,sep='')
print(\"lsf_hosts,\",\"state=servers\",\" current=\",serverssplit[0],\"i,\",\"peak=\",serverssplit[1],\"i \",time_nanosec,sep='')
print(\"lsf_hosts,\",\"state=cpus\",\" current=\",cpussplit[0],\"i,\",\"peak=\",cpussplit[1],\"i \",time_nanosec,sep='')
print(\"lsf_hosts,\",\"state=cores\",\" current=\",coressplit[0],\"i,\",\"peak=\",coressplit[1],\"i \",time_nanosec,sep='')
print(\"lsf_hosts,\",\"state=slots\",\" current=\",slotssplit[0],\"i,\",\"peak=\",slotssplit[1],\"i \",time_nanosec,sep='')

#
# Print mbatchd query metrics
#
print(\"lsf_mbatchd,\",\"query=job\",\" value=\",data['record'][1]['current'],\"i \",time_nanosec,sep='')
print(\"lsf_mbatchd,\",\"query=host\",\" value=\",data['record'][2]['current'],\"i \",time_nanosec,sep='')
print(\"lsf_mbatchd,\",\"query=queue\",\" value=\",data['record'][3]['current'],\"i \",time_nanosec,sep='')

#
# Print mbatchd job metrics
#
print(\"lsf_mbatchd,\",\"jobs=submitreqs\",\" value=\",data['record'][4]['current'],\"i \",time_nanosec,sep='')
print(\"lsf_mbatchd,\",\"jobs=submitted\",\" value=\",data['record'][5]['current'],\"i \",time_nanosec,sep='')
print(\"lsf_mbatchd,\",\"jobs=dispatched\",\" value=\",data['record'][6]['current'],\"i \",time_nanosec,sep='')
print(\"lsf_mbatchd,\",\"jobs=completed\",\" value=\",data['record'][7]['current'],\"i \",time_nanosec,sep='')
print(\"lsf_mbatchd,\",\"jobs=sentremote\",\" value=\",data['record'][8]['current'],\"i \",time_nanosec,sep='')
print(\"lsf_mbatchd,\",\"jobs=acceptremote\",\" value=\",data['record'][9]['current'],\"i \",time_nanosec,sep='')
value=\",data['record'][10]['current'],\"i \",time_nanosec,sep='')print(\"lsf_mbatchd,\",\"sched=matchhost\",\" value=\",data['record'][11]['current'],\"i \",time_nanosec,sep='')print(\"lsf_mbatchd,\",\"sched=buckets\",\" value=\",data['record'][12]['current'],\"i \",time_nanosec,sep='')print(\"lsf_mbatchd,\",\"sched=reordered\",\" value=\",data['record'][13]['current'],\"i \",time_nanosec,sep='')## Print mbatchd efficiency metrics. Here check if the efficiency metric indicated is \"-\". If so, # then assume a zero value. The trailing \"%\" sign on the metrics (percentages) is also stripped here. #slots = (data['record'][14]['current'])slots_percent = slotsif slots_percent == \"-\": slots_percent = \"0\"elif slots_percent != \"0\": # Strip % sign and decimal. This is to work around issue inserting float to InfluxDB # \"type float, already exists as type integer dropped ...\" slots_percent = slots[:-4]memory = (data['record'][15]['current'])memory_percent = memoryif memory_percent == \"-\": memory_percent = \"0\"elif memory_percent != \"0\": # Strip % sign and decimal. This is to work around issue inserting float to InfluxDB # \"type float, already exists as type integer dropped ...\" memory_percent = memory[:-4]print(\"lsf_mbatchd,\",\"utilization=slots\",\" value=\",slots_percent,\"i \",time_nanosec,sep='')print(\"lsf_mbatchd,\",\"utilization=memory\",\" value=\",memory_percent,\"i \",time_nanosec,sep='')## Print mbatchd file descriptor usage#print(\"lsf_mbatchd,\",\"fd=free\",\" value=\",data['fd']['free'],\"i \",time_nanosec,sep='')print(\"lsf_mbatchd,\",\"fd=used\",\" value=\",data['fd']['used'],\"i \",time_nanosec,sep='')print(\"lsf_mbatchd,\",\"fd=total\",\" value=\",data['fd']['total'],\"i \",time_nanosec,sep='')## Print LSF queue status (njobs)#iterations = data_queues[\"QUEUES\"]for n in range(iterations): print(\"lsf_queues,\",\"name=\", data_queues['RECORDS'][n]['QUEUE_NAME'], \" njobs=\", data_queues['RECORDS'][n]['NJOBS'],\"i,\", \"pend=\", data_queues['RECORDS'][n]['PEND'],\"i,\", \"run=\", data_queues['RECORDS'][n]['RUN'],\"i,\", \"susp=\", data_queues['RECORDS'][n]['SUSP'],\"i,\", \"rsv=\", data_queues['RECORDS'][n]['RSV'],\"i,\", \"ususp=\", data_queues['RECORDS'][n]['USUSP'],\"i,\", \"ssusp=\", data_queues['RECORDS'][n]['SSUSP'],\"i \", time_nanosec, sep='')exit() Bringing it all togetherFor completeness, below is the detail regarding the configuration of the environment. It should be noted that the simple test environment consists of a single server running IBMSpectrum LSF Suite for HPC and a separate server which runs the InfluxDB instance.HostnameComponentVersionkilencOS (LSF mgmt server)CentOS Stream release 8 (ppc64le)kilencSpectrum LSF Suite for HPCv10.2.0.13adatbazisOS (InfluxDB server)Fedora release 36 (aarch64)adatbazisInfluxDBv1.8.10kilencTelegrafv1.24.3kilencGrafanav9.1.6The follwing steps assume that IBM Spectrum LSF Suite for HPC, InfluxDB and Telegraf have been installed.Start InfluxDB on the host adatbazisOn the LSF management server kilenc, configure telegraf to connect to the influxDB instance on host adatbazis. 
Edit the configuration /etc/telegraf/telegraf.conf and specify the correct URL in the outputs.influxdb section as follows:

```
# # Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
#   ## The full HTTP or UDP URL for your InfluxDB instance.
#   ##
#   ## Multiple URLs can be specified for a single cluster, only ONE of the
#   ## urls will be written to each interval.
#   # urls = ["unix:///var/run/influxdb.sock"]
#   # urls = ["udp://127.0.0.1:8089"]
#   # urls = ["http://127.0.0.1:8086"]
# Added gsamu Jan 04 2023
urls = ["http://adatbazis:8086"]
```

Next, on the LSF management server kilenc, configure telegraf to run the custom plugin script lsf_telegraf_agent_0.9.py to collect and log metrics from IBM Spectrum LSF Suite for HPC. Edit the configuration /etc/telegraf/telegraf.conf and specify the correct command path in the section inputs.exec. Additionally, set data_format equal to influx. Note that the script lsf_telegraf_agent_0.9.py was copied to the directory /etc/telegraf/telegraf.d/scripts with permissions octal 755 and owner set to user telegraf.

Note: User telegraf was automatically created during the installation of telegraf.

```
# ## Gather LSF metrics
[[inputs.exec]]
  ## Commands array
  commands = [ "/etc/telegraf/telegraf.d/scripts/lsf_telegraf_agent_0.9.py" ]
  timeout = "30s"
  interval = "30s"
  data_format = "influx"
# ## End LSF metrics
```

Telegraf also provides the ability to collect metrics on processes. Here we'll use the telegraf procstat facility to monitor the LSF mbatchd and mbschd daemons, as well as the management lim. mbatchd and mbschd are the key daemons involved in handling query requests and making scheduling decisions for jobs in the environment. Edit the configuration /etc/telegraf/telegraf.conf and add the following inputs.procstat sections:

```
# ## Monitor CPU and memory utilization for LSF processes
# ## mbatchd, mbschd, lim (manager)
[[inputs.procstat]]
exe = "lim"
pattern = "lim"
pid_finder = "pgrep"

[[inputs.procstat]]
exe = "mbschd"
pattern = "mbschd"
pid_finder = "pgrep"

[[inputs.procstat]]
exe = "mbatchd"
pattern = "mbatchd"
pid_finder = "pgrep"
```

With the telegraf configuration complete, it's now time to test whether the configuration and the custom LSF agent are functioning as expected. This is achieved by running the command telegraf --config /etc/telegraf/telegraf.conf --test on the LSF management candidate host kilenc, and assumes that the LSF daemons are up and running.

Note: Any errors in the configuration file /etc/telegraf/telegraf.conf will result in errors in the output.

Output of telegraf --config /etc/telegraf/telegraf.conf --test:

```
[root@kilenc telegraf]# pwd/etc/telegraf[root@kilenc telegraf]# telegraf --config /etc/telegraf/telegraf.conf --test> mem,host=kilenc active=1938817024i,available=6820003840i,available_percent=20.653390597462806,buffered=4849664i,cached=6317735936i,commit_limit=33560395776i,committed_as=18635292672i,dirty=4128768i,free=2623799296i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=13852016640i,low_free=0i,low_total=0i,mapped=1007353856i,page_tables=22478848i,shared=259063808i,slab=4946919424i,sreclaimable=902234112i,sunreclaim=4044685312i,swap_cached=3866624i,swap_free=16994729984i,swap_total=17049780224i,total=33021231104i,used=24074846208i,used_percent=72.90717336424115,vmalloc_chunk=0i,vmalloc_total=562949953421312i,vmalloc_used=0i,write_back=0i,write_back_tmp=0i 1674246976000000000> kernel,host=kilenc boot_time=1673790850i,context_switches=1943864437i,entropy_avail=4037i,interrupts=1294179599i,processes_forked=4255316i 1674246976000000000> swap,host=kilenc free=16994729984i,total=17049780224i,used=55050240i,used_percent=0.3228794698626609 1674246976000000000> swap,host=kilenc in=172032i,out=851968i 1674246976000000000> net,host=kilenc,interface=lo bytes_recv=90039931116i,bytes_sent=90039931116i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=17245997i,packets_sent=17245997i 1674246976000000000> net,host=kilenc,interface=enP4p1s0f0 bytes_recv=0i,bytes_sent=0i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=0i,packets_sent=0i 1674246976000000000> net,host=kilenc,interface=enP4p1s0f1 bytes_recv=11791041280i,bytes_sent=1701152001i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=10322276i,packets_sent=4594948i 1674246976000000000> net,host=kilenc,interface=all icmp_inaddrmaskreps=0i,icmp_inaddrmasks=0i,icmp_incsumerrors=0i,icmp_indestunreachs=8609i,icmp_inechoreps=20i,icmp_inechos=11i,icmp_inerrors=1084i,icmp_inmsgs=8640i,icmp_inparmprobs=0i,icmp_inredirects=0i,icmp_insrcquenchs=0i,icmp_intimeexcds=0i,icmp_intimestampreps=0i,icmp_intimestamps=0i,icmp_outaddrmaskreps=0i,icmp_outaddrmasks=0i,icmp_outdestunreachs=4805i,icmp_outechoreps=11i,icmp_outechos=94i,icmp_outerrors=0i,icmp_outmsgs=4910i,icmp_outparmprobs=0i,icmp_outredirects=0i,icmp_outsrcquenchs=0i,icmp_outtimeexcds=0i,icmp_outtimestampreps=0i,icmp_outtimestamps=0i,icmpmsg_intype0=20i,icmpmsg_intype3=8609i,icmpmsg_intype8=11i,icmpmsg_outtype0=11i,icmpmsg_outtype3=4805i,icmpmsg_outtype8=94i,ip_defaultttl=64i,ip_forwarding=1i,ip_forwdatagrams=0i,ip_fragcreates=62958i,ip_fragfails=0i,ip_fragoks=12611i,ip_inaddrerrors=1i,ip_indelivers=21324370i,ip_indiscards=0i,ip_inhdrerrors=0i,ip_inreceives=21324371i,ip_inunknownprotos=0i,ip_outdiscards=0i,ip_outnoroutes=30i,ip_outrequests=21248264i,ip_reasmfails=0i,ip_reasmoks=0i,ip_reasmreqds=0i,ip_reasmtimeout=0i,tcp_activeopens=763497i,tcp_attemptfails=96617i,tcp_currestab=118i,tcp_estabresets=1917i,tcp_incsumerrors=0i,tcp_inerrs=0i,tcp_insegs=19488475i,tcp_maxconn=-1i,tcp_outrsts=137188i,tcp_outsegs=20220038i,tcp_passiveopens=675805i,tcp_retranssegs=9827i,tcp_rtoalgorithm=1i,tcp_rtomax=120000i,tcp_rtomin=200i,udp_ignoredmulti=10509i,udp_incsumerrors=0i,udp_indatagrams=1816997i,udp_inerrors=0i,udp_memerrors=0i,udp_noports=264i,udp_outdatagrams=1506724i,udp_rcvbuferrors=0i,udp_sndbuferrors=0i,udplite_ignoredmulti=0i,udplite_incsumerrors=0i,udplite_indatagrams=0i,udplite_inerrors=0i,udplite_memerrors=0i,udplite_noports=0i,udplite_outdatagrams=0i,udplite_rcvbuferrors=0i,udplite_sndbuferrors=0i 1674246976000000000> 
diskio,host=kilenc,name=dm-2 io_time=9739370i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=4015612416i,read_time=604060i,reads=40592i,weighted_io_time=60563370i,write_bytes=47025459712i,write_time=59959310i,writes=1079691i 1674246976000000000> diskio,host=kilenc,name=sda1 io_time=1460i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=4849664i,read_time=1304i,reads=67i,weighted_io_time=1304i,write_bytes=0i,write_time=0i,writes=0i 1674246976000000000> diskio,host=kilenc,name=sda3 io_time=45872430i,iops_in_progress=0i,merged_reads=623i,merged_writes=1061314i,read_bytes=16398521856i,read_time=3371612i,reads=139298i,weighted_io_time=311521720i,write_bytes=133715422208i,write_time=308150107i,writes=7031512i 1674246976000000000> diskio,host=kilenc,name=dm-1 io_time=5780i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=5636096i,read_time=3030i,reads=81i,weighted_io_time=26500i,write_bytes=13631488i,write_time=23470i,writes=208i 1674246976000000000> disk,device=dm-0,fstype=xfs,host=kilenc,mode=rw,path=/ free=9315028992i,inodes_free=18214222i,inodes_total=19822888i,inodes_used=1608666i,total=53660876800i,used=44345847808i,used_percent=82.64093032486566 1674246976000000000> disk,device=sda2,fstype=ext4,host=kilenc,mode=rw,path=/boot free=309653504i,inodes_free=65264i,inodes_total=65536i,inodes_used=272i,total=1020702720i,used=640585728i,used_percent=67.41310045173972 1674246976000000000> disk,device=dm-2,fstype=xfs,host=kilenc,mode=rw,path=/home free=856442515456i,inodes_free=452529686i,inodes_total=453312512i,inodes_used=782826i,total=927930712064i,used=71488196608i,used_percent=7.704044674735306 1674246976000000000> disk,device=dm-2,fstype=xfs,host=kilenc,mode=rw,path=/home/opt/at13.0/lib free=856442515456i,inodes_free=452529686i,inodes_total=453312512i,inodes_used=782826i,total=927930712064i,used=71488196608i,used_percent=7.704044674735306 1674246976000000000> disk,device=dm-2,fstype=xfs,host=kilenc,mode=rw,path=/home/opt/at13.0/lib64 free=856442515456i,inodes_free=452529686i,inodes_total=453312512i,inodes_used=782826i,total=927930712064i,used=71488196608i,used_percent=7.704044674735306 1674246976000000000> disk,device=ST31000524AS/raktar,fstype=zfs,host=kilenc,mode=rw,path=/mnt/ST31000524AS free=210837438464i,inodes_free=411792117i,inodes_total=412304487i,inodes_used=512370i,total=965496143872i,used=754658705408i,used_percent=78.16278813725106 1674246976000000000> diskio,host=kilenc,name=sda io_time=45899860i,iops_in_progress=0i,merged_reads=650i,merged_writes=1061332i,read_bytes=16495536128i,read_time=3440899i,reads=141325i,weighted_io_time=311596362i,write_bytes=133715696640i,write_time=308155462i,writes=7031531i 1674246976000000000> disk,device=ST31000524AS,fstype=zfs,host=kilenc,mode=rw,path=/ST31000524AS free=210837438464i,inodes_free=411792117i,inodes_total=411792123i,inodes_used=6i,total=210837569536i,used=131072i,used_percent=0.00006216728844316324 1674246976000000000> diskio,host=kilenc,name=sda2 io_time=18060i,iops_in_progress=0i,merged_reads=27i,merged_writes=18i,read_bytes=88372224i,read_time=31224i,reads=436i,weighted_io_time=36579i,write_bytes=274432i,write_time=5355i,writes=19i 1674246976000000000> diskio,host=kilenc,name=dm-0 io_time=38788720i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=12341294080i,read_time=1143210i,reads=51814i,weighted_io_time=303329620i,write_bytes=86676331008i,write_time=302186410i,writes=6798400i 1674246976000000000> diskio,host=kilenc,name=sdb 
io_time=668810i,iops_in_progress=0i,merged_reads=9i,merged_writes=58i,read_bytes=104550912i,read_time=746540i,reads=31054i,weighted_io_time=1445858i,write_bytes=10845920256i,write_time=699318i,writes=124780i 1674246976000000000> diskio,host=kilenc,name=sdb1 io_time=341330i,iops_in_progress=0i,merged_reads=0i,merged_writes=58i,read_bytes=95562240i,read_time=383066i,reads=25026i,weighted_io_time=1082385i,write_bytes=10845920256i,write_time=699318i,writes=124780i 1674246976000000000> diskio,host=kilenc,name=sdb9 io_time=190i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=4980736i,read_time=37i,reads=69i,weighted_io_time=37i,write_bytes=0i,write_time=0i,writes=0i 1674246976000000000> system,host=kilenc load1=2.06,load15=2.12,load5=2.12,n_cpus=32i,n_users=0i 1674246976000000000> system,host=kilenc uptime=456127i 1674246976000000000> system,host=kilenc uptime_format=\"5 days, 6:42\" 1674246976000000000> processes,host=kilenc blocked=1i,dead=0i,idle=569i,paging=0i,parked=1i,running=0i,sleeping=412i,stopped=0i,total=1366i,total_threads=2683i,unknown=0i,zombies=0i 1674246976000000000> lsf_servers,host=kilenc,status=total value=1i 1674246976000000000> lsf_servers,host=kilenc,status=ok value=1i 1674246976000000000> lsf_servers,host=kilenc,status=closed value=0i 1674246976000000000> lsf_servers,host=kilenc,status=unreachable value=0i 1674246976000000000> lsf_servers,host=kilenc,status=unavailable value=0i 1674246976000000000> lsf_jobs,host=kilenc,state=total value=121776i 1674246976000000000> lsf_jobs,host=kilenc,state=running value=32i 1674246976000000000> lsf_jobs,host=kilenc,state=suspended value=0i 1674246976000000000> lsf_jobs,host=kilenc,state=pending value=120771i 1674246976000000000> lsf_jobs,host=kilenc,state=finished value=973i 1674246976000000000> lsf_users,host=kilenc,state=numusers value=4i 1674246976000000000> lsf_users,host=kilenc,state=numgroups value=1i 1674246976000000000> lsf_users,host=kilenc,state=numactive value=1i 1674246976000000000> lsf_hosts,host=kilenc,state=clients current=0i,peak=0i 1674246976000000000> lsf_hosts,host=kilenc,state=servers current=1i,peak=1i 1674246976000000000> lsf_hosts,host=kilenc,state=cpus current=2i,peak=2i 1674246976000000000> lsf_hosts,host=kilenc,state=cores current=32i,peak=32i 1674246976000000000> lsf_hosts,host=kilenc,state=slots current=32i,peak=32i 1674246976000000000> lsf_mbatchd,host=kilenc,query=job value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,query=host value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,query=queue value=2i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=submitreqs value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=submitted value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=dispatched value=19i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=completed value=12i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=sentremote value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=acceptremote value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,sched=interval value=1i 1674246976000000000> lsf_mbatchd,host=kilenc,sched=matchhost value=5i 1674246976000000000> lsf_mbatchd,host=kilenc,sched=buckets value=5i 1674246976000000000> lsf_mbatchd,host=kilenc,sched=reordered value=7i 1674246976000000000> lsf_mbatchd,host=kilenc,utilization=slots value=100i 1674246976000000000> lsf_mbatchd,host=kilenc,utilization=memory value=0i 1674246976000000000> lsf_mbatchd,fd=free,host=kilenc value=65509i 1674246976000000000> lsf_mbatchd,fd=used,host=kilenc value=26i 1674246976000000000> 
lsf_mbatchd,fd=total,host=kilenc value=65535i 1674246976000000000> lsf_queues,host=kilenc,name=admin njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=owners njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=priority njobs=93951i,pend=93923i,rsv=0i,run=28i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=night njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=short njobs=2504i,pend=2504i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=dataq njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=normal njobs=1750i,pend=1750i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=interactive njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=sendq njobs=22598i,pend=22594i,rsv=0i,run=4i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=idle njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> cpu,cpu=cpu0,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu4,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu8,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu12,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu16,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=98.03921568448419,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=1.9607843137324836 1674246977000000000> cpu,cpu=cpu20,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu24,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu28,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu32,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu36,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu40,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=98.03921568448419,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=1.9607843136879006,usage_user=0 1674246977000000000> cpu,cpu=cpu44,host=kilenc 
usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu48,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu52,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=0,usage_iowait=100,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu56,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu60,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu64,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=87.99999999906868,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=10.000000001155058,usage_user=2.0000000002764864 1674246977000000000> cpu,cpu=cpu68,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu72,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=86.27450980280263,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=11.764705882127403,usage_user=1.9607843137324836 1674246977000000000> cpu,cpu=cpu76,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu80,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=92.30769231113655,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=3.8461538464431086,usage_user=3.84615384653056 1674246977000000000> cpu,cpu=cpu84,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=94.11764706486585,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=5.882352941197451 1674246977000000000> cpu,cpu=cpu88,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu92,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=70.58823529344627,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=29.411764701983955,usage_user=0 1674246977000000000> cpu,cpu=cpu96,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=96.15384615040192,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=3.8461538460125784,usage_user=0 1674246977000000000> cpu,cpu=cpu100,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=97.99999999813735,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=1.999999999998181,usage_user=0 1674246977000000000> cpu,cpu=cpu104,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=96.07843137993407,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=3.92156862782338,usage_user=0 1674246977000000000> cpu,cpu=cpu108,host=kilenc 
usage_guest=0,usage_guest_nice=0,usage_idle=96.07843136896838,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=1.9607843136879006,usage_user=1.9607843137324836 1674246977000000000> cpu,cpu=cpu112,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu116,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=95.91836734305988,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.08163265313509,usage_user=0 1674246977000000000> cpu,cpu=cpu120,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=84.61538461280144,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=3.8461538460344413,usage_user=11.53846153830009 1674246977000000000> cpu,cpu=cpu124,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu-total,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=93.47826086554115,usage_iowait=3.1055900618243673,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=2.484472049468532,usage_user=0.9316770186919254 1674246977000000000> procstat,exe=mbatchd,host=kilenc,process_name=mbatchd,user=root child_major_faults=0i,child_minor_faults=0i,cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=0.03,cpu_time_user=0.05,cpu_usage=0,created_at=1674246974000000000i,involuntary_context_switches=1i,major_faults=0i,memory_data=834994176i,memory_locked=0i,memory_rss=815595520i,memory_stack=327680i,memory_swap=0i,memory_usage=2.469912528991699,memory_vms=1091108864i,minor_faults=726i,nice_priority=20i,num_fds=10i,num_threads=2i,pid=62056i,ppid=4103699i,read_bytes=0i,read_count=27i,realtime_priority=0i,rlimit_cpu_time_hard=9223372036854775807i,rlimit_cpu_time_soft=9223372036854775807i,rlimit_file_locks_hard=9223372036854775807i,rlimit_file_locks_soft=9223372036854775807i,rlimit_memory_data_hard=9223372036854775807i,rlimit_memory_data_soft=9223372036854775807i,rlimit_memory_locked_hard=67108864i,rlimit_memory_locked_soft=67108864i,rlimit_memory_rss_hard=9223372036854775807i,rlimit_memory_rss_soft=9223372036854775807i,rlimit_memory_stack_hard=9223372036854775807i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=9223372036854775807i,rlimit_memory_vms_soft=9223372036854775807i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=262144i,rlimit_num_fds_soft=65535i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=118856i,rlimit_signals_pending_soft=118856i,signals_pending=0i,voluntary_context_switches=5i,write_bytes=0i,write_count=16i 1674246977000000000> procstat,exe=mbschd,host=kilenc,process_name=mbschd,user=lsfadmin 
child_major_faults=0i,child_minor_faults=2457641i,cpu_time=320i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0.02,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=8.4,cpu_time_user=312.14,cpu_usage=1.836645120693344,created_at=1674227581000000000i,involuntary_context_switches=3553i,major_faults=1i,memory_data=228851712i,memory_locked=0i,memory_rss=236847104i,memory_stack=196608i,memory_swap=0i,memory_usage=0.717257022857666,memory_vms=246808576i,minor_faults=2137969i,nice_priority=20i,num_fds=3i,num_threads=1i,pid=4103740i,ppid=4103699i,read_bytes=1552384i,read_count=936861i,realtime_priority=0i,rlimit_cpu_time_hard=9223372036854775807i,rlimit_cpu_time_soft=9223372036854775807i,rlimit_file_locks_hard=9223372036854775807i,rlimit_file_locks_soft=9223372036854775807i,rlimit_memory_data_hard=9223372036854775807i,rlimit_memory_data_soft=9223372036854775807i,rlimit_memory_locked_hard=67108864i,rlimit_memory_locked_soft=67108864i,rlimit_memory_rss_hard=9223372036854775807i,rlimit_memory_rss_soft=9223372036854775807i,rlimit_memory_stack_hard=9223372036854775807i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=9223372036854775807i,rlimit_memory_vms_soft=9223372036854775807i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=262144i,rlimit_num_fds_soft=65535i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=118856i,rlimit_signals_pending_soft=118856i,signals_pending=0i,voluntary_context_switches=43952i,write_bytes=0i,write_count=42311i 1674246977000000000> procstat_lookup,exe=mbschd,host=kilenc,pid_finder=pgrep,result=success pid_count=1i,result_code=0i,running=1i 1674246977000000000> procstat,exe=mbatchd,host=kilenc,process_name=mbatchd,user=root child_major_faults=2i,child_minor_faults=4476280i,cpu_time=177i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=6.68,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=51.01,cpu_time_user=126.42,cpu_usage=0,created_at=1674227573000000000i,involuntary_context_switches=4993i,major_faults=3i,memory_data=834994176i,memory_locked=0i,memory_rss=827785216i,memory_stack=327680i,memory_swap=0i,memory_usage=2.5068273544311523,memory_vms=1091108864i,minor_faults=2406945i,nice_priority=20i,num_fds=26i,num_threads=3i,pid=4103699i,ppid=4103684i,read_bytes=21008384i,read_count=364726i,realtime_priority=0i,rlimit_cpu_time_hard=9223372036854775807i,rlimit_cpu_time_soft=9223372036854775807i,rlimit_file_locks_hard=9223372036854775807i,rlimit_file_locks_soft=9223372036854775807i,rlimit_memory_data_hard=9223372036854775807i,rlimit_memory_data_soft=9223372036854775807i,rlimit_memory_locked_hard=67108864i,rlimit_memory_locked_soft=67108864i,rlimit_memory_rss_hard=9223372036854775807i,rlimit_memory_rss_soft=9223372036854775807i,rlimit_memory_stack_hard=9223372036854775807i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=9223372036854775807i,rlimit_memory_vms_soft=9223372036854775807i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=262144i,rlimit_num_fds_soft=65535i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=118856i,rlimit_signals_pending_soft=118856i,signals_pending=0i,voluntary_context_switches=172583i,write_bytes=1562181632i,write_count=12164760i 1674246977000000000> procstat_lookup,exe=mbatchd,host=kilenc,pid_finder=pgrep,result=success pid_count=2i,result_code=0i,running=2i 
1674246977000000000
```

Assuming there were no errors in the previous step with telegraf, proceed to start the telegraf process via systemd:

```
[root@kilenc telegraf]# systemctl start telegraf
[root@kilenc telegraf]# systemctl status telegraf
● telegraf.service - Telegraf
   Loaded: loaded (/usr/lib/systemd/system/telegraf.service; enabled; vendor preset: disabled)
   Active: active (running) since Thu 2023-01-19 14:13:51 EST; 1 day 1h ago
     Docs: https://github.com/influxdata/telegraf
 Main PID: 3225959 (telegraf)
    Tasks: 35 (limit: 190169)
   Memory: 192.6M
   CGroup: /system.slice/telegraf.service
           └─3225959 /usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/tele>

Jan 19 14:13:51 kilenc systemd[1]: Starting Telegraf...
Jan 19 14:13:51 kilenc systemd[1]: Started Telegraf.
```
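Since the unit is already enabled (see the Loaded: line above), telegraf will also start automatically at boot; on a fresh setup this is done with systemctl enable telegraf. Any collector errors after startup can be inspected with journalctl -u telegraf.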
On the host running the database instance, adatbazis, perform queries to check whether the database telegraf exists and whether LSF-related data is being logged. This is confirmed in the output below.

Output from InfluxDB queries:

```
[root@adatbazis fedora]# influx
Connected to https://localhost:8086 version 1.8.10
InfluxDB shell version: 1.8.10
> auth
username: influx
password:
> show databases
name: databases
name
----
_internal
telegraf
> use telegraf
Using database telegraf
> show field keys
name: cpu
fieldKey          fieldType
--------          ---------
usage_guest       float
usage_guest_nice  float
usage_idle        float
usage_iowait      float
usage_irq         float
usage_nice        float
usage_softirq     float
usage_steal       float
usage_system      float
usage_user        float

name: disk
fieldKey      fieldType
--------      ---------
free          integer
inodes_free   integer
inodes_total  integer
inodes_used   integer
total         integer
used          integer
used_percent  float

name: diskio
fieldKey          fieldType
--------          ---------
io_time           integer
iops_in_progress  integer
merged_reads      integer
merged_writes     integer
read_bytes        integer
read_time         integer
reads             integer
weighted_io_time  integer
write_bytes       integer
write_time        integer
writes            integer

name: kernel
fieldKey          fieldType
--------          ---------
boot_time         integer
context_switches  integer
entropy_avail     integer
interrupts        integer
processes_forked  integer

name: lsf_hosts
fieldKey  fieldType
--------  ---------
current   integer
peak      integer

name: lsf_jobs
fieldKey  fieldType
--------  ---------
value     integer

name: lsf_mbatchd
fieldKey  fieldType
--------  ---------
value     integer

name: lsf_queues
fieldKey  fieldType
--------  ---------
njobs     integer
pend      integer
rsv       integer
run       integer
ssusp     integer
susp      integer
ususp     integer

name: lsf_servers
fieldKey  fieldType
--------  ---------
value     integer

name: lsf_users
fieldKey  fieldType
--------  ---------
value     integer

name: mem
fieldKey           fieldType
--------           ---------
active             integer
available          integer
available_percent  float
buffered           integer
cached             integer
commit_limit       integer
committed_as       integer
dirty              integer
free               integer
high_free          integer
high_total         integer
huge_page_size     integer
huge_pages_free    integer
huge_pages_total   integer
inactive           integer
low_free           integer
low_total          integer
mapped             integer
page_tables        integer
shared             integer
slab               integer
sreclaimable       integer
sunreclaim         integer
swap_cached        integer
swap_free          integer
swap_total         integer
total              integer
used               integer
used_percent       float
vmalloc_chunk      integer
vmalloc_total      integer
vmalloc_used       integer
write_back         integer
write_back_tmp     integer

name: net
fieldKey               fieldType
--------               ---------
bytes_recv             integer
bytes_sent             integer
drop_in                integer
drop_out               integer
err_in                 integer
err_out                integer
icmp_inaddrmaskreps    integer
icmp_inaddrmasks       integer
icmp_incsumerrors      integer
icmp_indestunreachs    integer
icmp_inechoreps        integer
icmp_inechos           integer
icmp_inerrors          integer
icmp_inmsgs            integer
icmp_inparmprobs       integer
icmp_inredirects       integer
icmp_insrcquenchs      integer
icmp_intimeexcds       integer
icmp_intimestampreps   integer
icmp_intimestamps      integer
icmp_outaddrmaskreps   integer
icmp_outaddrmasks      integer
icmp_outdestunreachs   integer
icmp_outechoreps       integer
icmp_outechos          integer
icmp_outerrors         integer
icmp_outmsgs           integer
icmp_outparmprobs      integer
icmp_outredirects      integer
icmp_outsrcquenchs     integer
icmp_outtimeexcds      integer
icmp_outtimestampreps  integer
icmp_outtimestamps     integer
icmpmsg_intype0        integer
icmpmsg_intype3        integer
icmpmsg_intype8        integer
icmpmsg_outtype0       integer
icmpmsg_outtype3       integer
icmpmsg_outtype8       integer
ip_defaultttl          integer
ip_forwarding          integer
ip_forwdatagrams       integer
ip_fragcreates         integer
ip_fragfails           integer
ip_fragoks             integer
ip_inaddrerrors        integer
ip_indelivers          integer
ip_indiscards          integer
ip_inhdrerrors         integer
ip_inreceives          integer
ip_inunknownprotos     integer
ip_outdiscards         integer
ip_outnoroutes         integer
ip_outrequests         integer
ip_reasmfails          integer
ip_reasmoks            integer
ip_reasmreqds          integer
ip_reasmtimeout        integer
packets_recv           integer
packets_sent           integer
tcp_activeopens        integer
tcp_attemptfails       integer
tcp_currestab          integer
tcp_estabresets        integer
tcp_incsumerrors       integer
tcp_inerrs             integer
tcp_insegs             integer
tcp_maxconn            integer
tcp_outrsts            integer
tcp_outsegs            integer
tcp_passiveopens       integer
tcp_retranssegs        integer
tcp_rtoalgorithm       integer
tcp_rtomax             integer
tcp_rtomin             integer
udp_ignoredmulti       integer
udp_incsumerrors       integer
udp_indatagrams        integer
udp_inerrors           integer
udp_memerrors          integer
udp_noports            integer
udp_outdatagrams       integer
udp_rcvbuferrors       integer
udp_sndbuferrors       integer
udplite_ignoredmulti   integer
udplite_incsumerrors   integer
udplite_indatagrams    integer
udplite_inerrors       integer
udplite_memerrors      integer
udplite_noports        integer
udplite_outdatagrams   integer
udplite_rcvbuferrors   integer
udplite_sndbuferrors   integer

name: processes
fieldKey       fieldType
--------       ---------
blocked        integer
dead           integer
idle           integer
paging         integer
parked         integer
running        integer
sleeping       integer
stopped        integer
total          integer
total_threads  integer
unknown        integer
zombies        integer

name: procstat
fieldKey                      fieldType
--------                      ---------
child_major_faults            integer
child_minor_faults            integer
cpu_time_guest                float
cpu_time_guest_nice           float
cpu_time_idle                 float
cpu_time_iowait               float
cpu_time_irq                  float
cpu_time_nice                 float
cpu_time_soft_irq             float
cpu_time_steal                float
cpu_time_system               float
cpu_time_user                 float
cpu_usage                     float
created_at                    integer
involuntary_context_switches  integer
major_faults                  integer
memory_data                   integer
memory_locked                 integer
memory_rss                    integer
memory_stack                  integer
memory_swap                   integer
memory_usage                  float
memory_vms                    integer
minor_faults                  integer
num_threads                   integer
pid                           integer
ppid                          integer
voluntary_context_switches    integer

name: procstat_lookup
fieldKey     fieldType
--------     ---------
pid_count    integer
result_code  integer
running      integer

name: swap
fieldKey      fieldType
--------      ---------
free          integer
in            integer
out           integer
total         integer
used          integer
used_percent  float

name: system
fieldKey        fieldType
--------        ---------
load1           float
load15          float
load5           float
n_cpus          integer
n_unique_users  integer
n_users         integer
uptime          integer
uptime_format   string
> select * from metrics
> SELECT * FROM "lsf_hosts";
name: lsf_hosts
time                current host   peak state
----                ------- ----   ---- -----
1674493170000000000 0       kilenc 0    clients
1674493170000000000 32      kilenc 32   slots
1674493170000000000 32      kilenc 32   cores
1674493170000000000 1       kilenc 1    servers
1674493170000000000 2       kilenc 2    cpus
1674493200000000000 1       kilenc 1    servers
1674493200000000000 2       kilenc 2    cpus
1674493200000000000 32      kilenc 32   slots
1674493200000000000 0       kilenc 0    clients
1674493200000000000 32      kilenc 32   cores
1674493230000000000 0       kilenc 0    clients
1674493230000000000 32      kilenc 32   cores
1674493230000000000 2       kilenc 2    cpus
1674493230000000000 1       kilenc 1    servers
1674493230000000000 32      kilenc 32   slots
1674493260000000000 1       kilenc 1    servers
1674493260000000000 32      kilenc 32   slots
1674493260000000000 0       kilenc 0    clients
1674493260000000000 2       kilenc 2    cpus
1674493260000000000 32      kilenc 32   cores
> quit
```
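The same check can be scripted rather than typed into the influx shell. A minimal sketch against the InfluxDB 1.x HTTP /query endpoint, again assuming the third-party requests library; the credentials are placeholders:

```python
import requests

# Run a query against the telegraf database over the InfluxDB 1.x HTTP API.
resp = requests.get(
    "http://adatbazis:8086/query",
    params={"db": "telegraf", "q": 'SELECT * FROM "lsf_hosts" LIMIT 5'},
    auth=("<influxdb_username>", "<influxdb_password>"),  # placeholders
    timeout=10,
)
resp.raise_for_status()

# The response nests rows under results -> series -> values.
for series in resp.json()["results"][0].get("series", []):
    print(series["name"], series["columns"])
    for row in series["values"]:
        print(row)
```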
With telegraf successfully logging data to the InfluxDB instance, it is now possible to create a data source in Grafana and build a dashboard containing LSF metrics. As noted at the outset, this article is not meant to be an extensive guide to the creation of dashboards in Grafana. In the Grafana navigation select Configuration > Data sources. Select the Add data source button, followed by InfluxDB, which is listed under Time series databases. On the settings page specify the following values:

| Variable | Value |
| --- | --- |
| URL | http://adatbazis:8086 |
| Database | telegraf |
| Basic auth | (enable) |
| User | <influxdb_username> |
| Password | <influxdb_password> |

Next, click on Save & test. If all variables and settings were properly specified, the message "datasource is working. 17 measurements found." is displayed.

With the datasource configured in Grafana, the final step is to create a dashboard. Creating a dashboard requires creating panels which display data pulled from the configured datasource using targeted queries. With a bit of effort, I was able to piece together the following dashboard, which includes both metrics from LSF and metrics from the Telegraf input.procstat facility for the LSF processes mbatchd, mbschd and the management lim.

Example dashboard definition (JSON):

{ \"annotations\": { \"list\": [ { \"builtIn\": 1, \"datasource\": { \"type\": \"datasource\", \"uid\": \"grafana\" }, \"enable\": true, \"hide\": true, \"iconColor\": \"rgba(0, 211, 255, 1)\", \"name\": \"Annotations & Alerts\", \"target\": { \"limit\": 100, \"matchAny\": false, \"tags\": [], \"type\": \"dashboard\" }, \"type\": \"dashboard\" } ] }, \"editable\": true, \"fiscalYearStartMonth\": 0, \"graphTooltip\": 0, \"id\": 17, \"links\": [], \"liveNow\": false, \"panels\": [ { \"collapsed\": false, \"gridPos\": { \"h\": 1, \"w\": 24, \"x\": 0, \"y\": 0 }, \"id\": 35, \"panels\": [], \"title\": \"Cluster aggregate current statistics\", \"type\": \"row\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"A view of the current status of the LSF servers in the cluster. Servers can be in one of four states: Ok, Unavailable, Closed and Unreachable. 
\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"palette-classic\" }, \"custom\": { \"hideFrom\": { \"legend\": false, \"tooltip\": false, \"viz\": false } }, \"decimals\": 2, \"mappings\": [] }, \"overrides\": [] }, \"gridPos\": { \"h\": 8, \"w\": 9, \"x\": 0, \"y\": 1 }, \"id\": 32, \"options\": { \"displayLabels\": [ \"name\", \"value\" ], \"legend\": { \"displayMode\": \"table\", \"placement\": \"right\", \"showLegend\": true, \"sortBy\": \"Value\", \"sortDesc\": true, \"values\": [ \"value\", \"percent\" ] }, \"pieType\": \"donut\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"tooltip\": { \"mode\": \"multi\", \"sort\": \"none\" } }, \"targets\": [ { \"alias\": \"Ok\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_servers\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"status\", \"operator\": \"=\", \"value\": \"ok\" } ] }, { \"alias\": \"Closed\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_servers\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"B\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"status\", \"operator\": \"=\", \"value\": \"closed\" } ] }, { \"alias\": \"Unreachable\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_servers\", \"orderByTime\": \"ASC\", \"policy\": \"default\", \"refId\": \"C\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"status\", \"operator\": \"=\", \"value\": \"unreachable\" } ] }, { \"alias\": \"Unavailable\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_servers\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"D\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"status\", \"operator\": \"=\", \"value\": \"unavailable\" } ] } ], \"title\": \"Current aggregate LSF server statistics\", \"type\": \"piechart\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 9, \"y\": 1 }, \"id\": 43, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", 
\"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"distinct\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"running\" } ] } ], \"title\": \"Currently running\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"light-red\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 12, \"y\": 1 }, \"id\": 45, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"default\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"suspended\" } ] } ], \"title\": \"Currently suspended\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"palette-classic\" }, \"custom\": { \"hideFrom\": { \"legend\": false, \"tooltip\": false, \"viz\": false } }, \"decimals\": 2, \"mappings\": [] }, \"overrides\": [] }, \"gridPos\": { \"h\": 8, \"w\": 9, \"x\": 15, \"y\": 1 }, \"id\": 33, \"options\": { \"displayLabels\": [ \"name\", \"value\" ], \"legend\": { \"displayMode\": \"table\", \"placement\": \"right\", \"showLegend\": true, \"sortBy\": \"Value\", \"sortDesc\": true, \"values\": [ \"value\", \"percent\" ] }, \"pieType\": \"donut\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"tooltip\": { \"mode\": \"multi\", \"sort\": \"none\" } }, \"targets\": [ { \"alias\": \"Running\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"running\" } ] }, { \"alias\": \"Pending\", \"datasource\": { \"type\": 
\"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"B\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"pending\" } ] }, { \"alias\": \"Suspended\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"C\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"suspended\" } ] } ], \"title\": \"Current aggregate LSF job statistics\", \"type\": \"piechart\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"yellow\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 9, \"y\": 5 }, \"id\": 44, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"default\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"pending\" } ] } ], \"title\": \"Currently pending \", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"blue\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 12, \"y\": 5 }, \"id\": 46, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"default\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], 
\"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"finished\" } ] } ], \"title\": \"Finished (past hour)\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"Spectrum LSF queue statistics. Here we show jobs in running, pending and suspended jobs. \", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"palette-classic\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null }, { \"color\": \"red\", \"value\": 80 } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 8, \"w\": 9, \"x\": 0, \"y\": 9 }, \"id\": 41, \"options\": { \"displayMode\": \"lcd\", \"minVizHeight\": 10, \"minVizWidth\": 0, \"orientation\": \"horizontal\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"showUnfilled\": true }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Running\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"lsf_queues\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"run\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"name\", \"operator\": \"=~\", \"value\": \"/^$Queue$/\" } ] }, { \"alias\": \"Pending\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_queues\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"B\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"pend\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"name\", \"operator\": \"=~\", \"value\": \"/^$Queue$/\" } ] }, { \"alias\": \"Suspended\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_queues\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"C\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"susp\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"name\", \"operator\": \"=~\", \"value\": \"/^$Queue$/\" } ] } ], \"title\": \"Current queue statistics ($Queue)\", \"type\": \"bargauge\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"min\": 0, \"thresholds\": { \"mode\": \"percentage\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] }, \"unit\": \"none\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 9, \"y\": 9 }, \"id\": 53, \"options\": { \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"/^lsf_hosts\\\\.last$/\", \"values\": false }, \"showThresholdLabels\": false, \"showThresholdMarkers\": true }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ 
\"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ], [ { \"params\": [ \"peak\" ], \"type\": \"field\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"servers\" } ] } ], \"title\": \"Servers\", \"type\": \"gauge\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"min\": 0, \"thresholds\": { \"mode\": \"percentage\", \"steps\": [ { \"color\": \"yellow\", \"value\": null } ] }, \"unit\": \"none\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 12, \"y\": 9 }, \"id\": 54, \"options\": { \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"/^lsf_hosts\\\\.last$/\", \"values\": false }, \"showThresholdLabels\": false, \"showThresholdMarkers\": true }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ], [ { \"params\": [ \"peak\" ], \"type\": \"field\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"cpus\" } ] } ], \"title\": \"CPUs\", \"type\": \"gauge\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"palette-classic\" }, \"custom\": { \"axisCenteredZero\": false, \"axisColorMode\": \"text\", \"axisLabel\": \"\", \"axisPlacement\": \"auto\", \"barAlignment\": 0, \"drawStyle\": \"line\", \"fillOpacity\": 0, \"gradientMode\": \"none\", \"hideFrom\": { \"legend\": false, \"tooltip\": false, \"viz\": false }, \"lineInterpolation\": \"stepBefore\", \"lineWidth\": 1, \"pointSize\": 5, \"scaleDistribution\": { \"log\": 2, \"type\": \"log\" }, \"showPoints\": \"auto\", \"spanNulls\": true, \"stacking\": { \"group\": \"A\", \"mode\": \"none\" }, \"thresholdsStyle\": { \"mode\": \"off\" } }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null }, { \"color\": \"red\", \"value\": 80 } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 8, \"w\": 9, \"x\": 15, \"y\": 9 }, \"id\": 42, \"options\": { \"legend\": { \"calcs\": [], \"displayMode\": \"list\", \"placement\": \"bottom\", \"showLegend\": true }, \"tooltip\": { \"mode\": \"single\", \"sort\": \"none\" } }, \"targets\": [ { \"alias\": \"Running\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": 
\"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"running\" } ] }, { \"alias\": \"Pending\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"B\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"pending\" } ] }, { \"alias\": \"Suspended\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"C\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"suspended\" } ] } ], \"title\": \"Aggregate LSF job statistics\", \"type\": \"timeseries\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"min\": 0, \"thresholds\": { \"mode\": \"percentage\", \"steps\": [ { \"color\": \"light-red\", \"value\": null } ] }, \"unit\": \"none\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 9, \"y\": 13 }, \"id\": 55, \"options\": { \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"/^lsf_hosts\\\\.last$/\", \"values\": false }, \"showThresholdLabels\": false, \"showThresholdMarkers\": true }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ], [ { \"params\": [ \"peak\" ], \"type\": \"field\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"cores\" } ] } ], \"title\": \"Cores\", \"type\": \"gauge\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"min\": 0, \"thresholds\": { \"mode\": \"percentage\", \"steps\": [ { \"color\": \"blue\", \"value\": null } ] }, \"unit\": \"none\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 12, \"y\": 13 }, \"id\": 56, \"options\": { \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"/^lsf_hosts\\\\.last$/\", \"values\": false }, \"showThresholdLabels\": false, \"showThresholdMarkers\": true }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { 
\"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ], [ { \"params\": [ \"peak\" ], \"type\": \"field\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"slots\" } ] } ], \"title\": \"Slots\", \"type\": \"gauge\" }, { \"collapsed\": false, \"gridPos\": { \"h\": 1, \"w\": 24, \"x\": 0, \"y\": 17 }, \"id\": 37, \"panels\": [], \"title\": \"LSF scheduler statistics\", \"type\": \"row\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"palette-classic\" }, \"custom\": { \"axisCenteredZero\": false, \"axisColorMode\": \"text\", \"axisLabel\": \"\", \"axisPlacement\": \"auto\", \"barAlignment\": 0, \"drawStyle\": \"line\", \"fillOpacity\": 10, \"gradientMode\": \"none\", \"hideFrom\": { \"graph\": false, \"legend\": false, \"tooltip\": false, \"viz\": false }, \"lineInterpolation\": \"linear\", \"lineWidth\": 1, \"pointSize\": 5, \"scaleDistribution\": { \"type\": \"linear\" }, \"showPoints\": \"never\", \"spanNulls\": true, \"stacking\": { \"group\": \"A\", \"mode\": \"none\" }, \"thresholdsStyle\": { \"mode\": \"off\" } }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null }, { \"color\": \"red\", \"value\": 80 } ] }, \"unit\": \"short\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 8, \"w\": 12, \"x\": 0, \"y\": 18 }, \"id\": 20, \"options\": { \"graph\": {}, \"legend\": { \"calcs\": [], \"displayMode\": \"list\", \"placement\": \"right\", \"showLegend\": true }, \"tooltip\": { \"mode\": \"single\", \"sort\": \"none\" } }, \"pluginVersion\": \"7.5.15\", \"targets\": [ { \"alias\": \"CPU utilization (%)\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"procstat\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"cpu_usage\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"exe\", \"operator\": \"=\", \"value\": \"mbatchd\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] }, { \"alias\": \"Memory utilization (%)\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"procstat\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"B\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"memory_usage\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"exe\", \"operator\": \"=\", \"value\": \"mbatchd\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] }, { \"alias\": \"Number of threads\", \"datasource\": { 
\"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"procstat\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"C\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"num_threads\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"exe\", \"operator\": \"=\", \"value\": \"mbatchd\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] }, { \"alias\": \"File descriptors\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_mbatchd\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"D\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"fd\", \"operator\": \"=\", \"value\": \"used\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] } ], \"title\": \"LSF mbatchd process metrics\", \"type\": \"timeseries\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"palette-classic\" }, \"custom\": { \"axisCenteredZero\": false, \"axisColorMode\": \"text\", \"axisLabel\": \"\", \"axisPlacement\": \"auto\", \"barAlignment\": 0, \"drawStyle\": \"line\", \"fillOpacity\": 10, \"gradientMode\": \"none\", \"hideFrom\": { \"graph\": false, \"legend\": false, \"tooltip\": false, \"viz\": false }, \"lineInterpolation\": \"linear\", \"lineWidth\": 1, \"pointSize\": 5, \"scaleDistribution\": { \"type\": \"linear\" }, \"showPoints\": \"never\", \"spanNulls\": true, \"stacking\": { \"group\": \"A\", \"mode\": \"none\" }, \"thresholdsStyle\": { \"mode\": \"off\" } }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null }, { \"color\": \"red\", \"value\": 80 } ] }, \"unit\": \"short\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 8, \"w\": 12, \"x\": 12, \"y\": 18 }, \"id\": 57, \"options\": { \"graph\": {}, \"legend\": { \"calcs\": [], \"displayMode\": \"list\", \"placement\": \"right\", \"showLegend\": true }, \"tooltip\": { \"mode\": \"single\", \"sort\": \"none\" } }, \"pluginVersion\": \"7.5.15\", \"targets\": [ { \"alias\": \"CPU utilization (%)\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"procstat\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"cpu_usage\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"exe\", \"operator\": \"=\", \"value\": \"lim\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] }, { \"alias\": \"Memory utilization (%)\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": 
\"procstat\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"B\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"memory_usage\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"exe\", \"operator\": \"=\", \"value\": \"lim\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] }, { \"alias\": \"Number of threads\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"procstat\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"C\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"num_threads\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"exe\", \"operator\": \"=\", \"value\": \"lim\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] } ], \"title\": \"LSF management lim process metrics\", \"type\": \"timeseries\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"palette-classic\" }, \"custom\": { \"axisCenteredZero\": false, \"axisColorMode\": \"text\", \"axisLabel\": \"\", \"axisPlacement\": \"auto\", \"barAlignment\": 0, \"drawStyle\": \"line\", \"fillOpacity\": 10, \"gradientMode\": \"none\", \"hideFrom\": { \"graph\": false, \"legend\": false, \"tooltip\": false, \"viz\": false }, \"lineInterpolation\": \"linear\", \"lineWidth\": 1, \"pointSize\": 5, \"scaleDistribution\": { \"type\": \"linear\" }, \"showPoints\": \"never\", \"spanNulls\": true, \"stacking\": { \"group\": \"A\", \"mode\": \"none\" }, \"thresholdsStyle\": { \"mode\": \"off\" } }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null }, { \"color\": \"red\", \"value\": 80 } ] }, \"unit\": \"short\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 8, \"w\": 12, \"x\": 0, \"y\": 26 }, \"id\": 27, \"options\": { \"graph\": {}, \"legend\": { \"calcs\": [], \"displayMode\": \"list\", \"placement\": \"right\", \"showLegend\": true }, \"tooltip\": { \"mode\": \"single\", \"sort\": \"none\" } }, \"pluginVersion\": \"7.5.15\", \"targets\": [ { \"alias\": \"Job buckets\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"lsf_mbatchd\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"sched\", \"operator\": \"=\", \"value\": \"buckets\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] }, { \"alias\": \"Matching host criteria\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_mbatchd\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"B\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], 
\"tags\": [ { \"key\": \"sched\", \"operator\": \"=\", \"value\": \"matchhost\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] }, { \"alias\": \"Scheduling interval (seconds)\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_mbatchd\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"C\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"sched\", \"operator\": \"=\", \"value\": \"interval\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] } ], \"title\": \"LSF scheduler metrics\", \"type\": \"timeseries\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"palette-classic\" }, \"custom\": { \"axisCenteredZero\": false, \"axisColorMode\": \"text\", \"axisLabel\": \"\", \"axisPlacement\": \"auto\", \"barAlignment\": 0, \"drawStyle\": \"line\", \"fillOpacity\": 10, \"gradientMode\": \"none\", \"hideFrom\": { \"graph\": false, \"legend\": false, \"tooltip\": false, \"viz\": false }, \"lineInterpolation\": \"linear\", \"lineWidth\": 1, \"pointSize\": 5, \"scaleDistribution\": { \"type\": \"linear\" }, \"showPoints\": \"never\", \"spanNulls\": true, \"stacking\": { \"group\": \"A\", \"mode\": \"none\" }, \"thresholdsStyle\": { \"mode\": \"off\" } }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null }, { \"color\": \"red\", \"value\": 80 } ] }, \"unit\": \"short\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 8, \"w\": 12, \"x\": 12, \"y\": 26 }, \"id\": 58, \"options\": { \"graph\": {}, \"legend\": { \"calcs\": [], \"displayMode\": \"list\", \"placement\": \"right\", \"showLegend\": true }, \"tooltip\": { \"mode\": \"single\", \"sort\": \"none\" } }, \"pluginVersion\": \"7.5.15\", \"targets\": [ { \"alias\": \"CPU utilization (%)\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"procstat\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"cpu_usage\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"exe\", \"operator\": \"=\", \"value\": \"mbschd\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] }, { \"alias\": \"Memory utilization (%)\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"procstat\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"B\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"memory_usage\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"exe\", \"operator\": \"=\", \"value\": \"mbatchd\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] }, { \"alias\": \"Number 
of threads\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"procstat\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"C\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"num_threads\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"exe\", \"operator\": \"=\", \"value\": \"mbatchd\" }, { \"condition\": \"AND\", \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" } ] } ], \"title\": \"LSF mbschd process metrics\", \"type\": \"timeseries\" }, { \"collapsed\": false, \"gridPos\": { \"h\": 1, \"w\": 24, \"x\": 0, \"y\": 34 }, \"id\": 39, \"panels\": [], \"title\": \"Additional metrics (scratch)\", \"type\": \"row\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 0, \"y\": 35 }, \"id\": 2, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"distinct\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"running\" } ] } ], \"title\": \"Running\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"yellow\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 3, \"y\": 35 }, \"id\": 5, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"default\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"pending\" } ] } ], \"title\": \"Pending\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], 
\"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"red\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 6, \"y\": 35 }, \"id\": 6, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"default\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"suspended\" } ] } ], \"title\": \"Suspended\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"blue\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 9, \"y\": 35 }, \"id\": 7, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"measurement\": \"lsf_jobs\", \"orderByTime\": \"ASC\", \"policy\": \"default\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"state\", \"operator\": \"=\", \"value\": \"finished\" } ] } ], \"title\": \"Finished\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 12, \"y\": 35 }, \"id\": 15, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Ok\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_servers\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"status\", \"operator\": \"=\", \"value\": \"ok\" } ] } ], \"title\": 
\"Ok\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"blue\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 15, \"y\": 35 }, \"id\": 16, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Closed\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_servers\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"status\", \"operator\": \"=\", \"value\": \"closed\" } ] } ], \"title\": \"Closed\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"yellow\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 18, \"y\": 35 }, \"id\": 17, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Unreachable\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_servers\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"status\", \"operator\": \"=\", \"value\": \"unreachable\" } ] } ], \"title\": \"Unreachable\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"red\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 21, \"y\": 35 }, \"id\": 18, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Unavailable\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, 
\"measurement\": \"lsf_servers\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"value\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"mean\" } ] ], \"tags\": [ { \"key\": \"status\", \"operator\": \"=\", \"value\": \"unavailable\" } ] } ], \"title\": \"Unavailable\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 0, \"y\": 39 }, \"id\": 21, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Clients\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"clients\" } ] } ], \"title\": \"Clients\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 3, \"y\": 39 }, \"id\": 22, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Servers\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"servers\" } ] } ], \"title\": \"Servers\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 6, \"y\": 39 }, \"id\": 23, 
\"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Servers\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"cpus\" } ] } ], \"title\": \"CPUs\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 9, \"y\": 39 }, \"id\": 24, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Cores\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"cores\" } ] } ], \"title\": \"Cores\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"thresholds\": { \"mode\": \"absolute\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] } }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 12, \"y\": 39 }, \"id\": 25, \"options\": { \"colorMode\": \"value\", \"graphMode\": \"none\", \"justifyMode\": \"auto\", \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"\", \"values\": false }, \"text\": {}, \"textMode\": \"auto\" }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"alias\": \"Slots\", \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ] ], \"tags\": 
[ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"slots\" } ] } ], \"title\": \"Slots\", \"type\": \"stat\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"min\": 0, \"thresholds\": { \"mode\": \"percentage\", \"steps\": [ { \"color\": \"green\", \"value\": null } ] }, \"unit\": \"none\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 3, \"y\": 43 }, \"id\": 52, \"options\": { \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"/^lsf_hosts\\\\.last$/\", \"values\": false }, \"showThresholdLabels\": false, \"showThresholdMarkers\": true }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ], [ { \"params\": [ \"peak\" ], \"type\": \"field\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"servers\" } ] } ], \"title\": \"Servers\", \"type\": \"gauge\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"min\": 0, \"thresholds\": { \"mode\": \"percentage\", \"steps\": [ { \"color\": \"yellow\", \"value\": null } ] }, \"unit\": \"none\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 6, \"y\": 43 }, \"id\": 51, \"options\": { \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"/^lsf_hosts\\\\.last$/\", \"values\": false }, \"showThresholdLabels\": false, \"showThresholdMarkers\": true }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ], [ { \"params\": [ \"peak\" ], \"type\": \"field\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"cpus\" } ] } ], \"title\": \"CPUs\", \"type\": \"gauge\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"min\": 0, \"thresholds\": { \"mode\": \"percentage\", \"steps\": [ { \"color\": \"light-red\", \"value\": null } ] }, \"unit\": \"none\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 9, \"y\": 43 }, \"id\": 50, \"options\": { \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ 
\"lastNotNull\" ], \"fields\": \"/^lsf_hosts\\\\.last$/\", \"values\": false }, \"showThresholdLabels\": false, \"showThresholdMarkers\": true }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ], [ { \"params\": [ \"peak\" ], \"type\": \"field\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"cores\" } ] } ], \"title\": \"Cores\", \"type\": \"gauge\" }, { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"description\": \"\", \"fieldConfig\": { \"defaults\": { \"color\": { \"mode\": \"thresholds\" }, \"mappings\": [], \"min\": 0, \"thresholds\": { \"mode\": \"percentage\", \"steps\": [ { \"color\": \"blue\", \"value\": null } ] }, \"unit\": \"none\" }, \"overrides\": [] }, \"gridPos\": { \"h\": 4, \"w\": 3, \"x\": 12, \"y\": 43 }, \"id\": 49, \"options\": { \"orientation\": \"auto\", \"reduceOptions\": { \"calcs\": [ \"lastNotNull\" ], \"fields\": \"/^lsf_hosts\\\\.last$/\", \"values\": false }, \"showThresholdLabels\": false, \"showThresholdMarkers\": true }, \"pluginVersion\": \"9.1.6\", \"targets\": [ { \"datasource\": { \"type\": \"influxdb\", \"uid\": \"eNfWCy5Vk\" }, \"groupBy\": [ { \"params\": [ \"$__interval\" ], \"type\": \"time\" }, { \"params\": [ \"null\" ], \"type\": \"fill\" } ], \"hide\": false, \"measurement\": \"lsf_hosts\", \"orderByTime\": \"ASC\", \"policy\": \"autogen\", \"refId\": \"A\", \"resultFormat\": \"time_series\", \"select\": [ [ { \"params\": [ \"current\" ], \"type\": \"field\" }, { \"params\": [], \"type\": \"last\" } ], [ { \"params\": [ \"peak\" ], \"type\": \"field\" } ] ], \"tags\": [ { \"key\": \"host\", \"operator\": \"=\", \"value\": \"kilenc\" }, { \"condition\": \"AND\", \"key\": \"state\", \"operator\": \"=\", \"value\": \"slots\" } ] } ], \"title\": \"Slots\", \"type\": \"gauge\" } ], \"refresh\": \"30s\", \"schemaVersion\": 37, \"style\": \"dark\", \"tags\": [], \"templating\": { \"list\": [ { \"current\": { \"selected\": true, \"text\": [ \"priority\" ], \"value\": [ \"priority\" ] }, \"datasource\": { \"type\": \"influxdb\", \"uid\": \"oSnSlVc4k\" }, \"definition\": \"show tag values from \\\"lsf_queues\\\" with key=\\\"name\\\"\", \"hide\": 0, \"includeAll\": false, \"multi\": false, \"name\": \"Queue\", \"options\": [], \"query\": \"show tag values from \\\"lsf_queues\\\" with key=\\\"name\\\"\", \"refresh\": 1, \"regex\": \"\", \"skipUrlSync\": false, \"sort\": 0, \"tagValuesQuery\": \"\", \"tagsQuery\": \"\", \"type\": \"query\", \"useTags\": false } ] }, \"time\": { \"from\": \"now-1h\", \"to\": \"now\" }, \"timepicker\": {}, \"timezone\": \"\", \"title\": \"LSF cluster status\", \"uid\": \"ORojp8cVz\", \"version\": 160, \"weekStart\": \"\"}As you can see, with a short plugin script to collect information from LSF, it’s possible to monitor your LSF cluster using the TIG stack. It’s important to note that there are powerfulmonitoring and reporting tools available from IBM as add-ons to LSF; IBM Spectrum LSF RTM and IBM Spectrum LSF Explorer. 
You can find more details about the add-on capabilities for LSFhere.", + "content_html": "

    Much like dashboards in automobiles, dashboards in the context of HPC infrastructure are crucial to get an understanding of what’s happening under the hood of your HPC cluster - at a glance. During my IT career, I’ve used a myriad of monitoring solutions ranging from SNMP and Ganglia to the ELK (Elasticsearch, Logstash, Kibana) stack. For example, I’ve recently written an overview on how it is possible to visualize IBM Spectrum LSF (LSF) data in Grafana. LSF is an HPC job scheduler which brings to the table three decades of experience in workload and resource management.

    For this blog, I decided to take this to the next level by monitoring IBM Spectrum LSF with the well-known TIG (Telegraf, InfluxDB, Grafana) stack. This article is not meant to be a debate on the advantages of one monitoring stack over another. Rather, the focus is to demonstrate what is feasible in terms of monitoring Spectrum LSF clusters with the TIG stack, given the many available ways to query LSF for key information using CLI commands.


    The Journey

    There already exist many write-ups on how to deploy the TIG stack to monitor systems. This isn’t meant to be a guide on setting up the TIG stack. Rather, it’s assumed that the reader already has some familiarity with the TIG stack. If not, then [insert your favourite search engine] is your friend.

    On my home network, I decided to set up a VM running on my trusty Traverse Ten64 running Fedora, where InfluxDB was installed. The idea was to run InfluxDB on a system that is guaranteed to be always on in my home environment and that is energy efficient. Installing telegraf on all of the LSF cluster servers (x3) proved to be straightforward. Note that in all cases, I used the OS-supplied versions of InfluxDB and Telegraf. Finally, I already had a Grafana server running on a server in my network.

    Out of the box, Telegraf has the ability to monitor numerous system metrics. Furthermore, there exist literally hundreds of plugins for Telegraf to monitor a wide variety of devices, services and software. A search, however, didn’t reveal the existence of any plugin to monitor LSF. So it was time to get creative.


    What to monitor?

    A bit of research revealed that InfluxDB supports what is known as “line protocol”. This is a well-defined, text-based format for writing data to InfluxDB. I used the following reference on “line protocol” to guide me. Using line protocol, it would ultimately be possible to write a plugin for Telegraf to effectively scrape information from Spectrum LSF and output it in line protocol format for writing to InfluxDB.
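
    To make this concrete: each line protocol point carries a measurement name, optional comma-separated tags, one or more fields, and a timestamp in nanoseconds, with a trailing i marking integer field values. The points emitted by the plugin described below look roughly like this (the values shown here are illustrative):

    lsf_servers,status=ok value=1i 1674246976000000000
    lsf_jobs,state=running value=4i 1674246976000000000
    lsf_hosts,state=cores current=16i,peak=16i 1674246976000000000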

    Before I could begin writing the plugin, the key was to determine what information from Spectrum LSF would be useful to display in the dashboard, and how that information could be extracted. For this I followed the KISS principle to keep things as simple as possible. The key metrics I decided to report on were servers, queues and jobs (oh my!), as well as process information for the LSF scheduler daemons. Refer to the following table for details:


    Metric(s) | Command
    LSF scheduler performance metrics | badmin perfmon view -json
    LSF available servers, CPUs, cores, slots | badmin showstatus
    LSF server by status (total number Ok, closed, unreachable, unavailable) | badmin showstatus
    LSF job statistics (total number running, suspended, pending) | badmin showstatus
    LSF queue statistics (per queue, total number of jobs running, suspended, pending) | bqueues -json -o queue_name:12 njobs pend run susp rsv ususp ssusp
    LSF mbatchd process metrics | (Telegraf - inputs.procstat)
    LSF mbschd process metrics | (Telegraf - inputs.procstat)
    LSF management lim process metrics | (Telegraf - inputs.procstat)

    Scrapin' fun

    The above metrics would give a good idea of the state of the Spectrum LSF cluster at a glance. With the list of metrics prepared, the next step was to create a plugin script which would scrape data from the noted commands. Both bqueues and badmin perfmon view support output in JSON format with the appropriate flags specified. However, badmin showstatus does not support JSON output. This meant that for badmin showstatus it was necessary to scrape data assuming hard-coded field positions in the output.
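
    For instance, the JSON emitted by bqueues -json -o "queue_name:12 njobs pend run susp rsv ususp ssusp" has roughly the following shape, which the plugin below walks via the QUEUES count and the RECORDS array (queue names and counts here are illustrative, not from a real cluster):

    {
      "QUEUES": 2,
      "RECORDS": [
        { "QUEUE_NAME": "priority", "NJOBS": "4", "PEND": "1", "RUN": "3", "SUSP": "0", "RSV": "0", "USUSP": "0", "SSUSP": "0" },
        { "QUEUE_NAME": "normal", "NJOBS": "0", "PEND": "0", "RUN": "0", "SUSP": "0", "RSV": "0", "USUSP": "0", "SSUSP": "0" }
      ]
    }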

    A copy of the Telegraf plugin for Spectrum LSF is provided below. This is just an example and is provided “as is” for testing purposes. Your mileage may vary.


    Example lsf_telegraf_agent.py script.
    #!/usr/bin/python3.8
    #
    # v0.9
    # Sample inputs.exec script for Telegraf which outputs metrics from an IBM Spectrum LSF management server
    # in InfluxDB Line Protocol input format.
    #
    # NOTE: It is required to set the lsf_envfile variable to point to the LSF profile.lsf file
    # for the LSF installation.
    #
    # Gabor Samu
    # January 4, 2023
    #
    import os
    import json
    import time
    import subprocess
    import sys
    from pathlib import Path

    #
    # Variable declarations
    # **NOTE: lsf_envfile needs to be set to point to the profile.lsf file for the LSF installation.
    #
    lsf_envfile = "/opt/ibm/lsfsuite/lsf/conf/profile.lsf"

    #
    # Source the Spectrum LSF profile.
    # Check for the existence of lsf_envfile (profile.lsf) and source the environment.
    # If the specified file does not exist, then exit.
    #
    path = Path(lsf_envfile)
    if path.is_file():
        lsf_env = f'env -i sh -c "source {lsf_envfile} && env"'
        for line in subprocess.getoutput(lsf_env).split("\n"):
            # Split on the first '=' only; environment values may themselves contain '='.
            key, value = line.split("=", 1)
            os.environ[key] = value
    else:
        sys.exit(f'The file {lsf_envfile} does not exist.')

    #
    # Get the time in nanoseconds since the epoch.
    # This is required as part of the InfluxDB line protocol reference.
    # Only supported on Python 3.7+
    #
    time_nanosec = time.time_ns()

    #
    # Here we set the LSF environment variable LSB_NTRIES. This will be used to determine the
    # number of retries before failure of an LSF batch command. This is used to cover the case
    # when the LSF mbatchd is not running.
    #
    os.environ["LSB_NTRIES"] = "2"

    #
    # Check if LSF performance metric monitoring is enabled. This is done by running
    # 'badmin perfmon view'. If badmin is not found, then exit.
    #
    # Check the return status from 'badmin perfmon view' and take the appropriate action:
    #  - If return status is 7, it means that performance monitoring is not enabled. The script
    #    will enable LSF performance metric monitoring by running 'badmin perfmon start'.
    #    Note that a 70 second sleep is required before LSF metrics will be available.
    #  - If return status is 65, it means that the badmin command reported that the
    #    LSF batch system is down. This is a fatal error which will cause the script
    #    to exit.
    #
    lsf_path = os.environ['LSF_BINDIR']
    badmin_path = lsf_path + "/badmin"
    bqueues_path = lsf_path + "/bqueues"
    path = Path(badmin_path)
    if path.is_file():
        cmd = [badmin_path, 'perfmon', 'view']
        p = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        while p.poll() is None:
            time.sleep(0.1)
        return_code = p.returncode
        if return_code == 7:
            cmd = [badmin_path, 'perfmon', 'start']
            p = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
            while p.poll() is None:
                time.sleep(0.1)
            return_code = p.returncode
            time.sleep(70)
        elif return_code == 65:
            sys.exit('The LSF batch system is down.')
    else:
        sys.exit(f'{badmin_path} does not exist.')

    #
    # Run badmin with the "perfmon view" keywords and the -json option to produce JSON output.
    # We assume here that the LSF batch system is responsive (a check was done above); if
    # the mbatchd is very busy there is a possibility that it may not be responsive here. This
    # case is not considered; the LSB_NTRIES setting will determine how many tries are made before
    # badmin gives up the ghost.
    #
    # Note: We previously checked for the existence of the 'badmin' binary.
    #
    cmd = [badmin_path, 'perfmon', 'view', '-json']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
    stdout, stderr = p.communicate()

    #
    # Guard for the case that the performance monitor has just been enabled, but is not
    # producing any data as the first sample period has not elapsed.
    #
    if stdout == "":
        sys.exit('Output from badmin perfmon view -json is empty.')
    else:
        data = json.loads(stdout)

    #
    # Run badmin showstatus
    # Next, run the command 'badmin showstatus' and capture the output. Note that badmin showstatus
    # does not produce JSON output. So here we must do some scraping of the output.
    # The output from 'badmin showstatus' is placed into the list 'showstatus'. The hard-coded
    # positions in the output of 'badmin showstatus' are assumed when building the output
    # strings below. Should the format of the output of 'badmin showstatus' change, this will
    # need to be updated.
    cmd = [badmin_path, 'showstatus']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
    stdout, stderr = p.communicate()
    # Convert badmin showstatus output into a list of whitespace-separated tokens.
    showstatus = stdout.split()

    #
    # Run bqueues
    #
    cmd = [bqueues_path, '-json', '-o', 'queue_name:12 njobs pend run susp rsv ususp ssusp']
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
    stdout, stderr = p.communicate()
    data_queues = json.loads(stdout)

    #
    # At this stage, we've captured the output from 'badmin perfmon view -json' and
    # 'badmin showstatus'. We're now ready to print to standard output the metric
    # strings in InfluxDB line protocol format.
    #
    # Details about the line protocol format can be found here:
    # https://docs.influxdata.com/influxdb/v2.6/reference/syntax/line-protocol/
    #

    #
    # LSF server status
    #
    print("lsf_servers,","status=total"," value=",showstatus[21],"i ",time_nanosec,sep='')
    print("lsf_servers,","status=ok"," value=",showstatus[23],"i ",time_nanosec,sep='')
    print("lsf_servers,","status=closed"," value=",showstatus[25],"i ",time_nanosec,sep='')
    print("lsf_servers,","status=unreachable"," value=",showstatus[27],"i ",time_nanosec,sep='')
    print("lsf_servers,","status=unavailable"," value=",showstatus[29],"i ",time_nanosec,sep='')

    #
    # LSF job status
    #
    print("lsf_jobs,","state=total"," value=",showstatus[33],"i ",time_nanosec,sep='')
    print("lsf_jobs,","state=running"," value=",showstatus[35],"i ",time_nanosec,sep='')
    print("lsf_jobs,","state=suspended"," value=",showstatus[37],"i ",time_nanosec,sep='')
    print("lsf_jobs,","state=pending"," value=",showstatus[39],"i ",time_nanosec,sep='')
    print("lsf_jobs,","state=finished"," value=",showstatus[41],"i ",time_nanosec,sep='')

    #
    # LSF user stats
    #
    print("lsf_users,","state=numusers"," value=",showstatus[45],"i ",time_nanosec,sep='')
    print("lsf_users,","state=numgroups"," value=",showstatus[50],"i ",time_nanosec,sep='')
    print("lsf_users,","state=numactive"," value=",showstatus[55],"i ",time_nanosec,sep='')

    #
    # LSF hosts stats
    # First we split out the current and peak values for clients, servers, cpus, cores, and slots.
    # The current and peak values are separated by the "/" delimiter.
    #
    clientssplit = showstatus[9].split("/")
    serverssplit = showstatus[11].split("/")
    cpussplit = showstatus[13].split("/")
    coressplit = showstatus[15].split("/")
    slotssplit = showstatus[17].split("/")
    print("lsf_hosts,","state=clients"," current=",clientssplit[0],"i,","peak=",clientssplit[1],"i ",time_nanosec,sep='')
    print("lsf_hosts,","state=servers"," current=",serverssplit[0],"i,","peak=",serverssplit[1],"i ",time_nanosec,sep='')
    print("lsf_hosts,","state=cpus"," current=",cpussplit[0],"i,","peak=",cpussplit[1],"i ",time_nanosec,sep='')
    print("lsf_hosts,","state=cores"," current=",coressplit[0],"i,","peak=",coressplit[1],"i ",time_nanosec,sep='')
    print("lsf_hosts,","state=slots"," current=",slotssplit[0],"i,","peak=",slotssplit[1],"i ",time_nanosec,sep='')

    #
    # Print mbatchd query metrics
    #
    print("lsf_mbatchd,","query=job"," value=",data['record'][1]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","query=host"," value=",data['record'][2]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","query=queue"," value=",data['record'][3]['current'],"i ",time_nanosec,sep='')

    #
    # Print mbatchd job metrics
    #
    print("lsf_mbatchd,","jobs=submitreqs"," value=",data['record'][4]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","jobs=submitted"," value=",data['record'][5]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","jobs=dispatched"," value=",data['record'][6]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","jobs=completed"," value=",data['record'][7]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","jobs=sentremote"," value=",data['record'][8]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","jobs=acceptremote"," value=",data['record'][9]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","sched=interval"," value=",data['record'][10]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","sched=matchhost"," value=",data['record'][11]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","sched=buckets"," value=",data['record'][12]['current'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","sched=reordered"," value=",data['record'][13]['current'],"i ",time_nanosec,sep='')

    #
    # Print mbatchd efficiency metrics. Here check if the efficiency metric indicated is "-". If so,
    # then assume a zero value. The trailing "%" sign on the metrics (percentages) is also stripped here.
    #
    slots = (data['record'][14]['current'])
    slots_percent = slots
    if slots_percent == "-":
        slots_percent = "0"
    elif slots_percent != "0":
        # Strip % sign and decimal. This is to work around an issue inserting a float to InfluxDB:
        # "type float, already exists as type integer dropped ..."
        slots_percent = slots[:-4]
    memory = (data['record'][15]['current'])
    memory_percent = memory
    if memory_percent == "-":
        memory_percent = "0"
    elif memory_percent != "0":
        # Strip % sign and decimal. This is to work around an issue inserting a float to InfluxDB:
        # "type float, already exists as type integer dropped ..."
        memory_percent = memory[:-4]
    print("lsf_mbatchd,","utilization=slots"," value=",slots_percent,"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","utilization=memory"," value=",memory_percent,"i ",time_nanosec,sep='')

    #
    # Print mbatchd file descriptor usage
    #
    print("lsf_mbatchd,","fd=free"," value=",data['fd']['free'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","fd=used"," value=",data['fd']['used'],"i ",time_nanosec,sep='')
    print("lsf_mbatchd,","fd=total"," value=",data['fd']['total'],"i ",time_nanosec,sep='')

    #
    # Print LSF queue status (njobs)
    #
    iterations = data_queues["QUEUES"]
    for n in range(iterations):
        print("lsf_queues,","name=", data_queues['RECORDS'][n]['QUEUE_NAME'], " njobs=", data_queues['RECORDS'][n]['NJOBS'],"i,",
              "pend=", data_queues['RECORDS'][n]['PEND'],"i,",
              "run=", data_queues['RECORDS'][n]['RUN'],"i,",
              "susp=", data_queues['RECORDS'][n]['SUSP'],"i,",
              "rsv=", data_queues['RECORDS'][n]['RSV'],"i,",
              "ususp=", data_queues['RECORDS'][n]['USUSP'],"i,",
              "ssusp=", data_queues['RECORDS'][n]['SSUSP'],"i ",
              time_nanosec, sep='')
    exit()
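
    Before wiring the script into Telegraf, it can be run by hand on the LSF management server to eyeball the generated line protocol. The output will look along these lines (the values and timestamp shown here are illustrative):

    [root@kilenc ~]# ./lsf_telegraf_agent_0.9.py
    lsf_servers,status=total value=1i 1674246976000000000
    lsf_servers,status=ok value=1i 1674246976000000000
    lsf_jobs,state=running value=2i 1674246976000000000
    lsf_hosts,state=cores current=16i,peak=16i 1674246976000000000
    ...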

    Bringing it all together

    For completeness, below are the details regarding the configuration of the environment. It should be noted that the simple test environment consists of a single server running IBM Spectrum LSF Suite for HPC and a separate server which runs the InfluxDB instance.


    Hostname | Component | Version
    kilenc | OS (LSF mgmt server) | CentOS Stream release 8 (ppc64le)
    kilenc | Spectrum LSF Suite for HPC | v10.2.0.13
    adatbazis | OS (InfluxDB server) | Fedora release 36 (aarch64)
    adatbazis | InfluxDB | v1.8.10
    kilenc | Telegraf | v1.24.3
    kilenc | Grafana | v9.1.6

    The following steps assume that IBM Spectrum LSF Suite for HPC, InfluxDB and Telegraf have been installed.

    1. Start InfluxDB on the host adatbazis
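
    Before moving on, it’s worth confirming that the InfluxDB instance is actually reachable from the LSF management server. Here is a minimal sketch, assuming an InfluxDB 1.x instance listening on the default port 8086 as configured in this environment; InfluxDB 1.x answers GET /ping with HTTP 204 when it is up:

    #!/usr/bin/python3.8
    # Quick reachability check for the InfluxDB 1.x instance on adatbazis.
    import urllib.request

    # InfluxDB 1.x responds to GET /ping with HTTP 204 (no body) when healthy.
    with urllib.request.urlopen("http://adatbazis:8086/ping", timeout=5) as resp:
        print("InfluxDB is reachable, HTTP status:", resp.getcode())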

    2. On the LSF management server kilenc, configure telegraf to connect to the InfluxDB instance on host adatbazis. Edit the configuration /etc/telegraf/telegraf.conf and specify the correct URL in the outputs.influxdb section as follows:

    # # Configuration for sending metrics to InfluxDB
    [[outputs.influxdb]]
    #   ## The full HTTP or UDP URL for your InfluxDB instance.
    #   ##
    #   ## Multiple URLs can be specified for a single cluster, only ONE of the
    #   ## urls will be written to each interval.
    #   # urls = ["unix:///var/run/influxdb.sock"]
    #   # urls = ["udp://127.0.0.1:8089"]
    #   # urls = ["http://127.0.0.1:8086"]
    # Added gsamu Jan 04 2023
    urls = ["http://adatbazis:8086"]
    3. On the LSF management server kilenc, configure telegraf with the custom plugin script lsf_telegraf_agent_0.9.py to collect and log metrics from IBM Spectrum LSF Suite for HPC. Edit the configuration /etc/telegraf/telegraf.conf and specify the correct command path in the section inputs.exec. Additionally, set data_format equal to influx. Note that the script lsf_telegraf_agent_0.9.py was copied to the directory /etc/telegraf/telegraf.d/scripts with permissions octal 755 and owner set to user telegraf (the commands for this are shown after the configuration snippet below). Note: User telegraf was automatically created during the installation of telegraf.
    # ## Gather LSF metrics
    [[inputs.exec]]
      ## Commands array
      commands = ["/etc/telegraf/telegraf.d/scripts/lsf_telegraf_agent_0.9.py"]
      timeout = "30s"
      interval = "30s"
      data_format = "influx"
    # ## End LSF metrics
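
    For reference, the permissions and ownership noted in the previous step can be applied as follows:

    [root@kilenc ~]# chmod 755 /etc/telegraf/telegraf.d/scripts/lsf_telegraf_agent_0.9.py
    [root@kilenc ~]# chown telegraf /etc/telegraf/telegraf.d/scripts/lsf_telegraf_agent_0.9.py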
    4. Telegraf provides the ability to collect metrics on processes. Here we’ll use the telegraf procstat facility to monitor the LSF mbatchd and mbschd processes, as well as the management host lim. mbatchd and mbschd are the key daemons involved in handling query requests and making scheduling decisions for jobs in the environment, while lim collects load information. Edit the configuration /etc/telegraf/telegraf.conf and configure the following inputs.procstat sections.
    # ## Monitor CPU and memory utilization for LSF processes
    # ## mbatchd, mbschd, lim (manager)
    [[inputs.procstat]]
    exe = "lim"
    pattern = "lim"
    pid_finder = "pgrep"

    [[inputs.procstat]]
    exe = "mbschd"
    pattern = "mbschd"
    pid_finder = "pgrep"

    [[inputs.procstat]]
    exe = "mbatchd"
    pattern = "mbatchd"
    pid_finder = "pgrep"
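    For intuition, pid_finder = "pgrep" means Telegraf resolves each pattern to one or more PIDs at every collection interval, the way pgrep would (which is why the test output further down can legitimately report pid_count=2 for mbatchd). The stand-alone sketch below is only an illustration of that idea, not Telegraf's actual implementation:

#!/usr/bin/env python3
# Stand-alone illustration of pgrep-based discovery (not Telegraf's code):
# resolve each daemon name to the PIDs that procstat would track.
import subprocess

def pgrep(pattern: str) -> list[int]:
    """Return the PIDs whose process name matches `pattern`, as pgrep does."""
    result = subprocess.run(["pgrep", pattern], capture_output=True, text=True)
    return [int(pid) for pid in result.stdout.split()]

for daemon in ("lim", "mbschd", "mbatchd"):
    print(f"{daemon}: {pgrep(daemon)}")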
    5. With the configuration of Telegraf complete, it's now time to test whether the configuration and the custom LSF agent are functioning as expected. Note that the following operation is performed on the LSF management candidate host kilenc and assumes that the LSF daemons are up and running. This is achieved by running the command: telegraf --config /etc/telegraf/telegraf.conf --test. Note: Any errors in the configuration file /etc/telegraf/telegraf.conf will result in errors in the output.

    Output of telegraf --config /etc/telegraf/telegraf.conf --test:
    [root@kilenc telegraf]# pwd/etc/telegraf[root@kilenc telegraf]# telegraf --config /etc/telegraf/telegraf.conf --test> mem,host=kilenc active=1938817024i,available=6820003840i,available_percent=20.653390597462806,buffered=4849664i,cached=6317735936i,commit_limit=33560395776i,committed_as=18635292672i,dirty=4128768i,free=2623799296i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=13852016640i,low_free=0i,low_total=0i,mapped=1007353856i,page_tables=22478848i,shared=259063808i,slab=4946919424i,sreclaimable=902234112i,sunreclaim=4044685312i,swap_cached=3866624i,swap_free=16994729984i,swap_total=17049780224i,total=33021231104i,used=24074846208i,used_percent=72.90717336424115,vmalloc_chunk=0i,vmalloc_total=562949953421312i,vmalloc_used=0i,write_back=0i,write_back_tmp=0i 1674246976000000000> kernel,host=kilenc boot_time=1673790850i,context_switches=1943864437i,entropy_avail=4037i,interrupts=1294179599i,processes_forked=4255316i 1674246976000000000> swap,host=kilenc free=16994729984i,total=17049780224i,used=55050240i,used_percent=0.3228794698626609 1674246976000000000> swap,host=kilenc in=172032i,out=851968i 1674246976000000000> net,host=kilenc,interface=lo bytes_recv=90039931116i,bytes_sent=90039931116i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=17245997i,packets_sent=17245997i 1674246976000000000> net,host=kilenc,interface=enP4p1s0f0 bytes_recv=0i,bytes_sent=0i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=0i,packets_sent=0i 1674246976000000000> net,host=kilenc,interface=enP4p1s0f1 bytes_recv=11791041280i,bytes_sent=1701152001i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=10322276i,packets_sent=4594948i 1674246976000000000> net,host=kilenc,interface=all icmp_inaddrmaskreps=0i,icmp_inaddrmasks=0i,icmp_incsumerrors=0i,icmp_indestunreachs=8609i,icmp_inechoreps=20i,icmp_inechos=11i,icmp_inerrors=1084i,icmp_inmsgs=8640i,icmp_inparmprobs=0i,icmp_inredirects=0i,icmp_insrcquenchs=0i,icmp_intimeexcds=0i,icmp_intimestampreps=0i,icmp_intimestamps=0i,icmp_outaddrmaskreps=0i,icmp_outaddrmasks=0i,icmp_outdestunreachs=4805i,icmp_outechoreps=11i,icmp_outechos=94i,icmp_outerrors=0i,icmp_outmsgs=4910i,icmp_outparmprobs=0i,icmp_outredirects=0i,icmp_outsrcquenchs=0i,icmp_outtimeexcds=0i,icmp_outtimestampreps=0i,icmp_outtimestamps=0i,icmpmsg_intype0=20i,icmpmsg_intype3=8609i,icmpmsg_intype8=11i,icmpmsg_outtype0=11i,icmpmsg_outtype3=4805i,icmpmsg_outtype8=94i,ip_defaultttl=64i,ip_forwarding=1i,ip_forwdatagrams=0i,ip_fragcreates=62958i,ip_fragfails=0i,ip_fragoks=12611i,ip_inaddrerrors=1i,ip_indelivers=21324370i,ip_indiscards=0i,ip_inhdrerrors=0i,ip_inreceives=21324371i,ip_inunknownprotos=0i,ip_outdiscards=0i,ip_outnoroutes=30i,ip_outrequests=21248264i,ip_reasmfails=0i,ip_reasmoks=0i,ip_reasmreqds=0i,ip_reasmtimeout=0i,tcp_activeopens=763497i,tcp_attemptfails=96617i,tcp_currestab=118i,tcp_estabresets=1917i,tcp_incsumerrors=0i,tcp_inerrs=0i,tcp_insegs=19488475i,tcp_maxconn=-1i,tcp_outrsts=137188i,tcp_outsegs=20220038i,tcp_passiveopens=675805i,tcp_retranssegs=9827i,tcp_rtoalgorithm=1i,tcp_rtomax=120000i,tcp_rtomin=200i,udp_ignoredmulti=10509i,udp_incsumerrors=0i,udp_indatagrams=1816997i,udp_inerrors=0i,udp_memerrors=0i,udp_noports=264i,udp_outdatagrams=1506724i,udp_rcvbuferrors=0i,udp_sndbuferrors=0i,udplite_ignoredmulti=0i,udplite_incsumerrors=0i,udplite_indatagrams=0i,udplite_inerrors=0i,udplite_memerrors=0i,udplite_noports=0i,udplite_outdatagrams=0i,udplite_rcvbuferrors=0i,udplite_sndbuferrors=0i 1674246976000000000> 
diskio,host=kilenc,name=dm-2 io_time=9739370i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=4015612416i,read_time=604060i,reads=40592i,weighted_io_time=60563370i,write_bytes=47025459712i,write_time=59959310i,writes=1079691i 1674246976000000000> diskio,host=kilenc,name=sda1 io_time=1460i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=4849664i,read_time=1304i,reads=67i,weighted_io_time=1304i,write_bytes=0i,write_time=0i,writes=0i 1674246976000000000> diskio,host=kilenc,name=sda3 io_time=45872430i,iops_in_progress=0i,merged_reads=623i,merged_writes=1061314i,read_bytes=16398521856i,read_time=3371612i,reads=139298i,weighted_io_time=311521720i,write_bytes=133715422208i,write_time=308150107i,writes=7031512i 1674246976000000000> diskio,host=kilenc,name=dm-1 io_time=5780i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=5636096i,read_time=3030i,reads=81i,weighted_io_time=26500i,write_bytes=13631488i,write_time=23470i,writes=208i 1674246976000000000> disk,device=dm-0,fstype=xfs,host=kilenc,mode=rw,path=/ free=9315028992i,inodes_free=18214222i,inodes_total=19822888i,inodes_used=1608666i,total=53660876800i,used=44345847808i,used_percent=82.64093032486566 1674246976000000000> disk,device=sda2,fstype=ext4,host=kilenc,mode=rw,path=/boot free=309653504i,inodes_free=65264i,inodes_total=65536i,inodes_used=272i,total=1020702720i,used=640585728i,used_percent=67.41310045173972 1674246976000000000> disk,device=dm-2,fstype=xfs,host=kilenc,mode=rw,path=/home free=856442515456i,inodes_free=452529686i,inodes_total=453312512i,inodes_used=782826i,total=927930712064i,used=71488196608i,used_percent=7.704044674735306 1674246976000000000> disk,device=dm-2,fstype=xfs,host=kilenc,mode=rw,path=/home/opt/at13.0/lib free=856442515456i,inodes_free=452529686i,inodes_total=453312512i,inodes_used=782826i,total=927930712064i,used=71488196608i,used_percent=7.704044674735306 1674246976000000000> disk,device=dm-2,fstype=xfs,host=kilenc,mode=rw,path=/home/opt/at13.0/lib64 free=856442515456i,inodes_free=452529686i,inodes_total=453312512i,inodes_used=782826i,total=927930712064i,used=71488196608i,used_percent=7.704044674735306 1674246976000000000> disk,device=ST31000524AS/raktar,fstype=zfs,host=kilenc,mode=rw,path=/mnt/ST31000524AS free=210837438464i,inodes_free=411792117i,inodes_total=412304487i,inodes_used=512370i,total=965496143872i,used=754658705408i,used_percent=78.16278813725106 1674246976000000000> diskio,host=kilenc,name=sda io_time=45899860i,iops_in_progress=0i,merged_reads=650i,merged_writes=1061332i,read_bytes=16495536128i,read_time=3440899i,reads=141325i,weighted_io_time=311596362i,write_bytes=133715696640i,write_time=308155462i,writes=7031531i 1674246976000000000> disk,device=ST31000524AS,fstype=zfs,host=kilenc,mode=rw,path=/ST31000524AS free=210837438464i,inodes_free=411792117i,inodes_total=411792123i,inodes_used=6i,total=210837569536i,used=131072i,used_percent=0.00006216728844316324 1674246976000000000> diskio,host=kilenc,name=sda2 io_time=18060i,iops_in_progress=0i,merged_reads=27i,merged_writes=18i,read_bytes=88372224i,read_time=31224i,reads=436i,weighted_io_time=36579i,write_bytes=274432i,write_time=5355i,writes=19i 1674246976000000000> diskio,host=kilenc,name=dm-0 io_time=38788720i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=12341294080i,read_time=1143210i,reads=51814i,weighted_io_time=303329620i,write_bytes=86676331008i,write_time=302186410i,writes=6798400i 1674246976000000000> diskio,host=kilenc,name=sdb 
io_time=668810i,iops_in_progress=0i,merged_reads=9i,merged_writes=58i,read_bytes=104550912i,read_time=746540i,reads=31054i,weighted_io_time=1445858i,write_bytes=10845920256i,write_time=699318i,writes=124780i 1674246976000000000> diskio,host=kilenc,name=sdb1 io_time=341330i,iops_in_progress=0i,merged_reads=0i,merged_writes=58i,read_bytes=95562240i,read_time=383066i,reads=25026i,weighted_io_time=1082385i,write_bytes=10845920256i,write_time=699318i,writes=124780i 1674246976000000000> diskio,host=kilenc,name=sdb9 io_time=190i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=4980736i,read_time=37i,reads=69i,weighted_io_time=37i,write_bytes=0i,write_time=0i,writes=0i 1674246976000000000> system,host=kilenc load1=2.06,load15=2.12,load5=2.12,n_cpus=32i,n_users=0i 1674246976000000000> system,host=kilenc uptime=456127i 1674246976000000000> system,host=kilenc uptime_format=\"5 days,  6:42\" 1674246976000000000> processes,host=kilenc blocked=1i,dead=0i,idle=569i,paging=0i,parked=1i,running=0i,sleeping=412i,stopped=0i,total=1366i,total_threads=2683i,unknown=0i,zombies=0i 1674246976000000000> lsf_servers,host=kilenc,status=total value=1i 1674246976000000000> lsf_servers,host=kilenc,status=ok value=1i 1674246976000000000> lsf_servers,host=kilenc,status=closed value=0i 1674246976000000000> lsf_servers,host=kilenc,status=unreachable value=0i 1674246976000000000> lsf_servers,host=kilenc,status=unavailable value=0i 1674246976000000000> lsf_jobs,host=kilenc,state=total value=121776i 1674246976000000000> lsf_jobs,host=kilenc,state=running value=32i 1674246976000000000> lsf_jobs,host=kilenc,state=suspended value=0i 1674246976000000000> lsf_jobs,host=kilenc,state=pending value=120771i 1674246976000000000> lsf_jobs,host=kilenc,state=finished value=973i 1674246976000000000> lsf_users,host=kilenc,state=numusers value=4i 1674246976000000000> lsf_users,host=kilenc,state=numgroups value=1i 1674246976000000000> lsf_users,host=kilenc,state=numactive value=1i 1674246976000000000> lsf_hosts,host=kilenc,state=clients current=0i,peak=0i 1674246976000000000> lsf_hosts,host=kilenc,state=servers current=1i,peak=1i 1674246976000000000> lsf_hosts,host=kilenc,state=cpus current=2i,peak=2i 1674246976000000000> lsf_hosts,host=kilenc,state=cores current=32i,peak=32i 1674246976000000000> lsf_hosts,host=kilenc,state=slots current=32i,peak=32i 1674246976000000000> lsf_mbatchd,host=kilenc,query=job value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,query=host value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,query=queue value=2i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=submitreqs value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=submitted value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=dispatched value=19i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=completed value=12i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=sentremote value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,jobs=acceptremote value=0i 1674246976000000000> lsf_mbatchd,host=kilenc,sched=interval value=1i 1674246976000000000> lsf_mbatchd,host=kilenc,sched=matchhost value=5i 1674246976000000000> lsf_mbatchd,host=kilenc,sched=buckets value=5i 1674246976000000000> lsf_mbatchd,host=kilenc,sched=reordered value=7i 1674246976000000000> lsf_mbatchd,host=kilenc,utilization=slots value=100i 1674246976000000000> lsf_mbatchd,host=kilenc,utilization=memory value=0i 1674246976000000000> lsf_mbatchd,fd=free,host=kilenc value=65509i 1674246976000000000> lsf_mbatchd,fd=used,host=kilenc value=26i 1674246976000000000> 
lsf_mbatchd,fd=total,host=kilenc value=65535i 1674246976000000000> lsf_queues,host=kilenc,name=admin njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=owners njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=priority njobs=93951i,pend=93923i,rsv=0i,run=28i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=night njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=short njobs=2504i,pend=2504i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=dataq njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=normal njobs=1750i,pend=1750i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=interactive njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=sendq njobs=22598i,pend=22594i,rsv=0i,run=4i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> lsf_queues,host=kilenc,name=idle njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000> cpu,cpu=cpu0,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu4,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu8,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu12,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu16,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=98.03921568448419,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=1.9607843137324836 1674246977000000000> cpu,cpu=cpu20,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu24,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu28,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu32,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu36,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu40,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=98.03921568448419,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=1.9607843136879006,usage_user=0 1674246977000000000> cpu,cpu=cpu44,host=kilenc 
usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu48,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu52,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=0,usage_iowait=100,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu56,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu60,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu64,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=87.99999999906868,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=10.000000001155058,usage_user=2.0000000002764864 1674246977000000000> cpu,cpu=cpu68,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu72,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=86.27450980280263,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=11.764705882127403,usage_user=1.9607843137324836 1674246977000000000> cpu,cpu=cpu76,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu80,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=92.30769231113655,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=3.8461538464431086,usage_user=3.84615384653056 1674246977000000000> cpu,cpu=cpu84,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=94.11764706486585,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=5.882352941197451 1674246977000000000> cpu,cpu=cpu88,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu92,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=70.58823529344627,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=29.411764701983955,usage_user=0 1674246977000000000> cpu,cpu=cpu96,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=96.15384615040192,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=3.8461538460125784,usage_user=0 1674246977000000000> cpu,cpu=cpu100,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=97.99999999813735,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=1.999999999998181,usage_user=0 1674246977000000000> cpu,cpu=cpu104,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=96.07843137993407,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=3.92156862782338,usage_user=0 1674246977000000000> cpu,cpu=cpu108,host=kilenc 
usage_guest=0,usage_guest_nice=0,usage_idle=96.07843136896838,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=1.9607843136879006,usage_user=1.9607843137324836 1674246977000000000> cpu,cpu=cpu112,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu116,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=95.91836734305988,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=4.08163265313509,usage_user=0 1674246977000000000> cpu,cpu=cpu120,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=84.61538461280144,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=3.8461538460344413,usage_user=11.53846153830009 1674246977000000000> cpu,cpu=cpu124,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000> cpu,cpu=cpu-total,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=93.47826086554115,usage_iowait=3.1055900618243673,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=2.484472049468532,usage_user=0.9316770186919254 1674246977000000000> procstat,exe=mbatchd,host=kilenc,process_name=mbatchd,user=root child_major_faults=0i,child_minor_faults=0i,cpu_time=0i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=0.03,cpu_time_user=0.05,cpu_usage=0,created_at=1674246974000000000i,involuntary_context_switches=1i,major_faults=0i,memory_data=834994176i,memory_locked=0i,memory_rss=815595520i,memory_stack=327680i,memory_swap=0i,memory_usage=2.469912528991699,memory_vms=1091108864i,minor_faults=726i,nice_priority=20i,num_fds=10i,num_threads=2i,pid=62056i,ppid=4103699i,read_bytes=0i,read_count=27i,realtime_priority=0i,rlimit_cpu_time_hard=9223372036854775807i,rlimit_cpu_time_soft=9223372036854775807i,rlimit_file_locks_hard=9223372036854775807i,rlimit_file_locks_soft=9223372036854775807i,rlimit_memory_data_hard=9223372036854775807i,rlimit_memory_data_soft=9223372036854775807i,rlimit_memory_locked_hard=67108864i,rlimit_memory_locked_soft=67108864i,rlimit_memory_rss_hard=9223372036854775807i,rlimit_memory_rss_soft=9223372036854775807i,rlimit_memory_stack_hard=9223372036854775807i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=9223372036854775807i,rlimit_memory_vms_soft=9223372036854775807i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=262144i,rlimit_num_fds_soft=65535i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=118856i,rlimit_signals_pending_soft=118856i,signals_pending=0i,voluntary_context_switches=5i,write_bytes=0i,write_count=16i 1674246977000000000> procstat,exe=mbschd,host=kilenc,process_name=mbschd,user=lsfadmin 
child_major_faults=0i,child_minor_faults=2457641i,cpu_time=320i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=0.02,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=8.4,cpu_time_user=312.14,cpu_usage=1.836645120693344,created_at=1674227581000000000i,involuntary_context_switches=3553i,major_faults=1i,memory_data=228851712i,memory_locked=0i,memory_rss=236847104i,memory_stack=196608i,memory_swap=0i,memory_usage=0.717257022857666,memory_vms=246808576i,minor_faults=2137969i,nice_priority=20i,num_fds=3i,num_threads=1i,pid=4103740i,ppid=4103699i,read_bytes=1552384i,read_count=936861i,realtime_priority=0i,rlimit_cpu_time_hard=9223372036854775807i,rlimit_cpu_time_soft=9223372036854775807i,rlimit_file_locks_hard=9223372036854775807i,rlimit_file_locks_soft=9223372036854775807i,rlimit_memory_data_hard=9223372036854775807i,rlimit_memory_data_soft=9223372036854775807i,rlimit_memory_locked_hard=67108864i,rlimit_memory_locked_soft=67108864i,rlimit_memory_rss_hard=9223372036854775807i,rlimit_memory_rss_soft=9223372036854775807i,rlimit_memory_stack_hard=9223372036854775807i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=9223372036854775807i,rlimit_memory_vms_soft=9223372036854775807i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=262144i,rlimit_num_fds_soft=65535i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=118856i,rlimit_signals_pending_soft=118856i,signals_pending=0i,voluntary_context_switches=43952i,write_bytes=0i,write_count=42311i 1674246977000000000> procstat_lookup,exe=mbschd,host=kilenc,pid_finder=pgrep,result=success pid_count=1i,result_code=0i,running=1i 1674246977000000000> procstat,exe=mbatchd,host=kilenc,process_name=mbatchd,user=root child_major_faults=2i,child_minor_faults=4476280i,cpu_time=177i,cpu_time_guest=0,cpu_time_guest_nice=0,cpu_time_idle=0,cpu_time_iowait=6.68,cpu_time_irq=0,cpu_time_nice=0,cpu_time_soft_irq=0,cpu_time_steal=0,cpu_time_system=51.01,cpu_time_user=126.42,cpu_usage=0,created_at=1674227573000000000i,involuntary_context_switches=4993i,major_faults=3i,memory_data=834994176i,memory_locked=0i,memory_rss=827785216i,memory_stack=327680i,memory_swap=0i,memory_usage=2.5068273544311523,memory_vms=1091108864i,minor_faults=2406945i,nice_priority=20i,num_fds=26i,num_threads=3i,pid=4103699i,ppid=4103684i,read_bytes=21008384i,read_count=364726i,realtime_priority=0i,rlimit_cpu_time_hard=9223372036854775807i,rlimit_cpu_time_soft=9223372036854775807i,rlimit_file_locks_hard=9223372036854775807i,rlimit_file_locks_soft=9223372036854775807i,rlimit_memory_data_hard=9223372036854775807i,rlimit_memory_data_soft=9223372036854775807i,rlimit_memory_locked_hard=67108864i,rlimit_memory_locked_soft=67108864i,rlimit_memory_rss_hard=9223372036854775807i,rlimit_memory_rss_soft=9223372036854775807i,rlimit_memory_stack_hard=9223372036854775807i,rlimit_memory_stack_soft=8388608i,rlimit_memory_vms_hard=9223372036854775807i,rlimit_memory_vms_soft=9223372036854775807i,rlimit_nice_priority_hard=0i,rlimit_nice_priority_soft=0i,rlimit_num_fds_hard=262144i,rlimit_num_fds_soft=65535i,rlimit_realtime_priority_hard=0i,rlimit_realtime_priority_soft=0i,rlimit_signals_pending_hard=118856i,rlimit_signals_pending_soft=118856i,signals_pending=0i,voluntary_context_switches=172583i,write_bytes=1562181632i,write_count=12164760i 1674246977000000000> procstat_lookup,exe=mbatchd,host=kilenc,pid_finder=pgrep,result=success pid_count=2i,result_code=0i,running=2i 1674246977000000000

    6. Assuming there were no errors in the previous step, proceed to start the telegraf service via systemd.
    [root@kilenc telegraf]# systemctl start telegraf
    [root@kilenc telegraf]# systemctl status telegraf
    ● telegraf.service - Telegraf
       Loaded: loaded (/usr/lib/systemd/system/telegraf.service; enabled; vendor preset: disabled)
       Active: active (running) since Thu 2023-01-19 14:13:51 EST; 1 day 1h ago
         Docs: https://github.com/influxdata/telegraf
     Main PID: 3225959 (telegraf)
        Tasks: 35 (limit: 190169)
       Memory: 192.6M
       CGroup: /system.slice/telegraf.service
               └─3225959 /usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/tele>
    Jan 19 14:13:51 kilenc systemd[1]: Starting Telegraf...
    Jan 19 14:13:51 kilenc systemd[1]: Started Telegraf.
    7. On the host running the database instance, adatbazis, perform queries to check whether the database telegraf exists, as well as whether LSF-related data is being logged. This is confirmed in the output below; a scripted alternative using the InfluxDB Python client is sketched after the output.

    Output from InfluxDB queries:
    [root@adatbazis fedora]# influxConnected to https://localhost:8086 version 1.8.10InfluxDB shell version: 1.8.10> authusername: influxpassword: > show databasesname: databasesname----_internaltelegraf> use telegrafUsing database telegraf> show field keysname: cpufieldKey         fieldType--------         ---------usage_guest      floatusage_guest_nice floatusage_idle       floatusage_iowait     floatusage_irq        floatusage_nice       floatusage_softirq    floatusage_steal      floatusage_system     floatusage_user       floatname: diskfieldKey     fieldType--------     ---------free         integerinodes_free  integerinodes_total integerinodes_used  integertotal        integerused         integerused_percent floatname: diskiofieldKey         fieldType--------         ---------io_time          integeriops_in_progress integermerged_reads     integermerged_writes    integerread_bytes       integerread_time        integerreads            integerweighted_io_time integerwrite_bytes      integerwrite_time       integerwrites           integername: kernelfieldKey         fieldType--------         ---------boot_time        integercontext_switches integerentropy_avail    integerinterrupts       integerprocesses_forked integername: lsf_hostsfieldKey fieldType-------- ---------current  integerpeak     integername: lsf_jobsfieldKey fieldType-------- ---------value    integername: lsf_mbatchdfieldKey fieldType-------- ---------value    integername: lsf_queuesfieldKey fieldType-------- ---------njobs    integerpend     integerrsv      integerrun      integerssusp    integersusp     integerususp    integername: lsf_serversfieldKey fieldType-------- ---------value    integername: lsf_usersfieldKey fieldType-------- ---------value    integername: memfieldKey          fieldType--------          ---------active            integeravailable         integeravailable_percent floatbuffered          integercached            integercommit_limit      integercommitted_as      integerdirty             integerfree              integerhigh_free         integerhigh_total        integerhuge_page_size    integerhuge_pages_free   integerhuge_pages_total  integerinactive          integerlow_free          integerlow_total         integermapped            integerpage_tables       integershared            integerslab              integersreclaimable      integersunreclaim        integerswap_cached       integerswap_free         integerswap_total        integertotal             integerused              integerused_percent      floatvmalloc_chunk     integervmalloc_total     integervmalloc_used      integerwrite_back        integerwrite_back_tmp    integername: netfieldKey              fieldType--------              ---------bytes_recv            integerbytes_sent            integerdrop_in               integerdrop_out              integererr_in                integererr_out               integericmp_inaddrmaskreps   integericmp_inaddrmasks      integericmp_incsumerrors     integericmp_indestunreachs   integericmp_inechoreps       integericmp_inechos          integericmp_inerrors         integericmp_inmsgs           integericmp_inparmprobs      integericmp_inredirects      integericmp_insrcquenchs     integericmp_intimeexcds      integericmp_intimestampreps  integericmp_intimestamps     integericmp_outaddrmaskreps  integericmp_outaddrmasks     integericmp_outdestunreachs  integericmp_outechoreps      integericmp_outechos         integericmp_outerrors        integericmp_outmsgs          integericmp_outparmprobs     
integericmp_outredirects     integericmp_outsrcquenchs    integericmp_outtimeexcds     integericmp_outtimestampreps integericmp_outtimestamps    integericmpmsg_intype0       integericmpmsg_intype3       integericmpmsg_intype8       integericmpmsg_outtype0      integericmpmsg_outtype3      integericmpmsg_outtype8      integerip_defaultttl         integerip_forwarding         integerip_forwdatagrams      integerip_fragcreates        integerip_fragfails          integerip_fragoks            integerip_inaddrerrors       integerip_indelivers         integerip_indiscards         integerip_inhdrerrors        integerip_inreceives         integerip_inunknownprotos    integerip_outdiscards        integerip_outnoroutes        integerip_outrequests        integerip_reasmfails         integerip_reasmoks           integerip_reasmreqds         integerip_reasmtimeout       integerpackets_recv          integerpackets_sent          integertcp_activeopens       integertcp_attemptfails      integertcp_currestab         integertcp_estabresets       integertcp_incsumerrors      integertcp_inerrs            integertcp_insegs            integertcp_maxconn           integertcp_outrsts           integertcp_outsegs           integertcp_passiveopens      integertcp_retranssegs       integertcp_rtoalgorithm      integertcp_rtomax            integertcp_rtomin            integerudp_ignoredmulti      integerudp_incsumerrors      integerudp_indatagrams       integerudp_inerrors          integerudp_memerrors         integerudp_noports           integerudp_outdatagrams      integerudp_rcvbuferrors      integerudp_sndbuferrors      integerudplite_ignoredmulti  integerudplite_incsumerrors  integerudplite_indatagrams   integerudplite_inerrors      integerudplite_memerrors     integerudplite_noports       integerudplite_outdatagrams  integerudplite_rcvbuferrors  integerudplite_sndbuferrors  integername: processesfieldKey      fieldType--------      ---------blocked       integerdead          integeridle          integerpaging        integerparked        integerrunning       integersleeping      integerstopped       integertotal         integertotal_threads integerunknown       integerzombies       integername: procstatfieldKey                     fieldType--------                     ---------child_major_faults           integerchild_minor_faults           integercpu_time_guest               floatcpu_time_guest_nice          floatcpu_time_idle                floatcpu_time_iowait              floatcpu_time_irq                 floatcpu_time_nice                floatcpu_time_soft_irq            floatcpu_time_steal               floatcpu_time_system              floatcpu_time_user                floatcpu_usage                    floatcreated_at                   integerinvoluntary_context_switches integermajor_faults                 integermemory_data                  integermemory_locked                integermemory_rss                   integermemory_stack                 integermemory_swap                  integermemory_usage                 floatmemory_vms                   integerminor_faults                 integernum_threads                  integerpid                          integerppid                         integervoluntary_context_switches   integername: procstat_lookupfieldKey    fieldType--------    ---------pid_count   integerresult_code integerrunning     integername: swapfieldKey     fieldType--------     ---------free         integerin           integerout          integertotal        integerused         
integerused_percent floatname: systemfieldKey       fieldType--------       ---------load1          floatload15         floatload5          floatn_cpus         integern_unique_users integern_users        integeruptime         integeruptime_format  string> select * from metrics> SELECT * FROM \"lsf_hosts\";name: lsf_hoststime                current host   peak state----                ------- ----   ---- -----1674493170000000000 0       kilenc 0    clients1674493170000000000 32      kilenc 32   slots1674493170000000000 32      kilenc 32   cores1674493170000000000 1       kilenc 1    servers1674493170000000000 2       kilenc 2    cpus1674493200000000000 1       kilenc 1    servers1674493200000000000 2       kilenc 2    cpus1674493200000000000 32      kilenc 32   slots1674493200000000000 0       kilenc 0    clients1674493200000000000 32      kilenc 32   cores1674493230000000000 0       kilenc 0    clients1674493230000000000 32      kilenc 32   cores1674493230000000000 2       kilenc 2    cpus1674493230000000000 1       kilenc 1    servers1674493230000000000 32      kilenc 32   slots1674493260000000000 1       kilenc 1    servers1674493260000000000 32      kilenc 32   slots1674493260000000000 0       kilenc 0    clients1674493260000000000 2       kilenc 2    cpus1674493260000000000 32      kilenc 32   cores> quit
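    The interactive influx session above can also be scripted. As an aside (and an assumption on my part, since the article itself only uses the influx shell), the InfluxDB 1.x Python client ("pip install influxdb") can issue the same queries; the credentials below are placeholders matching the setup:

#!/usr/bin/env python3
# Scripted version of the queries above, using the InfluxDB 1.x Python
# client ("pip install influxdb") -- an optional convenience, not a
# requirement of this setup. Credentials are placeholders.
from influxdb import InfluxDBClient

client = InfluxDBClient(host="adatbazis", port=8086,
                        username="influx", password="<influxdb_password>",
                        database="telegraf")

print(client.get_list_database())                 # expect _internal and telegraf
result = client.query('SELECT * FROM "lsf_hosts" LIMIT 5')
for point in result.get_points():
    print(point)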

    8. With Telegraf successfully logging data to the InfluxDB instance, it is now possible to create a data source in Grafana for a dashboard containing LSF metrics. As noted at the outset, this article is not meant to be an extensive guide to the creation of dashboards in Grafana. In the Grafana navigation, select Configuration > Data sources.
    9. Select the Add data source button, followed by InfluxDB, which is listed under Time series databases. On the settings page, specify the following values:

    Variable      Value
    URL           http://adatbazis:8086
    Database      telegraf
    Basic auth    (enable)
    User          <influxdb_username>
    Password      <influxdb_password>

    Next, click on Save & test. If all variables and settings were properly specified, the message "datasource is working. 17 measurements found" is displayed.
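    For what it's worth, the same data source can be created without the UI through Grafana's HTTP API. The sketch below is an assumption on my part rather than part of this setup: it presumes Grafana is listening on its default port 3000 on kilenc and that an admin API token is available in the GRAFANA_TOKEN environment variable.

#!/usr/bin/env python3
# Create the InfluxDB data source via Grafana's HTTP API instead of the UI.
# Assumes Grafana on kilenc:3000 and an admin API token in $GRAFANA_TOKEN;
# the credential placeholders mirror the table above.
import json
import os
import urllib.request

payload = {
    "name": "InfluxDB-telegraf",
    "type": "influxdb",
    "access": "proxy",
    "url": "http://adatbazis:8086",
    "database": "telegraf",
    "basicAuth": True,
    "basicAuthUser": "<influxdb_username>",
    "secureJsonData": {"basicAuthPassword": "<influxdb_password>"},
}

request = urllib.request.Request(
    "http://kilenc:3000/api/datasources",
    data=json.dumps(payload).encode(),
    headers={
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ['GRAFANA_TOKEN']}",
    },
)
with urllib.request.urlopen(request) as resp:
    print(resp.read().decode())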

    10. With the data source configured in Grafana, the final step is to create a dashboard. Creating a dashboard requires creating panels which display data pulled from the configured data source using targeted queries. With a bit of effort, I was able to piece together the following dashboard, which includes both metrics from LSF as well as metrics from the Telegraf inputs.procstat plugin for the LSF processes mbatchd, mbschd and the management lim.
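    Incidentally, the dashboard definition shown below can also be imported programmatically by POSTing it to Grafana's /api/dashboards/db endpoint, under the same assumptions as the data-source sketch above (default port 3000, API token in GRAFANA_TOKEN):

#!/usr/bin/env python3
# Import the dashboard JSON (the definition below, saved as lsf_dashboard.json)
# through Grafana's HTTP API. Same assumptions as the data-source sketch above.
import json
import os
import urllib.request

with open("lsf_dashboard.json") as f:
    dashboard = json.load(f)
dashboard["id"] = None          # let Grafana assign a fresh id on import

body = {"dashboard": dashboard, "overwrite": False}
request = urllib.request.Request(
    "http://kilenc:3000/api/dashboards/db",
    data=json.dumps(body).encode(),
    headers={
        "Content-Type": "application/json",
        "Authorization": f"Bearer {os.environ['GRAFANA_TOKEN']}",
    },
)
with urllib.request.urlopen(request) as resp:
    print(resp.read().decode())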

    Example dashboard definition (JSON):
    {  \"annotations\": {    \"list\": [      {        \"builtIn\": 1,        \"datasource\": {          \"type\": \"datasource\",          \"uid\": \"grafana\"        },        \"enable\": true,        \"hide\": true,        \"iconColor\": \"rgba(0, 211, 255, 1)\",        \"name\": \"Annotations & Alerts\",        \"target\": {          \"limit\": 100,          \"matchAny\": false,          \"tags\": [],          \"type\": \"dashboard\"        },        \"type\": \"dashboard\"      }    ]  },  \"editable\": true,  \"fiscalYearStartMonth\": 0,  \"graphTooltip\": 0,  \"id\": 17,  \"links\": [],  \"liveNow\": false,  \"panels\": [    {      \"collapsed\": false,      \"gridPos\": {        \"h\": 1,        \"w\": 24,        \"x\": 0,        \"y\": 0      },      \"id\": 35,      \"panels\": [],      \"title\": \"Cluster aggregate current statistics\",      \"type\": \"row\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"A view of the current status of the LSF servers in the cluster. Servers can be in one of four states: Ok, Unavailable, Closed and Unreachable. \",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"palette-classic\"          },          \"custom\": {            \"hideFrom\": {              \"legend\": false,              \"tooltip\": false,              \"viz\": false            }          },          \"decimals\": 2,          \"mappings\": []        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 8,        \"w\": 9,        \"x\": 0,        \"y\": 1      },      \"id\": 32,      \"options\": {        \"displayLabels\": [          \"name\",          \"value\"        ],        \"legend\": {          \"displayMode\": \"table\",          \"placement\": \"right\",          \"showLegend\": true,          \"sortBy\": \"Value\",          \"sortDesc\": true,          \"values\": [            \"value\",            \"percent\"          ]        },        \"pieType\": \"donut\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"tooltip\": {          \"mode\": \"multi\",          \"sort\": \"none\"        }      },      \"targets\": [        {          \"alias\": \"Ok\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_servers\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"status\",              \"operator\": \"=\",              \"value\": \"ok\"            }          ]        },        {          \"alias\": \"Closed\",          \"datasource\": {            \"type\": \"influxdb\",            
\"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_servers\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"B\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"status\",              \"operator\": \"=\",              \"value\": \"closed\"            }          ]        },        {          \"alias\": \"Unreachable\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_servers\",          \"orderByTime\": \"ASC\",          \"policy\": \"default\",          \"refId\": \"C\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"status\",              \"operator\": \"=\",              \"value\": \"unreachable\"            }          ]        },        {          \"alias\": \"Unavailable\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_servers\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"D\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"status\",              \"operator\": \"=\",              \"value\": \"unavailable\"            }          ]        }      ],      \"title\": \"Current aggregate LSF server statistics\",      \"type\": \"piechart\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          
\"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 9,        \"y\": 1      },      \"id\": 43,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"distinct\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"running\"            }          ]        }      ],      \"title\": \"Currently running\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"light-red\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 12,        \"y\": 1      },      \"id\": 45,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"lsf_jobs\",          
\"orderByTime\": \"ASC\",          \"policy\": \"default\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"suspended\"            }          ]        }      ],      \"title\": \"Currently suspended\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"palette-classic\"          },          \"custom\": {            \"hideFrom\": {              \"legend\": false,              \"tooltip\": false,              \"viz\": false            }          },          \"decimals\": 2,          \"mappings\": []        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 8,        \"w\": 9,        \"x\": 15,        \"y\": 1      },      \"id\": 33,      \"options\": {        \"displayLabels\": [          \"name\",          \"value\"        ],        \"legend\": {          \"displayMode\": \"table\",          \"placement\": \"right\",          \"showLegend\": true,          \"sortBy\": \"Value\",          \"sortDesc\": true,          \"values\": [            \"value\",            \"percent\"          ]        },        \"pieType\": \"donut\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"tooltip\": {          \"mode\": \"multi\",          \"sort\": \"none\"        }      },      \"targets\": [        {          \"alias\": \"Running\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"running\"            }          ]        },        {          \"alias\": \"Pending\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": 
false,          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"B\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"pending\"            }          ]        },        {          \"alias\": \"Suspended\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"C\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"suspended\"            }          ]        }      ],      \"title\": \"Current aggregate LSF job statistics\",      \"type\": \"piechart\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"yellow\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 9,        \"y\": 5      },      \"id\": 44,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"default\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [  
            {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"pending\"            }          ]        }      ],      \"title\": \"Currently pending \",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"blue\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 12,        \"y\": 5      },      \"id\": 46,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"default\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"finished\"            }          ]        }      ],      \"title\": \"Finished (past hour)\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"Spectrum LSF queue statistics. Here we show jobs in running, pending and suspended jobs. 
\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"palette-classic\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              },              {                \"color\": \"red\",                \"value\": 80              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 8,        \"w\": 9,        \"x\": 0,        \"y\": 9      },      \"id\": 41,      \"options\": {        \"displayMode\": \"lcd\",        \"minVizHeight\": 10,        \"minVizWidth\": 0,        \"orientation\": \"horizontal\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"showUnfilled\": true      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Running\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"lsf_queues\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"run\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"name\",              \"operator\": \"=~\",              \"value\": \"/^$Queue$/\"            }          ]        },        {          \"alias\": \"Pending\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_queues\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"B\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"pend\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"name\",              \"operator\": \"=~\",              \"value\": \"/^$Queue$/\"            }          ]        },        {          \"alias\": \"Suspended\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {             
 \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_queues\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"C\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"susp\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"name\",              \"operator\": \"=~\",              \"value\": \"/^$Queue$/\"            }          ]        }      ],      \"title\": \"Current queue statistics ($Queue)\",      \"type\": \"bargauge\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"min\": 0,          \"thresholds\": {            \"mode\": \"percentage\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]          },          \"unit\": \"none\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 9,        \"y\": 9      },      \"id\": 53,      \"options\": {        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"/^lsf_hosts\\\\.last$/\",          \"values\": false        },        \"showThresholdLabels\": false,        \"showThresholdMarkers\": true      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ],            [              {                \"params\": [                  \"peak\"                ],                \"type\": \"field\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"servers\"            }          ]        }      ],      \"title\": \"Servers\",      \"type\": \"gauge\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      
\"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"min\": 0,          \"thresholds\": {            \"mode\": \"percentage\",            \"steps\": [              {                \"color\": \"yellow\",                \"value\": null              }            ]          },          \"unit\": \"none\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 12,        \"y\": 9      },      \"id\": 54,      \"options\": {        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"/^lsf_hosts\\\\.last$/\",          \"values\": false        },        \"showThresholdLabels\": false,        \"showThresholdMarkers\": true      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ],            [              {                \"params\": [                  \"peak\"                ],                \"type\": \"field\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"cpus\"            }          ]        }      ],      \"title\": \"CPUs\",      \"type\": \"gauge\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"palette-classic\"          },          \"custom\": {            \"axisCenteredZero\": false,            \"axisColorMode\": \"text\",            \"axisLabel\": \"\",            \"axisPlacement\": \"auto\",            \"barAlignment\": 0,            \"drawStyle\": \"line\",            \"fillOpacity\": 0,            \"gradientMode\": \"none\",            \"hideFrom\": {              \"legend\": false,              \"tooltip\": false,              \"viz\": false            },            \"lineInterpolation\": \"stepBefore\",            \"lineWidth\": 1,            \"pointSize\": 5,            \"scaleDistribution\": {              \"log\": 2,              \"type\": \"log\"            },            \"showPoints\": \"auto\",            \"spanNulls\": true,            \"stacking\": {              \"group\": \"A\",              \"mode\": \"none\"            },            \"thresholdsStyle\": {              \"mode\": \"off\"       
     }          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              },              {                \"color\": \"red\",                \"value\": 80              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 8,        \"w\": 9,        \"x\": 15,        \"y\": 9      },      \"id\": 42,      \"options\": {        \"legend\": {          \"calcs\": [],          \"displayMode\": \"list\",          \"placement\": \"bottom\",          \"showLegend\": true        },        \"tooltip\": {          \"mode\": \"single\",          \"sort\": \"none\"        }      },      \"targets\": [        {          \"alias\": \"Running\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"running\"            }          ]        },        {          \"alias\": \"Pending\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"B\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"pending\"            }          ]        },        {          \"alias\": \"Suspended\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_jobs\",          
\"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"C\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"suspended\"            }          ]        }      ],      \"title\": \"Aggregate LSF job statistics\",      \"type\": \"timeseries\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"min\": 0,          \"thresholds\": {            \"mode\": \"percentage\",            \"steps\": [              {                \"color\": \"light-red\",                \"value\": null              }            ]          },          \"unit\": \"none\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 9,        \"y\": 13      },      \"id\": 55,      \"options\": {        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"/^lsf_hosts\\\\.last$/\",          \"values\": false        },        \"showThresholdLabels\": false,        \"showThresholdMarkers\": true      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ],            [              {                \"params\": [                  \"peak\"                ],                \"type\": \"field\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"cores\"            }          ]        }      ],      \"title\": \"Cores\",      \"type\": \"gauge\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"min\": 0,          \"thresholds\": {        
    \"mode\": \"percentage\",            \"steps\": [              {                \"color\": \"blue\",                \"value\": null              }            ]          },          \"unit\": \"none\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 12,        \"y\": 13      },      \"id\": 56,      \"options\": {        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"/^lsf_hosts\\\\.last$/\",          \"values\": false        },        \"showThresholdLabels\": false,        \"showThresholdMarkers\": true      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ],            [              {                \"params\": [                  \"peak\"                ],                \"type\": \"field\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"slots\"            }          ]        }      ],      \"title\": \"Slots\",      \"type\": \"gauge\"    },    {      \"collapsed\": false,      \"gridPos\": {        \"h\": 1,        \"w\": 24,        \"x\": 0,        \"y\": 17      },      \"id\": 37,      \"panels\": [],      \"title\": \"LSF scheduler statistics\",      \"type\": \"row\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"palette-classic\"          },          \"custom\": {            \"axisCenteredZero\": false,            \"axisColorMode\": \"text\",            \"axisLabel\": \"\",            \"axisPlacement\": \"auto\",            \"barAlignment\": 0,            \"drawStyle\": \"line\",            \"fillOpacity\": 10,            \"gradientMode\": \"none\",            \"hideFrom\": {              \"graph\": false,              \"legend\": false,              \"tooltip\": false,              \"viz\": false            },            \"lineInterpolation\": \"linear\",            \"lineWidth\": 1,            \"pointSize\": 5,            \"scaleDistribution\": {              \"type\": \"linear\"            },            \"showPoints\": \"never\",            \"spanNulls\": true,            \"stacking\": {              \"group\": \"A\",              \"mode\": \"none\"            },            
\"thresholdsStyle\": {              \"mode\": \"off\"            }          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              },              {                \"color\": \"red\",                \"value\": 80              }            ]          },          \"unit\": \"short\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 8,        \"w\": 12,        \"x\": 0,        \"y\": 18      },      \"id\": 20,      \"options\": {        \"graph\": {},        \"legend\": {          \"calcs\": [],          \"displayMode\": \"list\",          \"placement\": \"right\",          \"showLegend\": true        },        \"tooltip\": {          \"mode\": \"single\",          \"sort\": \"none\"        }      },      \"pluginVersion\": \"7.5.15\",      \"targets\": [        {          \"alias\": \"CPU utilization (%)\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"procstat\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"cpu_usage\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"exe\",              \"operator\": \"=\",              \"value\": \"mbatchd\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        },        {          \"alias\": \"Memory utilization (%)\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"procstat\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"B\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"memory_usage\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"exe\",              \"operator\": \"=\",              \"value\": \"mbatchd\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        },        {          \"alias\": 
\"Number of threads\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"procstat\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"C\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"num_threads\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"exe\",              \"operator\": \"=\",              \"value\": \"mbatchd\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        },        {          \"alias\": \"File descriptors\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_mbatchd\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"D\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"fd\",              \"operator\": \"=\",              \"value\": \"used\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        }      ],      \"title\": \"LSF mbatchd process metrics\",      \"type\": \"timeseries\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"palette-classic\"          },          \"custom\": {            \"axisCenteredZero\": false,            \"axisColorMode\": \"text\",            \"axisLabel\": \"\",            \"axisPlacement\": \"auto\",            \"barAlignment\": 0,            \"drawStyle\": \"line\",            \"fillOpacity\": 10,            \"gradientMode\": \"none\",            \"hideFrom\": {              \"graph\": false,              \"legend\": false,              \"tooltip\": false,              \"viz\": false            },            \"lineInterpolation\": \"linear\",            \"lineWidth\": 1,            \"pointSize\": 5,            \"scaleDistribution\": {              \"type\": \"linear\"         
   },            \"showPoints\": \"never\",            \"spanNulls\": true,            \"stacking\": {              \"group\": \"A\",              \"mode\": \"none\"            },            \"thresholdsStyle\": {              \"mode\": \"off\"            }          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              },              {                \"color\": \"red\",                \"value\": 80              }            ]          },          \"unit\": \"short\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 8,        \"w\": 12,        \"x\": 12,        \"y\": 18      },      \"id\": 57,      \"options\": {        \"graph\": {},        \"legend\": {          \"calcs\": [],          \"displayMode\": \"list\",          \"placement\": \"right\",          \"showLegend\": true        },        \"tooltip\": {          \"mode\": \"single\",          \"sort\": \"none\"        }      },      \"pluginVersion\": \"7.5.15\",      \"targets\": [        {          \"alias\": \"CPU utilization (%)\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"procstat\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"cpu_usage\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"exe\",              \"operator\": \"=\",              \"value\": \"lim\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        },        {          \"alias\": \"Memory utilization (%)\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"procstat\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"B\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"memory_usage\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"exe\",              \"operator\": \"=\",              \"value\": \"lim\"            },            {              
\"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        },        {          \"alias\": \"Number of threads\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"procstat\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"C\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"num_threads\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"exe\",              \"operator\": \"=\",              \"value\": \"lim\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        }      ],      \"title\": \"LSF management lim process metrics\",      \"type\": \"timeseries\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"palette-classic\"          },          \"custom\": {            \"axisCenteredZero\": false,            \"axisColorMode\": \"text\",            \"axisLabel\": \"\",            \"axisPlacement\": \"auto\",            \"barAlignment\": 0,            \"drawStyle\": \"line\",            \"fillOpacity\": 10,            \"gradientMode\": \"none\",            \"hideFrom\": {              \"graph\": false,              \"legend\": false,              \"tooltip\": false,              \"viz\": false            },            \"lineInterpolation\": \"linear\",            \"lineWidth\": 1,            \"pointSize\": 5,            \"scaleDistribution\": {              \"type\": \"linear\"            },            \"showPoints\": \"never\",            \"spanNulls\": true,            \"stacking\": {              \"group\": \"A\",              \"mode\": \"none\"            },            \"thresholdsStyle\": {              \"mode\": \"off\"            }          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              },              {                \"color\": \"red\",                \"value\": 80              }            ]          },          \"unit\": \"short\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 8,        \"w\": 12,        \"x\": 0,        \"y\": 26      },      \"id\": 27,      \"options\": {        \"graph\": {},        \"legend\": {          \"calcs\": [],          \"displayMode\": \"list\",          \"placement\": \"right\",          \"showLegend\": true        },        \"tooltip\": {          \"mode\": \"single\",          \"sort\": \"none\"        }      },      \"pluginVersion\": \"7.5.15\",      
\"targets\": [        {          \"alias\": \"Job buckets\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"lsf_mbatchd\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"sched\",              \"operator\": \"=\",              \"value\": \"buckets\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        },        {          \"alias\": \"Matching host criteria\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_mbatchd\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"B\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"sched\",              \"operator\": \"=\",              \"value\": \"matchhost\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        },        {          \"alias\": \"Scheduling interval (seconds)\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_mbatchd\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"C\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],     
     \"tags\": [            {              \"key\": \"sched\",              \"operator\": \"=\",              \"value\": \"interval\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        }      ],      \"title\": \"LSF scheduler metrics\",      \"type\": \"timeseries\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"palette-classic\"          },          \"custom\": {            \"axisCenteredZero\": false,            \"axisColorMode\": \"text\",            \"axisLabel\": \"\",            \"axisPlacement\": \"auto\",            \"barAlignment\": 0,            \"drawStyle\": \"line\",            \"fillOpacity\": 10,            \"gradientMode\": \"none\",            \"hideFrom\": {              \"graph\": false,              \"legend\": false,              \"tooltip\": false,              \"viz\": false            },            \"lineInterpolation\": \"linear\",            \"lineWidth\": 1,            \"pointSize\": 5,            \"scaleDistribution\": {              \"type\": \"linear\"            },            \"showPoints\": \"never\",            \"spanNulls\": true,            \"stacking\": {              \"group\": \"A\",              \"mode\": \"none\"            },            \"thresholdsStyle\": {              \"mode\": \"off\"            }          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              },              {                \"color\": \"red\",                \"value\": 80              }            ]          },          \"unit\": \"short\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 8,        \"w\": 12,        \"x\": 12,        \"y\": 26      },      \"id\": 58,      \"options\": {        \"graph\": {},        \"legend\": {          \"calcs\": [],          \"displayMode\": \"list\",          \"placement\": \"right\",          \"showLegend\": true        },        \"tooltip\": {          \"mode\": \"single\",          \"sort\": \"none\"        }      },      \"pluginVersion\": \"7.5.15\",      \"targets\": [        {          \"alias\": \"CPU utilization (%)\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"procstat\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"cpu_usage\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"exe\",              \"operator\": \"=\",              \"value\": \"mbschd\"            },     
       {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        },        {          \"alias\": \"Memory utilization (%)\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"procstat\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"B\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"memory_usage\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"exe\",              \"operator\": \"=\",              \"value\": \"mbschd\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        },        {          \"alias\": \"Number of threads\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"procstat\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"C\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"num_threads\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"exe\",              \"operator\": \"=\",              \"value\": \"mbschd\"            },            {              \"condition\": \"AND\",              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            }          ]        }      ],      \"title\": \"LSF mbschd process metrics\",      \"type\": \"timeseries\"    },    {      \"collapsed\": false,      \"gridPos\": {        \"h\": 1,        \"w\": 24,        \"x\": 0,        \"y\": 34      },      \"id\": 39,      \"panels\": [],      \"title\": \"Additional metrics (scratch)\",      \"type\": \"row\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]  
        }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 0,        \"y\": 35      },      \"id\": 2,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"distinct\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"running\"            }          ]        }      ],      \"title\": \"Running\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"yellow\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 3,        \"y\": 35      },      \"id\": 5,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"default\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {          
      \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"pending\"            }          ]        }      ],      \"title\": \"Pending\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"red\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 6,        \"y\": 35      },      \"id\": 6,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"default\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"suspended\"            }          ]        }      ],      \"title\": \"Suspended\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"blue\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 9,        \"y\": 35      },      \"id\": 7,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            
\"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"measurement\": \"lsf_jobs\",          \"orderByTime\": \"ASC\",          \"policy\": \"default\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"finished\"            }          ]        }      ],      \"title\": \"Finished\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 12,        \"y\": 35      },      \"id\": 15,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Ok\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_servers\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"status\",              \"operator\": \"=\",              \"value\": \"ok\"            }          ]        }      ],      \"title\": \"Ok\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },         
 \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"blue\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 15,        \"y\": 35      },      \"id\": 16,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Closed\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_servers\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"status\",              \"operator\": \"=\",              \"value\": \"closed\"            }          ]        }      ],      \"title\": \"Closed\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"yellow\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 18,        \"y\": 35      },      \"id\": 17,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Unreachable\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": 
\"lsf_servers\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"status\",              \"operator\": \"=\",              \"value\": \"unreachable\"            }          ]        }      ],      \"title\": \"Unreachable\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"red\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 21,        \"y\": 35      },      \"id\": 18,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Unavailable\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_servers\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"value\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"mean\"              }            ]          ],          \"tags\": [            {              \"key\": \"status\",              \"operator\": \"=\",              \"value\": \"unavailable\"            }          ]        }      ],      \"title\": \"Unavailable\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 0,        \"y\": 
39      },      \"id\": 21,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Clients\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"clients\"            }          ]        }      ],      \"title\": \"Clients\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 3,        \"y\": 39      },      \"id\": 22,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Servers\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {  
              \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"servers\"            }          ]        }      ],      \"title\": \"Servers\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 6,        \"y\": 39      },      \"id\": 23,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Servers\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"cpus\"            }          ]        }      ],      \"title\": \"CPUs\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]          }        },        \"overrides\": 
[]      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 9,        \"y\": 39      },      \"id\": 24,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Cores\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"cores\"            }          ]        }      ],      \"title\": \"Cores\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"thresholds\": {            \"mode\": \"absolute\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]          }        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 12,        \"y\": 39      },      \"id\": 25,      \"options\": {        \"colorMode\": \"value\",        \"graphMode\": \"none\",        \"justifyMode\": \"auto\",        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"\",          \"values\": false        },        \"text\": {},        \"textMode\": \"auto\"      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"alias\": \"Slots\",          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",        
  \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"slots\"            }          ]        }      ],      \"title\": \"Slots\",      \"type\": \"stat\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"min\": 0,          \"thresholds\": {            \"mode\": \"percentage\",            \"steps\": [              {                \"color\": \"green\",                \"value\": null              }            ]          },          \"unit\": \"none\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 3,        \"y\": 43      },      \"id\": 52,      \"options\": {        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"/^lsf_hosts\\\\.last$/\",          \"values\": false        },        \"showThresholdLabels\": false,        \"showThresholdMarkers\": true      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ],            [              {                \"params\": [                  \"peak\"                ],                \"type\": \"field\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"servers\"            }          ]        }      ],      \"title\": \"Servers\",      \"type\": \"gauge\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          
\"min\": 0,          \"thresholds\": {            \"mode\": \"percentage\",            \"steps\": [              {                \"color\": \"yellow\",                \"value\": null              }            ]          },          \"unit\": \"none\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 6,        \"y\": 43      },      \"id\": 51,      \"options\": {        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"/^lsf_hosts\\\\.last$/\",          \"values\": false        },        \"showThresholdLabels\": false,        \"showThresholdMarkers\": true      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ],            [              {                \"params\": [                  \"peak\"                ],                \"type\": \"field\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"cpus\"            }          ]        }      ],      \"title\": \"CPUs\",      \"type\": \"gauge\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"min\": 0,          \"thresholds\": {            \"mode\": \"percentage\",            \"steps\": [              {                \"color\": \"light-red\",                \"value\": null              }            ]          },          \"unit\": \"none\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 9,        \"y\": 43      },      \"id\": 50,      \"options\": {        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"/^lsf_hosts\\\\.last$/\",          \"values\": false        },        \"showThresholdLabels\": false,        \"showThresholdMarkers\": true      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              
\"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ],            [              {                \"params\": [                  \"peak\"                ],                \"type\": \"field\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              \"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"cores\"            }          ]        }      ],      \"title\": \"Cores\",      \"type\": \"gauge\"    },    {      \"datasource\": {        \"type\": \"influxdb\",        \"uid\": \"eNfWCy5Vk\"      },      \"description\": \"\",      \"fieldConfig\": {        \"defaults\": {          \"color\": {            \"mode\": \"thresholds\"          },          \"mappings\": [],          \"min\": 0,          \"thresholds\": {            \"mode\": \"percentage\",            \"steps\": [              {                \"color\": \"blue\",                \"value\": null              }            ]          },          \"unit\": \"none\"        },        \"overrides\": []      },      \"gridPos\": {        \"h\": 4,        \"w\": 3,        \"x\": 12,        \"y\": 43      },      \"id\": 49,      \"options\": {        \"orientation\": \"auto\",        \"reduceOptions\": {          \"calcs\": [            \"lastNotNull\"          ],          \"fields\": \"/^lsf_hosts\\\\.last$/\",          \"values\": false        },        \"showThresholdLabels\": false,        \"showThresholdMarkers\": true      },      \"pluginVersion\": \"9.1.6\",      \"targets\": [        {          \"datasource\": {            \"type\": \"influxdb\",            \"uid\": \"eNfWCy5Vk\"          },          \"groupBy\": [            {              \"params\": [                \"$__interval\"              ],              \"type\": \"time\"            },            {              \"params\": [                \"null\"              ],              \"type\": \"fill\"            }          ],          \"hide\": false,          \"measurement\": \"lsf_hosts\",          \"orderByTime\": \"ASC\",          \"policy\": \"autogen\",          \"refId\": \"A\",          \"resultFormat\": \"time_series\",          \"select\": [            [              {                \"params\": [                  \"current\"                ],                \"type\": \"field\"              },              {                \"params\": [],                \"type\": \"last\"              }            ],            [              {                \"params\": [                  \"peak\"                ],                \"type\": \"field\"              }            ]          ],          \"tags\": [            {              \"key\": \"host\",              \"operator\": \"=\",              \"value\": \"kilenc\"            },            {              
\"condition\": \"AND\",              \"key\": \"state\",              \"operator\": \"=\",              \"value\": \"slots\"            }          ]        }      ],      \"title\": \"Slots\",      \"type\": \"gauge\"    }  ],  \"refresh\": \"30s\",  \"schemaVersion\": 37,  \"style\": \"dark\",  \"tags\": [],  \"templating\": {    \"list\": [      {        \"current\": {          \"selected\": true,          \"text\": [            \"priority\"          ],          \"value\": [            \"priority\"          ]        },        \"datasource\": {          \"type\": \"influxdb\",          \"uid\": \"oSnSlVc4k\"        },        \"definition\": \"show tag values from \\\"lsf_queues\\\" with key=\\\"name\\\"\",        \"hide\": 0,        \"includeAll\": false,        \"multi\": false,        \"name\": \"Queue\",        \"options\": [],        \"query\": \"show tag values from \\\"lsf_queues\\\" with key=\\\"name\\\"\",        \"refresh\": 1,        \"regex\": \"\",        \"skipUrlSync\": false,        \"sort\": 0,        \"tagValuesQuery\": \"\",        \"tagsQuery\": \"\",        \"type\": \"query\",        \"useTags\": false      }    ]  },  \"time\": {    \"from\": \"now-1h\",    \"to\": \"now\"  },  \"timepicker\": {},  \"timezone\": \"\",  \"title\": \"LSF cluster status\",  \"uid\": \"ORojp8cVz\",  \"version\": 160,  \"weekStart\": \"\"}

    As you can see, with a short plugin script to collect information from LSF, it’s possible to monitor your LSF cluster using the TIG stack. It’s important to note that there are powerful monitoring and reporting tools available from IBM as add-ons to LSF: IBM Spectrum LSF RTM and IBM Spectrum LSF Explorer. You can find more details about the add-on capabilities for LSF here.
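    For concreteness, here is a minimal sketch of the kind of collector script this approach relies on. It is a hypothetical example, not the exact plugin used above: it assumes LSF’s bhosts command is on the PATH, that Telegraf invokes the script through an [[inputs.exec]] stanza with data_format set to influx, and that the status strings and the lsf_servers measurement layout match the dashboard JSON shown earlier; check what bhosts actually prints at your site before relying on it.

    #!/usr/bin/env python3
    # Hypothetical Telegraf exec collector for LSF host status. Counts hosts
    # by the STATUS column of `bhosts -w` output and prints InfluxDB line
    # protocol for the lsf_servers measurement used in the dashboard above.
    import subprocess
    from collections import Counter

    def main():
        out = subprocess.run(["bhosts", "-w"], capture_output=True, text=True, check=True)
        rows = out.stdout.strip().splitlines()[1:]  # skip the header row
        counts = Counter(
            row.split()[1].lower() for row in rows if len(row.split()) > 1
        )
        # Status strings are assumptions; adjust to what your bhosts reports.
        for status in ("ok", "closed", "unreachable", "unavailable"):
            print(f"lsf_servers,status={status} value={counts.get(status, 0)}")

    if __name__ == "__main__":
        main()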

    ", + "url": "https://hpc.social/personal-blog/2023/monitoring-ibm-spectrum-lsf-with-the-tig-stack/", + + + + + + "date_published": "2023-01-24T19:48:44-07:00", + "date_modified": "2023-01-24T19:48:44-07:00", + + "author": "Ramblings of a supercomputing enthusiast." + + }, + + { + "id": "https://hpc.social/personal-blog/2022/adam-s-weekly-ish-update-2022-12-20/", + "title": "Adam’s weekly (-ish) update, 2022-12-20", + "summary": null, + "content_text": "What’s newThe past few weeks have been on the intense side at work, so I completely lost track of the blog and haven’t had a chance to write much in that time. However, I’m now on a holiday break, and finally have time to sit down at a keyboard to write more than code and Slack messages.One of the highlights of the past few weeks was a trip to San Jose, and the NVIDIA headquarters. I changed teams at work back in July, transferring from a group that was closely integrated with product management, to a more straightforward engineering team which designs and builds new high-performance computing systems. This was the first chance I’ve had to meet up with other members of my new team in person, and it was a really wonderful experience to be in the same physical space as folks who were previously just images on my screen. I love working remotely, but it’s also great to be able to stand in front of a white board with someone and brainstorm, or get coffee and just have a chat with a coworker outside of a video call with an agenda.(Plus, we were all careful and managed to avoid catching COVID from each other! Which was a win on its own.)Now, for the next two weeks I’m off work, and planning to take some time to relax and spend time on projects that are harder to focus on during busy work weeks. Expect (maybe) less about computers in my blog and social feeds, and more about D&D, baking, and tasty cocktails.What I’m reading, watching, and listening toI’ve been a bit too scattered to focus on actual books the past few weeks, but I did find time for a few interesting articles and podcasts. In particular,“Why Roman Egypt was such a strange province”, from Bret Devereaux: As usual from Devereaux, an accessible but extremely detailed discussion of why so much of what we know about the Roman empire is from Egyptian records, but why that also might not be representative of the broader empire.“Emoji as incident resolution tools”, from Will Gallego: A fun discussion of how using emoji as part of a team’s communication can add nuance and shared understanding during incident management, along with a discussion of the disadvantages and costs associated with the practice.“What does modern software architecture look like in 2022?”, from Bartosz Mikulski: A nice article which discusses how service-oriented software architecture can often include an explicit expectation of change. For example, the architecture might include notes on an ongoing deprecation of a library, or might signpost the need to factor a new microservice out when overall system load gets high enough.The Brady Heywood podcast: Found via the Oxide and Friends podcast, the Brady Heywood podcast is a series on engineering disasters and their consequences from a forensic engineering firm. 
It’s mostly not being updated any more (with the podcasters moving on to a separate series on complexity science), but it has a deep back catalog of good episodes, and includes thoughtful discussions of human factors, safety engineering, and how organizational pressures become manifest in engineering artifacts.Recent recipesSmitten Kitchen’s Homemade Irish Cream: This is a recipe I make every year, and I often give away small bottles of it as holiday gifts. It’s really ridiculously tasty, much better than Baileys or similar, and good either on its own or in hot chocolate.Smitten Kitchen’s Fairytale of New York: This is a really tasty whiskey cocktail, and the star of the show is a “winter warmth syrup” that substitutes in for simple syrup. The syrup is simply very tasty, and turns what’s effectively an Old Fashioned variant into a lovely holiday cocktail.Sparkling gingerbread from Yossy Arefi’s Snacking Cakes: This recipe takes a little more prep than most of Arefi’s “snacking cakes”, as it includes ginger three ways (ground, fresh, and crystallized), but it’s worth the few minutes of extra work.Pet photosI’m pretty sure these two want me to turn the fireplace on.Just Percy bullying the dog by stealing his bed.", + "content_html": "

    What’s new

    The past few weeks have been on the intense side at work, so I completely lost track of the blog and haven’t had a chance to write much in that time. However, I’m now on a holiday break, and finally have time to sit down at a keyboard to write more than code and Slack messages.

    One of the highlights of the past few weeks was a trip to San Jose, and the NVIDIA headquarters. I changed teams at work back in July, transferring from a group that was closely integrated with product management, to a more straightforward engineering team which designs and builds new high-performance computing systems.

    This was the first chance I’ve had to meet up with other members of my new team in person, and it was a really wonderful experience to be in the same physical space as folks who were previously just images on my screen. I love working remotely, but it’s also great to be able to stand in front of a white board with someone and brainstorm, or get coffee and just have a chat with a coworker outside of a video call with an agenda.

    (Plus, we were all careful and managed to avoid catching COVID from each other! Which was a win on its own.)

    Now, for the next two weeks I’m off work, and planning to take some time to relax and spend time on projects that are harder to focus on during busy work weeks. Expect (maybe) less about computers in my blog and social feeds, and more about D&D, baking, and tasty cocktails.

    What I’m reading, watching, and listening to

    I’ve been a bit too scattered to focus on actual books the past few weeks, but I did find time for a few interesting articles and podcasts. In particular,

    • “Why Roman Egypt was such a strange province”, from Bret Devereaux: As usual from Devereaux, an accessible but extremely detailed discussion of why so much of what we know about the Roman empire is from Egyptian records, but why that also might not be representative of the broader empire.
    • “Emoji as incident resolution tools”, from Will Gallego: A fun discussion of how using emoji as part of a team’s communication can add nuance and shared understanding during incident management, along with a discussion of the disadvantages and costs associated with the practice.
    • “What does modern software architecture look like in 2022?”, from Bartosz Mikulski: A nice article which discusses how service-oriented software architecture can often include an explicit expectation of change. For example, the architecture might include notes on an ongoing deprecation of a library, or might signpost the need to factor a new microservice out when overall system load gets high enough.
    • The Brady Heywood podcast: Found via the Oxide and Friends podcast, the Brady Heywood podcast is a series on engineering disasters and their consequences from a forensic engineering firm. It’s mostly not being updated any more (with the podcasters moving on to a separate series on complexity science), but it has a deep back catalog of good episodes, and includes thoughtful discussions of human factors, safety engineering, and how organizational pressures become manifest in engineering artifacts.

    Recent recipes

    • Smitten Kitchen’s Homemade Irish Cream: This is a recipe I make every year, and I often give away small bottles of it as holiday gifts. It’s really ridiculously tasty, much better than Baileys or similar, and good either on its own or in hot chocolate.
    • Smitten Kitchen’s Fairytale of New York: This is a really tasty whiskey cocktail, and the star of the show is a “winter warmth syrup” that substitutes in for simple syrup. The syrup is simply very tasty, and turns what’s effectively an Old Fashioned variant into a lovely holiday cocktail.
    • Sparkling gingerbread from Yossy Arefi’s Snacking Cakes: This recipe takes a little more prep than most of Arefi’s “snacking cakes”, as it includes ginger three ways (ground, fresh, and crystallized), but it’s worth the few minutes of extra work.

    Pet photos

    \"A
    I’m pretty sure these two want me to turn the fireplace on.
    \"A
    Just Percy bullying the dog by stealing his bed.
    ", + "url": "https://hpc.social/personal-blog/2022/adam-s-weekly-ish-update-2022-12-20/", + + + + + + "date_published": "2022-12-20T18:14:52-07:00", + "date_modified": "2022-12-20T18:14:52-07:00", + + "author": "Thinking Out Loud" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/visualizing-spectrum-lsf-data-with-grafana/", + "title": "Visualizing Spectrum LSF data with Grafana", + "summary": null, + "content_text": "OverviewSystem monitoring is a fundamental part of IT best practices. High performance computing (HPC) environments are no exception to this. At the high-end, HPC clusters can consist ofthousands of servers, processing millions of jobs per day. HPC admins need ways to monitor the overall cluster to determine system status and availability through to the efficiencyof workloads. Servers today produce a wide array of metrics which can be monitored for example to check for various conditions. Additionally, workload schedulers also produce a wealthof data about jobs. Having a single dashboard to show this type of detail can be of great benefit.IBM Spectrum LSF Suites provide a complete solution for HPC workload management. This includes reporting capabilities out of the box. Spectrum LSF Suite features an integrated webinterface for job management and reporting. The reporting capabilities include a number of reports out of the box, with the ability to customize and add new reports. The reportingcapability in Spectrum LSF Suite and IBM Spectrum LSF Explorer is underpinned by Elasticsearch, which is used to store, index and query data. With LSF data in Elasticsearch, it’salso possible to configure LSF command-line interface (CLI) tools to query information from Elasticsearch rather than flat files – for greater performance. This is controlled viathe LSF_QUERY_ES_FUNCTIONS parameter of Spectrum LSF. More details about the LSF_QUERY_ES_FUNCTIONS can be found in the LSF documentation here.(1) Here is a look at the indices that are created by LSF in Elasticsearch. 
Note that the status shows as yellow because I only have a single Elasticsearch node.# curl -XGET localhost:9200/_cat/indicesyellow open lsf_events-202205 tejh7jsMSwSeQUJzYM7cww 5 1 1137 0 808.1kb 808.1kbyellow open lsf_jobs_pendingreason-202204 4wi7Ta8uQPSXlFBqPh4kOQ 5 1 90531 0 8.6mb 8.6mbyellow open lsf_events-202204 tWYvW_w8TVyU1deRFOEoZg 5 1 116957 32691 59.1mb 59.1mbyellow open lsf_jobs_active-202212 Q0pStQxvTgaeL7R-f02XWA 5 1 210052 0 50.6mb 50.6mbyellow open lsf_jobs_pendingreason-202206 ENWIwfGrSqCHvi53aUQXJQ 5 1 44991 0 4.5mb 4.5mbyellow open host_booleanres_latest RE8thZCgTGeMBGodeMfXEQ 5 1 5 0 23.3kb 23.3kbyellow open lsf_jobs_pendingreason-202205 yo0iZH_4TvOqq6kQgBluvA 5 1 111 0 181.4kb 181.4kbyellow open lsf_jobs_pend-202212 9ViIS3nDRFewrqtILEbKTQ 5 1 707 0 446.9kb 446.9kbyellow open lsf_hostconf_latest 9N1Y8ML4TiyaamCPEDRQog 5 1 2 0 10.6kb 10.6kbyellow open lsf_events-202209 rtKQ8F4bSleHl8EbAQez8A 5 1 8200 955 4.4mb 4.4mbyellow open lsf_events-202206 UUKPWfN7SZ-dzVs5NAkjUg 5 1 79503 23452 36.8mb 36.8mbyellow open lsf_hostmetrics-202209 7FUNFCWPQtuGyx5jTJLb1A 5 1 4701 0 2.2mb 2.2mbyellow open lsf_hostmetrics-202208 52xef_3hQWK-jVuJqyUpHA 5 1 3823 0 1.9mb 1.9mbyellow open lsf_hostmetrics-202207 IqZYhU0RQNGIFWSRH-Ym8Q 5 1 6316 0 2.9mb 2.9mbyellow open lsf_job_acct-202209 h1ZgCSB8RwCBxwIUUzDHEQ 5 1 2050 438 1.9mb 1.9mbyellow open lsf_jobs_active-202209 iBfnf07CTcS7Gb6TxwomRA 5 1 2658 0 1mb 1mbyellow open lsf_hostmetrics-202206 0PXSYBOgTA2Qa_zzaafUPg 5 1 4301 0 2.1mb 2.1mbyellow open model xSqB_T_VSByOzYavEcEVyQ 1 1 55 0 257kb 257kbyellow open lsf_job_acct-202206 C639GnzBSjCEVczfh5u23g 5 1 16719 353 8.9mb 8.9mbyellow open lsf_jobs_active-202204 8gN_ENkQRTSfnmxrtMcOlA 5 1 33286 0 9.8mb 9.8mbyellow open lsf_job_acct-202205 LOxmhm_8RxaCuTd7YWYbLw 5 1 274 0 439.4kb 439.4kbyellow open lsf_jobs_active-202205 61u2RlXgR_SXagmZfrmttQ 5 1 1880 0 1.1mb 1.1mbyellow open lsf_jobs_pend-202209 eTgqPp9nQOScNiwyUWXmHA 5 1 9 0 106.2kb 106.2kbyellow open lsf_job_acct-202204 dDDegS6RQSWtWN99eklexg 5 1 28902 2177 17.4mb 17.4mbyellow open lsf_jobs_active-202206 8ivkjWSNR1Sh_BxWACP0ZA 5 1 16921 0 4.6mb 4.6mbyellow open lsf_current_status 92KE3V4YSJ-RtRp_kepxYg 5 1 115450 0 9mb 9mbyellow open lsf_hostmetrics-202210 vbuK2wW3RRmXuY07tDPUNQ 5 1 785 0 942.1kb 942.1kbyellow open lsf_jobs_pend-202206 OhSwn-b0SiSj8mCW5tcNIA 5 1 22 0 244.6kb 244.6kbyellow open lsf_jobs_pend-202205 OfBtWklETYK9cRx000aNPw 5 1 1 0 12.7kb 12.7kbyellow open lsf_events-202212 WUC5KJWmS-2WIN8XCQpSuw 5 1 712399 74728 337mb 337mbyellow open lsf_jobs_pend-202204 OhUsXqohSciZTPZlTryMyA 5 1 50 0 275.3kb 275.3kbyellow open resource_attributes_latest R9bk_WIPTU62dVg3O1LDBA 5 1 5 0 24.4kb 24.4kbyellow open lsf_jobs_pendingreason-202212 55iwDC5mRI-eRbzQLwWP6Q 5 1 3314828 0 288.7mb 288.7mbyellow open pa-lite-log o8-jaNoGTsSVcjJW5Ufs0w 5 1 1549 0 547.2kb 547.2kbyellow open lsf_job_acct-202212 4HXvAD02Sxq0tgp2fS2cfQ 5 1 161502 0 73.6mb 73.6mbyellow open lsf_hostmetrics-202212 Tki6OJ41R363u9Tx02N4zw 5 1 2548 0 1.7mb 1.7mbyellow open lsf_jobs_pendingreason-202209 D3TOZY2ORiK9PppGVt10Fg 5 1 2511 0 381.4kb 381.4kb(2) With the LSF data stored in Elasticsearch, the next step is to connect to the Grafana server. Here we point our browser to the Grafana server on the default port: http://lsf_manager:3000 and login to Grafana. This step assumes an account has already been setup on Grafana. Here we are using the default admin account.(3) In Grafana, navigate to Configuration -> Data sources. 
It’s here that it will be possible to add an Elasticsearch data source(4) Next, click the Add data source button.(5 In the list of data sources, filter by name for Elasticsearch and click the Select button on the Elasticsearch entry.(6) When configuring the data source, it’s necessary to specify an index name. This is where the list of indices in Elasticsearch that we generated earlier will come in handy. For this example, we wish to display the total number of pending jobs in the Spectrum LSF cluster over time. This data is stored in the lsf_jobs_pend* indices in Elasticsearch. To configure the data source appropriately, we specify the following values:Name:\t“LSF pending jobs”URL: http://localhost:9200Index name: “lsf_jobs_pend*”Time field name: “time_stamp”Version: 7.0+Note that the URL needs to point to the Elasticsearch server. In this case, both the Elasticsearch server and Grafana server are running on the same host.Next click on the Save & Test button. It should return the message Index OK. Time field name OK..Assuming that no errors were found, click on the Back button.(7) Now you should see LSF pending jobs listed as a Data Source.(8) With the data source configured, we’re now ready to configure a dashboard to display the LSF pending job information. Navigate to Create -> Dashboard.(9) Click on Add an empty panel. This is used to create a new panel where the LSF pending job information will be plotted.(10) In the panel editor, specify the following options:Panel title: “LSF pending jobs”Specify the data source “LSF pending jobs” which was created previouslySpecify a suitable time range (2 days)Line width (5 points)You should immediately see in the panel editor the plot of the hourly pending jobs. Click on the Apply button to save the changes.(11) After clicking Apply, you will be returned to the Dashboard screen. The Dashboard should now display the new LSF pending jobs panel that was created above. This Dashboard could also include panels for system metrics collected by Prometheus for example.(12) Next, click on the diskette icon in the upper right to save the Dashboard with the LSF pending jobs panel. We’ll name it Spectrum LSF cluster status.Additional panels can be added to the Spectrum LSF cluster status based on the data logged by Spectrum LSF to Elasticsearch.That concludes the simple example of plotting Spectrum LSF cluster data from Elasticsearch in Grafana. As mentioned, the IBM Spectrum LSF Suites integrated web interface also provides reporting capabilities, with several built-in reports provided out of the box. Below, we’ve included a screenshot of the pending job analysis report included with Spectrum LSF Suites.SummarySpectrum LSF provides many hooks and integration points enabling administrators to change things ranging from scheduling behavior and the output of query commands through to job information being logged to Elasticsearch. Spectrum LSF is highly customizable by organizations to suit specific needs and requirements. We’ve demonstrated this using Grafana to visualize data from the LSF scheduler in a simple example. Following the above example, administrators can combine existing HPC cluster system level reporting in Grafana with job information from Spectrum LSF for a better overall view and understanding of the infrastructure.", + "content_html": "

    Overview

    System monitoring is a fundamental part of IT best practices. High performance computing (HPC) environments are no exception to this. At the high end, HPC clusters can consist of thousands of servers, processing millions of jobs per day. HPC admins need ways to monitor the overall cluster, from system status and availability through to the efficiency of workloads. Servers today produce a wide array of metrics which can be monitored, for example, to check for various conditions. Additionally, workload schedulers also produce a wealth of data about jobs. Having a single dashboard to show this type of detail can be of great benefit.

    IBM Spectrum LSF Suites provide a complete solution for HPC workload management. This includes reporting capabilities out of the box. Spectrum LSF Suite features an integrated web interface for job management and reporting. The reporting capabilities include a number of reports out of the box, with the ability to customize and add new reports. The reporting capability in Spectrum LSF Suite and IBM Spectrum LSF Explorer is underpinned by Elasticsearch, which is used to store, index and query data. With LSF data in Elasticsearch, it’s also possible to configure LSF command-line interface (CLI) tools to query information from Elasticsearch rather than flat files, for greater performance. This is controlled via the LSF_QUERY_ES_FUNCTIONS parameter of Spectrum LSF. More details about LSF_QUERY_ES_FUNCTIONS can be found in the LSF documentation here.

    (1) Here is a look at the indices that are created by LSF in Elasticsearch. Note that the status shows as yellow because I only have a single Elasticsearch node.

    # curl -XGET localhost:9200/_cat/indices
    yellow open lsf_events-202205             tejh7jsMSwSeQUJzYM7cww 5 1    1137     0 808.1kb 808.1kb
    yellow open lsf_jobs_pendingreason-202204 4wi7Ta8uQPSXlFBqPh4kOQ 5 1   90531     0   8.6mb   8.6mb
    yellow open lsf_events-202204             tWYvW_w8TVyU1deRFOEoZg 5 1  116957 32691  59.1mb  59.1mb
    yellow open lsf_jobs_active-202212        Q0pStQxvTgaeL7R-f02XWA 5 1  210052     0  50.6mb  50.6mb
    yellow open lsf_jobs_pendingreason-202206 ENWIwfGrSqCHvi53aUQXJQ 5 1   44991     0   4.5mb   4.5mb
    yellow open host_booleanres_latest        RE8thZCgTGeMBGodeMfXEQ 5 1       5     0  23.3kb  23.3kb
    yellow open lsf_jobs_pendingreason-202205 yo0iZH_4TvOqq6kQgBluvA 5 1     111     0 181.4kb 181.4kb
    yellow open lsf_jobs_pend-202212          9ViIS3nDRFewrqtILEbKTQ 5 1     707     0 446.9kb 446.9kb
    yellow open lsf_hostconf_latest           9N1Y8ML4TiyaamCPEDRQog 5 1       2     0  10.6kb  10.6kb
    yellow open lsf_events-202209             rtKQ8F4bSleHl8EbAQez8A 5 1    8200   955   4.4mb   4.4mb
    yellow open lsf_events-202206             UUKPWfN7SZ-dzVs5NAkjUg 5 1   79503 23452  36.8mb  36.8mb
    yellow open lsf_hostmetrics-202209        7FUNFCWPQtuGyx5jTJLb1A 5 1    4701     0   2.2mb   2.2mb
    yellow open lsf_hostmetrics-202208        52xef_3hQWK-jVuJqyUpHA 5 1    3823     0   1.9mb   1.9mb
    yellow open lsf_hostmetrics-202207        IqZYhU0RQNGIFWSRH-Ym8Q 5 1    6316     0   2.9mb   2.9mb
    yellow open lsf_job_acct-202209           h1ZgCSB8RwCBxwIUUzDHEQ 5 1    2050   438   1.9mb   1.9mb
    yellow open lsf_jobs_active-202209        iBfnf07CTcS7Gb6TxwomRA 5 1    2658     0     1mb     1mb
    yellow open lsf_hostmetrics-202206        0PXSYBOgTA2Qa_zzaafUPg 5 1    4301     0   2.1mb   2.1mb
    yellow open model                         xSqB_T_VSByOzYavEcEVyQ 1 1      55     0   257kb   257kb
    yellow open lsf_job_acct-202206           C639GnzBSjCEVczfh5u23g 5 1   16719   353   8.9mb   8.9mb
    yellow open lsf_jobs_active-202204        8gN_ENkQRTSfnmxrtMcOlA 5 1   33286     0   9.8mb   9.8mb
    yellow open lsf_job_acct-202205           LOxmhm_8RxaCuTd7YWYbLw 5 1     274     0 439.4kb 439.4kb
    yellow open lsf_jobs_active-202205        61u2RlXgR_SXagmZfrmttQ 5 1    1880     0   1.1mb   1.1mb
    yellow open lsf_jobs_pend-202209          eTgqPp9nQOScNiwyUWXmHA 5 1       9     0 106.2kb 106.2kb
    yellow open lsf_job_acct-202204           dDDegS6RQSWtWN99eklexg 5 1   28902  2177  17.4mb  17.4mb
    yellow open lsf_jobs_active-202206        8ivkjWSNR1Sh_BxWACP0ZA 5 1   16921     0   4.6mb   4.6mb
    yellow open lsf_current_status            92KE3V4YSJ-RtRp_kepxYg 5 1  115450     0     9mb     9mb
    yellow open lsf_hostmetrics-202210        vbuK2wW3RRmXuY07tDPUNQ 5 1     785     0 942.1kb 942.1kb
    yellow open lsf_jobs_pend-202206          OhSwn-b0SiSj8mCW5tcNIA 5 1      22     0 244.6kb 244.6kb
    yellow open lsf_jobs_pend-202205          OfBtWklETYK9cRx000aNPw 5 1       1     0  12.7kb  12.7kb
    yellow open lsf_events-202212             WUC5KJWmS-2WIN8XCQpSuw 5 1  712399 74728   337mb   337mb
    yellow open lsf_jobs_pend-202204          OhUsXqohSciZTPZlTryMyA 5 1      50     0 275.3kb 275.3kb
    yellow open resource_attributes_latest    R9bk_WIPTU62dVg3O1LDBA 5 1       5     0  24.4kb  24.4kb
    yellow open lsf_jobs_pendingreason-202212 55iwDC5mRI-eRbzQLwWP6Q 5 1 3314828     0 288.7mb 288.7mb
    yellow open pa-lite-log                   o8-jaNoGTsSVcjJW5Ufs0w 5 1    1549     0 547.2kb 547.2kb
    yellow open lsf_job_acct-202212           4HXvAD02Sxq0tgp2fS2cfQ 5 1  161502     0  73.6mb  73.6mb
    yellow open lsf_hostmetrics-202212        Tki6OJ41R363u9Tx02N4zw 5 1    2548     0   1.7mb   1.7mb
    yellow open lsf_jobs_pendingreason-202209 D3TOZY2ORiK9PppGVt10Fg 5 1    2511     0 381.4kb 381.4kb

    (2) With the LSF data stored in Elasticsearch, the next step is to connect to the Grafana server. Here we point our browser to the Grafana server on the default port, http://lsf_manager:3000, and log in to Grafana. This step assumes an account has already been set up on Grafana. Here we are using the default admin account.

    (3) In Grafana, navigate to Configuration -> Data sources. It’s here that it will be possible to add an Elasticsearch data source.

    (4) Next, click the Add data source button.

    (5) In the list of data sources, filter by name for Elasticsearch and click the Select button on the Elasticsearch entry.

    (6) When configuring the data source, it’s necessary to specify an index name. This is where the list of indices in Elasticsearch that we generated earlier will come in handy. For this example, we wish to display the total number of pending jobs in the Spectrum LSF cluster over time. This data is stored in the lsf_jobs_pend* indices in Elasticsearch. To configure the data source appropriately, we specify the following values:

    • Name: “LSF pending jobs”
    • URL: http://localhost:9200
    • Index name: “lsf_jobs_pend*”
    • Time field name: “time_stamp”
    • Version: 7.0+

    Note that the URL needs to point to the Elasticsearch server. In this case, both the Elasticsearch server and Grafana server are running on the same host.

    Next, click on the Save & Test button. It should return the message Index OK. Time field name OK.

    Assuming that no errors were found, click on the Back button.

    (7) Now you should see LSF pending jobs listed as a Data Source.
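    As an aside, if the same data source needs to be recreated on several Grafana instances, it can be pushed through Grafana’s HTTP API instead of the UI. The sketch below is illustrative only: it assumes an admin API token in a GRAFANA_TOKEN environment variable, and the exact jsonData key names (index, timeField, esVersion) vary somewhat between Grafana versions, so verify them against your release.

    import json
    import os
    import urllib.request

    # Hypothetical example: create the Elasticsearch data source from step (6)
    # via Grafana's HTTP API. Field values mirror the UI settings used above.
    payload = {
        "name": "LSF pending jobs",
        "type": "elasticsearch",
        "access": "proxy",
        "url": "http://localhost:9200",
        "jsonData": {
            "index": "lsf_jobs_pend*",   # jsonData keys are version-dependent
            "timeField": "time_stamp",
            "esVersion": "7.10.0",
        },
    }
    req = urllib.request.Request(
        "http://localhost:3000/api/datasources",
        data=json.dumps(payload).encode(),
        headers={
            "Content-Type": "application/json",
            "Authorization": "Bearer " + os.environ["GRAFANA_TOKEN"],
        },
    )
    with urllib.request.urlopen(req) as resp:
        print(resp.status, resp.read().decode())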

    (8) With the data source configured, we’re now ready to configure a dashboard to display the LSF pending job information. Navigate to Create -> Dashboard.

    (9) Click on Add an empty panel. This is used to create a new panel where the LSF pending job information will be plotted.

    (10) In the panel editor, specify the following options:

    • Panel title: “LSF pending jobs”
    • Specify the data source “LSF pending jobs” which was created previously
    • Specify a suitable time range (2 days)
    • Line width (5 points)

    You should immediately see in the panel editor the plot of the hourly pending jobs. Click on the Apply button to save the changes.

    (11) After clicking Apply, you will be returned to the Dashboard screen. The Dashboard should now display the new LSF pending jobs panel that was created above. This Dashboard could also include panels for system metrics collected by Prometheus for example.

    (12) Next, click on the diskette icon in the upper right to save the Dashboard with the LSF pending jobs panel. We’ll name it Spectrum LSF cluster status.

    Additional panels can be added to the Spectrum LSF cluster status based on the data logged by Spectrum LSF to Elasticsearch.
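    When building new panels, it can help to look at what the scheduler is actually logging before pointing Grafana at it. The small query sketch below pulls the newest document from the pending jobs indices; only the time_stamp field and the lsf_jobs_pend* index pattern are taken from the setup above, and the fields in the returned document depend on your LSF and Explorer configuration.

    import json
    import urllib.request

    # Fetch the newest document from the lsf_jobs_pend* indices to inspect
    # the fields available for new Grafana panels.
    query = {"size": 1, "sort": [{"time_stamp": {"order": "desc"}}]}
    req = urllib.request.Request(
        "http://localhost:9200/lsf_jobs_pend*/_search",
        data=json.dumps(query).encode(),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        hits = json.load(resp)["hits"]["hits"]
        print(json.dumps(hits[0]["_source"], indent=2) if hits else "no documents")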

    That concludes the simple example of plotting Spectrum LSF cluster data from Elasticsearch in Grafana. As mentioned, the IBM Spectrum LSF Suites integrated web interface also provides reporting capabilities, with several built-in reports provided out of the box. Below, we’ve included a screenshot of the pending job analysis report included with Spectrum LSF Suites.

    Summary

    Spectrum LSF provides many hooks and integration points enabling administrators to change things ranging from scheduling behavior and the output of query commands through to job information being logged to Elasticsearch. Spectrum LSF is highly customizable by organizations to suit specific needs and requirements. We’ve demonstrated this using Grafana to visualize data from the LSF scheduler in a simple example. Following the above example, administrators can combine existing HPC cluster system level reporting in Grafana with job information from Spectrum LSF for a better overall view and understanding of the infrastructure.

    ", + "url": "https://hpc.social/personal-blog/2022/visualizing-spectrum-lsf-data-with-grafana/", + + + + + + "date_published": "2022-12-13T00:06:51-07:00", + "date_modified": "2022-12-13T00:06:51-07:00", + + "author": "Ramblings of a supercomputing enthusiast." + + }, + + { + "id": "https://hpc.social/personal-blog/2022/adam-s-weekly-update-2022-12-04/", + "title": "Adam’s weekly update, 2022-12-04", + "summary": null, + "content_text": "What’s newThis week was really intense from a work perspective. Not “bad intense”, but the kind of week where every day was spent with such a level of focus, that at 5 PM or so I found myself staring off into space and forgetting words. I think I got some good things accomplished, but my brain also felt like mush by the time the weekend came.This week I’m traveling to San Jose for work (I just checked into my hotel a little while ago!), so I fully expect this week to also be eaten by work. So I don’t promise anything terribly interesting for next week’s post…However, I did take advantage of a Sunday in San Jose to visit the Computer History Museum in Mountain View! I try to visit the museum every few years, and while a lot of the exhibits are the same, enough things change that I always get something new from the visit. Also, I’ve been doing a lot of reading about hardware development and the history thereof lately, so it was interesting to examine the museum through that new lens.I may write more about my visit later this week — it definitely sparked some thoughts — but in the mean time, here are a few photos I took while wandering around the museum.The Babbage Difference Engine, and other mechanical computers, have always fascinated me.Can’t visit the museum without visiting the Cray-1.I would have loved to have seen a CM-1 in operation, with its red LEDs showing the operation of its many single-bit CPUs.Having recently read Charles Petzold’s “Code”, I was struck by how closely the front panel of the Altair 8800 resembles the fictional front panel of the computer that Petzold constructs from logic gates up.The CHM Learning Lab now includes a back room with a couple of Dell PowerEdge R710 servers, complete with instructions for how to disassemble and reassemble them. Anyone who wants can wander in and take them apart. It was great fun watching a 5-year-old kid pulling components out of one of these… As well as feeling a little weird, as I think I’ve run these in production!What I’m readingI don’t have a ton to share this week — honestly, the whole week feels like a blur — but here are two books that I recommend.The Red Scholar’s Wake, by Aliette de Bodard: As the blurb says, “Lesbian space pirates!” Also, a really wonderful novella about building a new relationship amidst grief, power differentials, politics, and space battles. I think I basically recommend everything that de Bodard writes, but especially this. And it basically stands alone! So you can read this first, without going back to the other stories in the same world.Dealers of Lightning: XEROX PARC and the Dawn of the Computer Age, by Michael Hiltzik: I’ve just started this, but it’s already a really interesting snapshot of a key period in the development of the personal computer.Recent recipesSmitten Kitchen’s Unfussy Sugar Cookies: These cookies did, indeed, prove to be both tasty and easy to make. If you just want some easy cookies to snack on, I absolutely recommend this recipe.Pet photos", + "content_html": "

    What’s new

    This week was really intense from a work perspective. Not “bad intense”, but the kind of week where every day was spent with such a level of focus that at 5 PM or so I found myself staring off into space and forgetting words. I think I got some good things accomplished, but my brain also felt like mush by the time the weekend came.

    This week I’m traveling to San Jose for work (I just checked into my hotel a little while ago!), so I fully expect this week to also be eaten by work. So I don’t promise anything terribly interesting for next week’s post…

    However, I did take advantage of a Sunday in San Jose to visit the Computer History Museum in Mountain View! I try to visit the museum every few years, and while a lot of the exhibits are the same, enough things change that I always get something new from the visit. Also, I’ve been doing a lot of reading about hardware development and the history thereof lately, so it was interesting to examine the museum through that new lens.

    I may write more about my visit later this week — it definitely sparked some thoughts — but in the meantime, here are a few photos I took while wandering around the museum.

    \"A
    The Babbage Difference Engine, and other mechanical computers, have always fascinated me.
    \"The
    Can’t visit the museum without visiting the Cray-1.
    \"The
    I would have loved to have seen a CM-1 in operation, with its red LEDs showing the operation of its many single-bit CPUs.
    \"The
    Having recently read Charles Petzold’s “Code”, I was struck by how closely the front panel of the Altair 8800 resembles the fictional front panel of the computer that Petzold constructs from logic gates up.
    \"A
    The CHM Learning Lab now includes a back room with a couple of Dell PowerEdge R710 servers, complete with instructions for how to disassemble and reassemble them. Anyone who wants can wander in and take them apart. It was great fun watching a 5-year-old kid pulling components out of one of these… As well as feeling a little weird, as I think I’ve run these in production!

    What I’m reading

    I don’t have a ton to share this week — honestly, the whole week feels like a blur — but here are two books that I recommend.

    • The Red Scholar’s Wake, by Aliette de Bodard: As the blurb says, “Lesbian space pirates!” Also, a really wonderful novella about building a new relationship amidst grief, power differentials, politics, and space battles. I think I basically recommend everything that de Bodard writes, but especially this. And it basically stands alone! So you can read this first, without going back to the other stories in the same world.
    • Dealers of Lightning: XEROX PARC and the Dawn of the Computer Age, by Michael Hiltzik: I’ve just started this, but it’s already a really interesting snapshot of a key period in the development of the personal computer.

    Recent recipes

    • Smitten Kitchen’s Unfussy Sugar Cookies: These cookies did, indeed, prove to be both tasty and easy to make. If you just want some easy cookies to snack on, I absolutely recommend this recipe.

    Pet photos

    \"Phyrne
    \"Close-up
    \"Benny
    ", + "url": "https://hpc.social/personal-blog/2022/adam-s-weekly-update-2022-12-04/", + + + + + + "date_published": "2022-12-05T05:49:35-07:00", + "date_modified": "2022-12-05T05:49:35-07:00", + + "author": "Thinking Out Loud" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/an-initial-look-at-deep-learning-io-performance/", + "title": "An Initial Look at Deep Learning IO Performance", + "summary": null, + "content_text": "AbstractThis blog post describes an investigation of IO behavior of TensorFlow and PyTorch during resnet50 training running on Lambda Lab’s 8x V100 GPU instances. Both ephemeral local NVMe storage and network attached persistent storage was tested. The local NVMe storage was fast enough to achieve a throughput rate required to hit synthetic test targets. The network attached persistent storage may not be able to fully saturate 8 V100 GPUs during training, though can achieve nearly the same level of performance as the local storage so long as TFRecords are utilized. Further, there are specific behaviors and bottlenecks in TensorFlow and PyTorch that can reduce training performance when using real data from ImageNet.AcknowledgementsThank you to Michael Balaban at Lambda Labs for providing access to their GPU cloud for this testing. Thank you to Chuan Li for the creation of his TensorFlow benchmarking tools. Thank you also to Andrej Karpathy, Toby Boyd, Yanan Cao, Sanjoy Das, Thomas Joerg, and Justin Lebar for their excellent blog posts on deep learning and XLA performance that helped inform this article. I hope that this post will be useful for others as your work and writing was useful for me.Introduction …just because you can formulate your problem as RL doesn’t mean you should. If you insist on using the technology without understanding how it works you are likely to fail.         Andrej Karpathy, A Recipe for Training Neural Networks, 2019That was the phrase that stuck in my head when I first started this project. What project you may ask? I want to understand how deep learning experiments utilize fast storage devices. Not just any experiments either: real ones, preferably big. That’s how I happened upon Andrej Karpathy’s blog. He is the former Sr. Director of AI at Tesla and knows a thing or two about training big neural networks. I’ve spent the last decade working on Ceph and have worked on distributed systems and distributed storage for nearly 2 decades at this point. But training neural nets? The closest I’ve come was back in the early 2000s when I tried to build a tool to predict video game framerates. I scraped benchmark numbers from review websites and built M5 decision trees based on hardware and video card settings. It sort of worked, but was terribly overtrained on a small (~4000 sample) dataset. Training with petabytes of data to teach an AI how to responsibly drive a car? I can already feel a bit of imposter syndrome setting in.Thankfully my goal is comparatively modest. I don’t need to build a cutting edge classifier or explore the intricacies of manually implementing back-propagation. I simply want to understand the IO patterns that are involved when training big datasets with fast GPUs so I can help researchers speed up their work. Up until now, my ability to do this was fairly limited. At the day job I’ve had access to a small group of nodes with extremely modest GPUs. I set up runs with MLPerf but the datasets (WMT G-E and CoCo) easily fit into memory. 
Other than a short burst of read traffic at the very beginning of training there was very little IO. Recently I had the opportunity to meet Michael Balaban, Co-Founder of Lambda Labs. I told him what I wanted to do and he gave me access to Lambda’s GPU cloud and beta persistent storage to give it a try. I was able to grab one of Lambda’s 8x Tesla V100 instances (These things are incredibly popular so it’s best to grab one early in the morning!). Not all of Lambda’s instance types currently have access to the persistent storage but the V100 instances in the Texas zone do. Once secured, I got to work.TensorFlow - SyntheticBefore even attempting to run tests with real data, I realized I needed a baseline to start with. Luckily, Chuan Li, Lambda’s Chief Scientific Officer, wrote a tool for running TensorFlow benchmarks and made it available on github here. One of the advantages of Lambda’s cloud is that they’ve already bundled up many popular tools for running deep-learning workloads into one package called Lambda Stack which comes pre-installed when you start an instance. This made it fast to get started, though I did run into one issue. Lambda Stack comes standard with TensorFlow 2, but Chuan Li’s tool relies on a TensorFlow benchmark submodule that is designed to work with TensorFlow 1. Luckily, the parent repository was unofficially updated to work with Tensorflow 2 (with a warning that it is no longer being maintained). A quick “git checkout master” in the “benchmarks” submodule directory got everything working. Chuan Li’s tool makes it simple to run tests with several preconfigured templates already included. I chose the fp16 resnet50 configuration as it should be fast at processing images and is fairly standard.TF_XLA_FLAGS=--tf_xla_auto_jit=2 ./batch_benchmark.sh X X 1 100 2 config/config_resnet50_replicated_fp16_train_synUsing the invocation provided in the benchmark README.md file, I was able to quickly run benchmarks with synthetic data on up to 8 V100 GPUs in the node. At one point I got stuck, hitting what appeared at first to be an unexplainable 25% performance loss. I reran the tests multiple times and even monitored GPU clockspeeds/temperatures in nvidia-smi with no luck. Ultimately I discovered my error. In the slow cases, I had inadvertently left out the “TF_XLA_FLAGS=–tf_xla_auto_jit=2” environment variable. It turns out that setting this allows Tensorflow compile and execute functions with XLA (Accelerated Linear Algebra) support which is a pretty big win for these tests.At this point I decided that I needed to understand how Chuan Li’s tool works. It turns out that he is using the same base tf_cnn_benchmarks.py benchmark code that companies like Nvidia and Dell also use for benchmarking their GPU solutions. I spent some time running it directly with Dell’s settings from their deep learning overview here. Unfortunately those tests had mixed results, even after various tweaks. While researching the XLA issues I mentioned earlier however, I made an even better discovery on the TensorFlow website. I found an excellent blog post with performance data written by some of the core Tensorflow developers. It’s now 4 years old, but still appears to be quite valid. The tuning options used were both simpler and resulted in higher performance versus other configurations that I’ve come across.Training with synthetic data in Lambda’s cloud resulted in similar performance to what the Tensorflow developer’s reported. 
In fact, using their own settings yielded slightly faster results when running on Lambda’s 8xV100 instance! It was incredibly encouraging to me that even in Lambda’s cloud environment with virtual machine instances I could achieve performance that was as fast or faster than what the Tensorflow developers were reporting.Choosing a Real Data Set The first step to training a neural net is to not touch any neural net code at all and instead begin by thoroughly inspecting your data.         Andrej Karpathy, A Recipe for Training Neural Networks, 2019Having convinced myself that I had Tensorflow operating reasonably efficiently in synthetic tests, it was time to start thinking about what dataset to use for “real” training. The largest and most obvious choice is ImageNet. ImageNet is composed of over 1.2 million categorized images that form a roughly 160GB training dataset. It is also the largest dataset I could find that was publicly accessible. Downloading it isn’t so easy however. The only version that I could access is the ImageNet Object Localization Challenge dataset hosted on kaggle.After finally figuring out how to download the data, it was time to follow Andrej’s advice and try to learn something about it. While ImageNet is curated and annotated, it has many images of different sizes, dimensions, and pixel counts. Images also come from many sources with different levels of quality. Through the power of stack-exchange I was able to find a bash one-liner script to generate a histogram of image sizes:find . -type f -print0 | xargs -0 ls -l | awk '{size[int(log($5)/log(2))]++}END{for (i in size) printf(\"%10d %3d\\n\", 2^i, size[i])}' | sort -nRoughly 80% of the images are in the 64KB or 128KB size bins. Almost all of the remaining images are smaller. That gives us a pretty good idea of what kind of IOs to expect during classification. Or at least…it does for frameworks that read those images directly. In Tensorflow’s case, there’s an alternative format called TFRecord. TFRecords are basically collections of image data sequentially laid out in much larger files. Instead of iterating over thousands or millions of individual image files, TFRecords allow Tensorflow to instead stream fewer, larger files that each house multiple images. It’s a one time cost to pre-process the data so Tensorflow has less work to do during training. After I downloaded the ImageNet data I took a shot at converting the ImageNet LOC data into TensorFlow records. Luckily, the TensorFlow tpu github repository already has a tool that can do this. I had to manipulate the dataset slightly, but ultimately this process worked (at least for the training data):pip install gcloud google-cloud-storagepip install protobuf==3.20.1mkdir ~/data/ImageNetFooln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/train ~/data/ImageNetFoo/trainln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/val ~/data/ImageNetFoo/valln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/test ~/data/ImageNetFoo/testln -s ~/data/ImageNet/LOC_synset_mapping.txt ~/data/ImageNetFoo/synset_labels.txtpython imagenet_to_gcs.py --raw_data_dir=/home/ubuntu/data/ImageNetFoo --local_scratch_dir=/home/ubuntu/ExaltedOrbs/ImageNet/tf_records --nogcs_uploadPerhaps I should say that this worked so long as the original dataset was located on the local NVMe drive. The persistent storage didn’t fare as well. 
Attempting to decompress ImageNet on the persistent storage resulted in blowing past the max number of open files allowed with errors like:OSError: [Errno 24] Too many open files.Unfortunately this couldn’t be fixed on the instance. It appeared to be passed through from the host and the persistent storage was completely unusable until the instance was rebooted. Recently I spoke to one of Lambda’s engineers and they are working on a fix. (It may already be implemented by the time you read this!) I also want to note that the persistent storage is still in beta so issues like this are not entirely unexpected. Having said that, before hitting the error it was significantly slower extracting ImageNet on the persistent storage vs on the local NVMe storage. It’s probably best to extract ImageNet locally and then write the large TFRecords to the persistent storage during the conversion process. Luckily extracting ImageNet to local storage was fine, and storing the original archive and the resulting TFRecords on the persistent storage worked perfectly fine as well.FIO - Baseline IO ResultsNext, I turned my attention to running baseline tests on Lambda’s local and persistent storage using fio. Fio is a highly configurable and well respected benchmark in the storage community and perfect for generating baseline results. I decided to use a dataset size that is roughly similar to ImageNet (200GB), the libaio engine in fio with direct IO, and an appropriately high IO depth to let the NVMe drives stretch their legs a bit.Throughput with the local NVMe drive(s) is surprisingly good. The persistent storage is slower, but still might be fast enough at a little over 1GB/s for large reads. 16K IOPS was somewhat slower in both cases. I chose 16K so that I could quickly compare to tests I ran in my Ceph QEMU/KVM performance blog post here. Without getting into the details, I suspect there’s still some room for improved IOPS with Lambda’s setup. Luckily though, converting into TFRecords should make Tensorflow throughput bound instead of latency bound. What about PyTorch or other tools that want to read images directly though? Fio gives us the ability to simulate it by using its ‘bssplit’ feature. We can take the size ranges and percentiles generated when examining ImageNet and give fio a similar distribution:fio --ioengine=libaio --direct=1 --bssplit=2K/1:4K/2:8K/4:16K/8:32K/13:64K/38:128K/33:256K/1 --iodepth=128 --rw=randread --norandommap --size=200G --numjobs=1 --runtime=300 --time_based --name=fooThis isn’t exactly right as we are not reading data spread across millions of files, but it should provide something of an upper bound on what to expect. It looks like the persistent storage can do approximately 10K reads/second at a throughput rate of around 750MB/s. The local storage is about 3-4 times faster. Local storage should be fast enough to support the kind of images/second throughput rates we want to hit in Tensorflow on 8 V100 GPUs, but the jury is still out for the persistent storage.Tensorflow - ImageNetRunning benchmarks with real data rather than synthetic data is fairly straightforward in Tensorflow. 
You simply append data_dir and data_name flags to the CLI invocation to let it know where the TFRecords are located:sync; echo 3 | sudo tee /proc/sys/vm/drop_cachespython ./tf_cnn_benchmarks.py --batch_size=256 --num_batches=100 --model=resnet50 --optimizer=momentum --variable_update=replicated --all_reduce_spec=nccl --use_fp16=True --nodistortions --gradient_repacking=2 --compute_lr_on_cpu=True --single_l2_loss_op=True --xla_compile=True --num_gpus=8 --loss_type_to_report=base_loss --data_dir=/home/ubuntu/ImageNet-TF/train --data_name=imagenetOuch. Much lower performance with the ImageNet data vs synthetic! This is especially unfortunate given that 4 years ago the Tensorflow developers reported much better results. I spent some time reading and experimenting with different settings. Ultimately the one setting that made a substantial difference was “datasets_num_private_threads”. In the Tensorflow benchmark source code, this setting is described as: “[The] number of threads for a private threadpool created for all datasets computation.” I’ll go into more detail what these threads are doing in a bit. For now, let’s see how increasing the number of threads affects the results:Increasing the number of private threads has a dramatic effect on performance, though I was unable to fully match the performance achieved in the synthetic tests on either the local or persistent storage. The local storage fared better at high thread counts gradually topping out at around 8600 images/second. At high private thread counts the persistent storage topped out between 7000-8000 images/second with a higher degree of variability between runs. I suspect that in this case the persistent storage has likely hit its (per instance) limit.In addition to having a dramatic effect on performance, changing the private thread count also had a large effect on the CPU consumption of the TensorFlow process. CPU usage increases almost linearly with additional private threads up to around 30 cores. What exactly are these private threads doing? To answer that question, I utilized two tools that I often deploy when diagnosing CPU usage in Ceph. When testing with a lower number of private threads, I used linux’s perf tool to look at where cycles are being consumed when the private threads are fully saturated. At higher levels of private threads, I used my wallclock profiler uwpmp to look at how private threads spend their time when increasing the thread count no longer improves performance.In the first case with perf, we can get a good view of the work that these private threads are doing:--77.31%--tensorflow::ThreadPoolDevice::Compute | |--51.19%--0x7f511a00c7d8 | | | --51.18%--tensorflow::jpeg::Uncompress |--14.48%--tensorflow::ResizeBilinearOp<Eigen::ThreadPoolDevice, unsigned char>::Compute |--5.47%--tensorflow::CastOpBase::Compute |--2.66%--tensorflow::ReverseV2Op<Eigen::ThreadPoolDevice, unsigned char, int>::ComputeThe majority of the cycles consumed is in jpeg decompression and resize operations, along with a smattering of other stuff. What happens if we look at a case with a higher private thread count but now look at wallclock time instead of cycles? I ended up having some trouble getting the profiler to work properly and consistently get clean callgraphs, but I was able to get at least one run in that revealed some interesting information. 
First, I saw time spent in the same functions that perf told us we were spending cycles in:+ 100.00% Eigen::ThreadPoolTempl<tensorflow::thread::EigenEnvironment>::WorkerLoop(int) + 99.90% ??? |+ 97.30% ??? ||+ 92.40% ??? |||+ 77.10% _PyEval_EvalFrameDefault ||||+ 47.20% ??? |||||+ 38.10% tensorflow::jpeg::Uncompress(void const*, int, tensorflow::jpeg::UncompressFlags const&, long*, std::function<unsigned char* (int, int, int)>) ||||+ 12.20% tensorflow::ResizeBilinearOp<Eigen::ThreadPoolDevice, unsigned char>::Compute(tensorflow::OpKernelContext*) ||||+ 4.40% tensorflow::CastOpBase::Compute(tensorflow::OpKernelContext*) ||||+ 1.70% tensorflow::ReverseV2Op<Eigen::ThreadPoolDevice, unsigned char, int>::Compute(tensorflow::OpKernelContext*)But the wallclock profile also exposed that there may be contention in multiple areas in the private threads around some of the nsync synchronization primitives being used: ||||||| | + 4.50% nsync::nsync_mu_semaphore_p(nsync::nsync_semaphore_s_*) ||||||| | + 4.50% syscallThis almost always appeared nested deep inside:tensorflow::BFCAllocator::AllocateRaw(unsigned long, unsigned long, tensorflow::AllocationAttributes const&)Sadly I was missing a number of debug symbols and don’t 100% trust the wallclock trace. For now I’ll just say that the private threads are doing a significant amount of work decompressing and manipulating the image data to keep the GPUs fed. I suspect that with newer and faster GPUs the image retrieval pipeline could become an even bigger issue when training with real image data. The mystery for me is how The TensorFlow developers achieved such good results 4 years ago without using dedicated private threads at all. Perhaps they had a significantly faster jpeg decompression mechanism that I am unaware of?PyTorch - ImageNetAfter running Tensorflow, I also ran some benchmarks in PyTorch using Nvidia’s “DeepLearningExamples” github repo. First, I installed the prereqs and setup the repository:pip install 'git+https://github.com/NVIDIA/dllogger'pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda110git clone https://github.com/NVIDIA/DeepLearningExamplesThen, prepared ImageNet for usage in PyTorch:cd ~/data/ImageNet/ILSVRC/Data/CLS-LOC/valwget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bashAnd finally ran a test:cd DeepLearningExamples/PyTorch/Classification/ConvNetssync; echo 3 | sudo tee /proc/sys/vm/drop_cachespython ./multiproc.py --nproc_per_node 1 ./main.py --arch resnet50 --label-smoothing 0.1 --run-epoch 1 --amp --static-loss-scale 256 --workspace /home/ubuntu/data/ImageNet-Scratch /home/ubuntu/data/ImageNet-Orig/ILSVRC/Data/CLS-LOC/There are a couple of differences here versus the TensorFlow tests. First, I’m using the raw ImageNet archive instead of a preprocessed TFRecord dataset, so the read behavior is different. Because I was unable to extract or copy the raw ImageNet archive onto the persistent storage, I’m also only testing the local NVMe drive. Finally, I didn’t see any specific examples for running with fp16 in nVidia’s documentation, so I’m using amp (automatic mixed precision) which may be slightly slower.Given the number of differences it’s tough to draw direct comparisons with Tensorflow. Amp is one difference, but it’s quite possible that there are tuning options that could improve performance here that I don’t know about. I did notice that PyTorch, like Tensorflow, is using quite a bit of CPU to keep the GPUs working. 
I suspect that there are ways to tweak the IO pipeline that could improve performance. For now though, let’s compare the IO patterns on the local NVMe drive during the Tensorflow and PyTorch runs. I was hoping to be able to use blktrace to do this, but unfortunately was unable to get any data from the virtual devices in the instance. I was able to collect more general statistics using collectl however.Disk Read Statistics During PyTorch 8 GPU run: Time Name KBytes Merged IOs Size Wait QLen SvcTim 00:29:18 vda 761136 0 6746 113 58 431 0 00:29:19 vda 752172 0 6648 113 112 810 0 00:29:20 vda 747824 0 6595 113 84 604 0 00:29:21 vda 735964 0 6583 112 73 551 0 00:29:22 vda 695636 0 6237 112 102 760 0 Disk Read Statistics During TensorFlow 8 GPU run: Time Name KBytes Merged IOs Size Wait QLen SvcTim 00:38:45 vda 1081324 0 8440 128 0 7 0 00:38:46 vda 927512 0 7241 128 0 7 0 00:38:47 vda 913512 0 7130 128 0 7 0 00:38:48 vda 1047444 0 8186 128 0 6 0 00:38:49 vda 968776 0 7560 128 0 6 0 When just looking at the IO sizes, both runs appear similar, but that doesn’t tell the whole story. It is likely that Tensorflow is doing much larger reads that are broken up into contiguous 128KB chunks by the block layer based on the underlying device’s max_sectors_kb setting. The tells here are the very low queue length and wait times for the TensorFlow run versus the PyTorch run. In both case the device service times are low (0), but in the TensorFlow case IOs are still backing up in the device queue.Interestingly, it appears that it may be possible to use nVidia’s DALI (Data Loading Library) package to read TFRecords into PyTorch. I didn’t have time to attempt it, but potentially that could have a big effect on IO behavior and performance as well.ConclusionAs I’ve been writing this post, I realize just how complicated it is to understand the performance characteristics of training of neural networks. Even as we talk about metrics like images/second, the options that are used (batch size for instance) can also affect convergence. It’s very difficult to come up with a common methodology that is always better than others. I wonder if another metric, like reaching a desired level of convergence, would be better in the end. Having said that, I am glad for having done this exercise as I learned some valuable things: Pre-processing data into a format like TFRecords on fast local storage is a big win from an IO perspective. It lets storage systems that have slow metadata performance succeed so long as they have enough sequential read throughput to keep the machine learning framework busy. This is a big win for many distributed file systems that may have substandard metadata performance (and even the good ones may still benefit). To train on a dataset like ImageNet, you need somewhere around 1-1.3GB/s of raw disk throughput to keep 8 V100 GPUs busy when training in fp16. For amp or fp32 the requirements are likely lower since the GPUs can’t work quite as fast. With modern GPUs that are faster than the V100, the disk throughput requirements could be significantly higher. Lambda’s local NVMe storage is likely fast enough to saturate 8 GPUs, even newer ones, so long as the rest of the IO path can keep up. The persistent storage appears to become a bottleneck with sufficient GPUs and TensorFlow private threads, though can still function fairly well so long as TFRecords are used. A concern going forward is how to ensure that the data pipeline in TensorFlow and PyTorch are fast enough to keep the GPUs fed. 
The Tensorflow benchmark required a large number of private threads and showed potential evidence of contention at high thread counts. PyTorch did not appear to natively support TFRecords, but NVidia DALI or other 3rd party code might help improve the IO path. If it’s necessary to train directly with images rather than TFRecords, it may not make sense to host them on shared file systems. It appears that Tensorflow and possibly PyTorch give users the ability to specify a separate training data and work directory. If all operations against the training data are reads, it may be better to host datasets on read-only block device snapshots. For instance with Ceph, perhaps you could create a read/write RBD volume where you put a certain dataset, take a snapshot, and then map that snapshot as read only on multiple instances that all need access to the same image set. Even with a training set as large as ImageNet, Lambda’s instances have so much memory that eventually the entire dataset becomes cached. It was necessary to sync and drop caches before each test and keep tests short enough that they didn’t re-read the same data from buffer cache. I was able to watch as long running tests eventually stopped performing reads and got faster as time went on. This could make apples-to-apples comparison between different storage vendors difficult if not carefully controlled. I’m almost certainly missing additional tweaks that can help speed up both Tensorflow and PyTorch. This post shouldn’t be seen as the be-all/end-all for how to achieve high performance with these frameworks, but I hope it may at least help showcase some of the areas that are valuable to investigate when trying to train with real data and achieve high performance. This wraps up my initial work looking at Deep Learning IO behavior. I hope that next time I can come armed with a bit more knowledge about the internals of how PyTorch and Tensorflow work, focus a bit more on the quality of the training, find even larger datasets to work with, and maybe actually accomplish something useful rather than just play with ImageNet.Thanks for reading!", + "content_html": "

    Abstract

    This blog post describes an investigation of the IO behavior of TensorFlow and PyTorch during resnet50 training on Lambda Labs’ 8x V100 GPU instances. Both ephemeral local NVMe storage and network-attached persistent storage were tested. The local NVMe storage was fast enough to achieve the throughput rate required to hit synthetic test targets. The network-attached persistent storage may not be able to fully saturate 8 V100 GPUs during training, though it can achieve nearly the same level of performance as the local storage so long as TFRecords are utilized. Further, there are specific behaviors and bottlenecks in TensorFlow and PyTorch that can reduce training performance when using real data from ImageNet.

    Acknowledgements

    Thank you to Michael Balaban at Lambda Labs for providing access to their GPU cloud for this testing. Thank you to Chuan Li for the creation of his TensorFlow benchmarking tools. Thank you also to Andrej Karpathy, Toby Boyd, Yanan Cao, Sanjoy Das, Thomas Joerg, and Justin Lebar for their excellent blog posts on deep learning and XLA performance that helped inform this article. I hope that this post will be useful for others as your work and writing was useful for me.

    Introduction

    …just because you can formulate your problem as RL doesn’t mean you should. If you insist on using the technology without understanding how it works you are likely to fail.

            Andrej Karpathy, A Recipe for Training Neural Networks, 2019

    That was the phrase that stuck in my head when I first started this project. What project, you may ask? I want to understand how deep learning experiments utilize fast storage devices. Not just any experiments either: real ones, preferably big. That’s how I happened upon Andrej Karpathy’s blog. He is the former Sr. Director of AI at Tesla and knows a thing or two about training big neural networks. I’ve spent the last decade working on Ceph and have worked on distributed systems and distributed storage for nearly 2 decades at this point. But training neural nets? The closest I’ve come was back in the early 2000s when I tried to build a tool to predict video game framerates. I scraped benchmark numbers from review websites and built M5 decision trees based on hardware and video card settings. It sort of worked, but was terribly overtrained on a small (~4000 sample) dataset. Training with petabytes of data to teach an AI how to responsibly drive a car? I can already feel a bit of imposter syndrome setting in.

    Thankfully my goal is comparatively modest. I don’t need to build a cutting edge classifier or explore the intricacies of manually implementing back-propagation. I simply want to understand the IO patterns that are involved when training big datasets with fast GPUs so I can help researchers speed up their work. Up until now, my ability to do this was fairly limited. At the day job I’ve had access to a small group of nodes with extremely modest GPUs. I set up runs with MLPerf but the datasets (WMT G-E and CoCo) easily fit into memory. Other than a short burst of read traffic at the very beginning of training there was very little IO. Recently I had the opportunity to meet Michael Balaban, Co-Founder of Lambda Labs. I told him what I wanted to do and he gave me access to Lambda’s GPU cloud and beta persistent storage to give it a try. I was able to grab one of Lambda’s 8x Tesla V100 instances (These things are incredibly popular so it’s best to grab one early in the morning!). Not all of Lambda’s instance types currently have access to the persistent storage but the V100 instances in the Texas zone do. Once secured, I got to work.

    TensorFlow - Synthetic

    Before even attempting to run tests with real data, I realized I needed a baseline to start with. Luckily, Chuan Li, Lambda’s Chief Scientific Officer, wrote a tool for running TensorFlow benchmarks and made it available on github here. One of the advantages of Lambda’s cloud is that they’ve already bundled up many popular tools for running deep-learning workloads into one package called Lambda Stack which comes pre-installed when you start an instance. This made it fast to get started, though I did run into one issue. Lambda Stack comes standard with TensorFlow 2, but Chuan Li’s tool relies on a TensorFlow benchmark submodule that is designed to work with TensorFlow 1. Luckily, the parent repository was unofficially updated to work with Tensorflow 2 (with a warning that it is no longer being maintained). A quick “git checkout master” in the “benchmarks” submodule directory got everything working. Chuan Li’s tool makes it simple to run tests with several preconfigured templates already included. I chose the fp16 resnet50 configuration as it should be fast at processing images and is fairly standard.

    TF_XLA_FLAGS=--tf_xla_auto_jit=2 ./batch_benchmark.sh X X 1 100 2 config/config_resnet50_replicated_fp16_train_syn

    Using the invocation provided in the benchmark README.md file, I was able to quickly run benchmarks with synthetic data on up to 8 V100 GPUs in the node. At one point I got stuck, hitting what appeared at first to be an unexplainable 25% performance loss. I reran the tests multiple times and even monitored GPU clockspeeds/temperatures in nvidia-smi with no luck. Ultimately I discovered my error. In the slow cases, I had inadvertently left out the “TF_XLA_FLAGS=--tf_xla_auto_jit=2” environment variable. It turns out that setting this allows TensorFlow to compile and execute functions with XLA (Accelerated Linear Algebra) support, which is a pretty big win for these tests.
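
    If you would rather flip this switch from inside a script than via the environment, TensorFlow exposes an equivalent knob. A minimal sketch, assuming a reasonably recent TF 2.x release (this is my reading of the tf.config API, not something the benchmark harness itself uses):

    import tensorflow as tf

    # Enable XLA auto-clustering in code; as far as I can tell this matches
    # launching with TF_XLA_FLAGS=--tf_xla_auto_jit=2.
    tf.config.optimizer.set_jit(True)  # newer releases also accept "autoclustering"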


    At this point I decided that I needed to understand how Chuan Li’s tool works. It turns out that he is using the same base tf_cnn_benchmarks.py benchmark code that companies like Nvidia and Dell also use for benchmarking their GPU solutions. I spent some time running it directly with Dell’s settings from their deep learning overview here. Unfortunately those tests had mixed results, even after various tweaks. While researching the XLA issues I mentioned earlier however, I made an even better discovery on the TensorFlow website. I found an excellent blog post with performance data written by some of the core Tensorflow developers. It’s now 4 years old, but still appears to be quite valid. The tuning options used were both simpler and resulted in higher performance versus other configurations that I’ve come across.


    Training with synthetic data in Lambda’s cloud resulted in similar performance to what the TensorFlow developers reported. In fact, using their own settings yielded slightly faster results when running on Lambda’s 8xV100 instance! It was incredibly encouraging to me that even in Lambda’s cloud environment with virtual machine instances I could achieve performance that was as fast or faster than what the TensorFlow developers were reporting.

    Choosing a Real Data Set

    The first step to training a neural net is to not touch any neural net code at all and instead begin by thoroughly inspecting your data.

            Andrej Karpathy, A Recipe for Training Neural Networks, 2019

    Having convinced myself that I had TensorFlow operating reasonably efficiently in synthetic tests, it was time to start thinking about what dataset to use for “real” training. The largest and most obvious choice is ImageNet, which is composed of over 1.2 million categorized images that form a roughly 160GB training dataset. It is also the largest dataset I could find that was publicly accessible. Downloading it isn’t so easy however. The only version that I could access is the ImageNet Object Localization Challenge dataset hosted on Kaggle.

    After finally figuring out how to download the data, it was time to follow Andrej’s advice and try to learn something about it. While ImageNet is curated and annotated, it has many images of different sizes, dimensions, and pixel counts. Images also come from many sources with different levels of quality. Through the power of Stack Exchange I was able to find a bash one-liner to generate a histogram of image sizes:

    find . -type f -print0 | xargs -0 ls -l | awk '{size[int(log($5)/log(2))]++}END{for (i in size) printf(\"%10d %3d\\n\", 2^i, size[i])}' | sort -n
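
    Awk one-liners are a bit cryptic, so here is the same power-of-two bucketing written out in Python (a rough equivalent I put together for illustration; the output format differs slightly from the awk version):

    import math
    import os
    from collections import Counter

    # Count files under the current directory, bucketed by power-of-two size.
    buckets = Counter()
    for root, _, files in os.walk("."):
        for name in files:
            size = os.path.getsize(os.path.join(root, name))
            if size > 0:
                buckets[2 ** int(math.log2(size))] += 1

    for bucket in sorted(buckets):
        print(f"{bucket:>10} {buckets[bucket]:>8}")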


    Roughly 80% of the images are in the 64KB or 128KB size bins. Almost all of the remaining images are smaller. That gives us a pretty good idea of what kind of IOs to expect during classification. Or at least…it does for frameworks that read those images directly. In TensorFlow’s case, there’s an alternative format called TFRecord. TFRecords are basically collections of image data sequentially laid out in much larger files. Instead of iterating over thousands or millions of individual image files, TFRecords allow TensorFlow to stream fewer, larger files that each house multiple images. It’s a one-time cost to pre-process the data so TensorFlow has less work to do during training. After I downloaded the ImageNet data I took a shot at converting the ImageNet LOC data into TensorFlow records. Luckily, the TensorFlow tpu github repository already has a tool that can do this. I had to manipulate the dataset slightly, but ultimately this process worked (at least for the training data):

    pip install gcloud google-cloud-storage
    pip install protobuf==3.20.1
    mkdir ~/data/ImageNetFoo
    ln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/train ~/data/ImageNetFoo/train
    ln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/val ~/data/ImageNetFoo/val
    ln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/test ~/data/ImageNetFoo/test
    ln -s ~/data/ImageNet/LOC_synset_mapping.txt ~/data/ImageNetFoo/synset_labels.txt
    python imagenet_to_gcs.py --raw_data_dir=/home/ubuntu/data/ImageNetFoo --local_scratch_dir=/home/ubuntu/ExaltedOrbs/ImageNet/tf_records --nogcs_upload

    Perhaps I should say that this worked so long as the original dataset was located on the local NVMe drive. The persistent storage didn’t fare as well. Attempting to decompress ImageNet on the persistent storage resulted in blowing past the max number of open files allowed with errors like:

    OSError: [Errno 24] Too many open files.
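
    For reference, the usual first-line workaround for this error is to raise the per-process open file limit, either with ulimit -n or from inside Python via the resource module; a minimal sketch:

    import resource

    # Bump this process's open-file limit up to the hard limit
    # (the Python equivalent of `ulimit -n`).
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
    print(f"RLIMIT_NOFILE: {soft} -> {hard}")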

    Unfortunately this couldn’t be fixed on the instance. It appeared to be passed through from the host, and the persistent storage was completely unusable until the instance was rebooted. Recently I spoke to one of Lambda’s engineers and they are working on a fix. (It may already be implemented by the time you read this!) I also want to note that the persistent storage is still in beta, so issues like this are not entirely unexpected. Having said that, before hitting the error, extracting ImageNet was significantly slower on the persistent storage than on the local NVMe storage. It’s probably best to extract ImageNet locally and then write the large TFRecords to the persistent storage during the conversion process. Luckily extracting ImageNet to local storage was fine, and storing the original archive and the resulting TFRecords on the persistent storage worked perfectly well too.

    FIO - Baseline IO Results

    Next, I turned my attention to running baseline tests on Lambda’s local and persistent storage using fio. Fio is a highly configurable and well-respected benchmark in the storage community and perfect for generating baseline results. I decided to use a dataset size roughly similar to ImageNet (200GB), the libaio engine in fio with direct IO, and an appropriately high IO depth to let the NVMe drives stretch their legs a bit.


    Throughput with the local NVMe drive(s) is surprisingly good. The persistent storage is slower, but still might be fast enough at a little over 1GB/s for large reads. 16K random IOPS results were less impressive in both cases. I chose 16K so that I could quickly compare to tests I ran in my Ceph QEMU/KVM performance blog post here. Without getting into the details, I suspect there’s still some room for improved IOPS with Lambda’s setup. Luckily though, converting into TFRecords should make TensorFlow throughput-bound instead of latency-bound. What about PyTorch or other tools that want to read images directly though? Fio gives us the ability to simulate that using its ‘bssplit’ feature. We can take the size ranges and percentiles generated when examining ImageNet and give fio a similar distribution:

    fio --ioengine=libaio --direct=1 --bssplit=2K/1:4K/2:8K/4:16K/8:32K/13:64K/38:128K/33:256K/1 --iodepth=128 --rw=randread --norandommap --size=200G --numjobs=1 --runtime=300 --time_based --name=foo


    This isn’t exactly right as we are not reading data spread across millions of files, but it should provide something of an upper bound on what to expect. It looks like the persistent storage can do approximately 10K reads/second at a throughput rate of around 750MB/s. The local storage is about 3-4 times faster. Local storage should be fast enough to support the kind of images/second throughput rates we want to hit in Tensorflow on 8 V100 GPUs, but the jury is still out for the persistent storage.
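
    A quick back-of-envelope calculation makes that concern concrete: if reads map roughly one-to-one onto image files, the sustainable images/second is just throughput divided by average image size. The numbers below are assumptions pulled from the measurements above (~115KB average image, ~750MB/s for the persistent storage, and the local storage pegged at roughly 3.5x that):

    # Rough ceiling on images/second if every image is read individually.
    avg_image_bytes = 115 * 1024  # assumed from the size histogram above
    for name, mb_per_sec in (("persistent", 750), ("local NVMe", 2600)):
        print(f"{name}: ~{mb_per_sec * 1e6 / avg_image_bytes:.0f} images/sec")

    By that math the persistent storage lands somewhere in the mid-6000s of images/second for direct image reads, which is exactly why it may struggle to keep all 8 GPUs busy.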

    Tensorflow - ImageNet

    Running benchmarks with real data rather than synthetic data is fairly straightforward in Tensorflow. You simply append data_dir and data_name flags to the CLI invocation to let it know where the TFRecords are located:

    sync; echo 3 | sudo tee /proc/sys/vm/drop_caches
    python ./tf_cnn_benchmarks.py --batch_size=256 --num_batches=100 --model=resnet50 --optimizer=momentum --variable_update=replicated --all_reduce_spec=nccl --use_fp16=True --nodistortions --gradient_repacking=2 --compute_lr_on_cpu=True --single_l2_loss_op=True --xla_compile=True --num_gpus=8 --loss_type_to_report=base_loss --data_dir=/home/ubuntu/ImageNet-TF/train --data_name=imagenet


    Ouch. Much lower performance with the ImageNet data vs synthetic! This is especially unfortunate given that 4 years ago the TensorFlow developers reported much better results. I spent some time reading and experimenting with different settings. Ultimately the one setting that made a substantial difference was “datasets_num_private_threads”. In the TensorFlow benchmark source code, this setting is described as: “[The] number of threads for a private threadpool created for all datasets computation.” I’ll go into more detail about what these threads are doing in a bit. For now, let’s see how increasing the number of threads affects the results:


    Increasing the number of private threads has a dramatic effect on performance, though I was unable to fully match the performance achieved in the synthetic tests on either the local or persistent storage. The local storage fared better at high thread counts gradually topping out at around 8600 images/second. At high private thread counts the persistent storage topped out between 7000-8000 images/second with a higher degree of variability between runs. I suspect that in this case the persistent storage has likely hit its (per instance) limit.
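
    For readers who aren’t using tf_cnn_benchmarks: in a plain tf.data pipeline, the analogous knob (as I understand it, on recent TF 2.x releases) is the private threadpool option on the dataset. A hedged sketch, with the path and feature keys assumed to match the imagenet_to_gcs.py conversion from earlier:

    import tensorflow as tf

    # Give dataset work (reads, JPEG decode, resize) its own private threadpool.
    options = tf.data.Options()
    options.threading.private_threadpool_size = 30  # roughly where scaling flattened for me

    files = tf.io.gfile.glob("/home/ubuntu/ImageNet-TF/train/train-*")
    ds = tf.data.TFRecordDataset(files, num_parallel_reads=tf.data.AUTOTUNE)

    def parse(record):
        # Feature keys assumed to follow the imagenet_to_gcs.py format.
        feats = tf.io.parse_single_example(record, {
            "image/encoded": tf.io.FixedLenFeature([], tf.string),
            "image/class/label": tf.io.FixedLenFeature([], tf.int64),
        })
        image = tf.io.decode_jpeg(feats["image/encoded"], channels=3)
        return tf.image.resize(image, [224, 224]), feats["image/class/label"]

    ds = ds.map(parse, num_parallel_calls=tf.data.AUTOTUNE).batch(256)
    ds = ds.with_options(options)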

    In addition to having a dramatic effect on performance, changing the private thread count also had a large effect on the CPU consumption of the TensorFlow process. CPU usage increases almost linearly with additional private threads up to around 30 cores. What exactly are these private threads doing? To answer that question, I utilized two tools that I often deploy when diagnosing CPU usage in Ceph. When testing with a lower number of private threads, I used Linux’s perf tool to look at where cycles are being consumed when the private threads are fully saturated. At higher private thread counts, I used my wallclock profiler uwpmp to look at how the private threads spend their time once adding threads no longer improves performance.

    In the first case with perf, we can get a good view of the work that these private threads are doing:

    --77.31%--tensorflow::ThreadPoolDevice::Compute
              |
              |--51.19%--0x7f511a00c7d8
              |          |
              |           --51.18%--tensorflow::jpeg::Uncompress
              |--14.48%--tensorflow::ResizeBilinearOp<Eigen::ThreadPoolDevice, unsigned char>::Compute
              |--5.47%--tensorflow::CastOpBase::Compute
              |--2.66%--tensorflow::ReverseV2Op<Eigen::ThreadPoolDevice, unsigned char, int>::Compute

    The majority of the cycles are consumed in JPEG decompression and resize operations, along with a smattering of other work. What happens if we look at a case with a higher private thread count, but now look at wallclock time instead of cycles? I had some trouble getting the profiler to work properly and consistently produce clean callgraphs, but I was able to get at least one run in that revealed some interesting information. First, I saw time spent in the same functions that perf told us we were spending cycles in:

    + 100.00% Eigen::ThreadPoolTempl<tensorflow::thread::EigenEnvironment>::WorkerLoop(int)
     + 99.90% ???
     |+ 97.30% ???
     ||+ 92.40% ???
     |||+ 77.10% _PyEval_EvalFrameDefault
     ||||+ 47.20% ???
     |||||+ 38.10% tensorflow::jpeg::Uncompress(void const*, int, tensorflow::jpeg::UncompressFlags const&, long*, std::function<unsigned char* (int, int, int)>)
     ||||+ 12.20% tensorflow::ResizeBilinearOp<Eigen::ThreadPoolDevice, unsigned char>::Compute(tensorflow::OpKernelContext*)
     ||||+ 4.40% tensorflow::CastOpBase::Compute(tensorflow::OpKernelContext*)
     ||||+ 1.70% tensorflow::ReverseV2Op<Eigen::ThreadPoolDevice, unsigned char, int>::Compute(tensorflow::OpKernelContext*)

    But the wallclock profile also exposed that there may be contention in multiple areas in the private threads around some of the nsync synchronization primitives being used:

    |||||||    |  + 4.50% nsync::nsync_mu_semaphore_p(nsync::nsync_semaphore_s_*)
    |||||||    |   + 4.50% syscall

    This almost always appeared nested deep inside:

    tensorflow::BFCAllocator::AllocateRaw(unsigned long, unsigned long, tensorflow::AllocationAttributes const&)

    Sadly I was missing a number of debug symbols and don’t 100% trust the wallclock trace. For now I’ll just say that the private threads are doing a significant amount of work decompressing and manipulating the image data to keep the GPUs fed. I suspect that with newer and faster GPUs the image retrieval pipeline could become an even bigger issue when training with real image data. The mystery for me is how the TensorFlow developers achieved such good results 4 years ago without using dedicated private threads at all. Perhaps they had a significantly faster JPEG decompression mechanism that I am unaware of?

    PyTorch - ImageNet

    After running TensorFlow, I also ran some benchmarks in PyTorch using Nvidia’s “DeepLearningExamples” github repo. First, I installed the prereqs and set up the repository:

    pip install 'git+https://github.com/NVIDIA/dllogger'
    pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda110
    git clone https://github.com/NVIDIA/DeepLearningExamples

    Then I prepared ImageNet for use in PyTorch:

    cd ~/data/ImageNet/ILSVRC/Data/CLS-LOC/val
    wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash

    And finally ran a test:

    cd DeepLearningExamples/PyTorch/Classification/ConvNets
    sync; echo 3 | sudo tee /proc/sys/vm/drop_caches
    python ./multiproc.py --nproc_per_node 1 ./main.py --arch resnet50 --label-smoothing 0.1 --run-epoch 1 --amp --static-loss-scale 256 --workspace /home/ubuntu/data/ImageNet-Scratch /home/ubuntu/data/ImageNet-Orig/ILSVRC/Data/CLS-LOC/

    There are a couple of differences here versus the TensorFlow tests. First, I’m using the raw ImageNet archive instead of a preprocessed TFRecord dataset, so the read behavior is different. Because I was unable to extract or copy the raw ImageNet archive onto the persistent storage, I’m also only testing the local NVMe drive. Finally, I didn’t see any specific examples for running with fp16 in Nvidia’s documentation, so I’m using AMP (automatic mixed precision), which may be slightly slower.


    Given the number of differences it’s tough to draw direct comparisons with TensorFlow. AMP is one difference, but it’s quite possible that there are tuning options that could improve performance here that I don’t know about. I did notice that PyTorch, like TensorFlow, uses quite a bit of CPU to keep the GPUs working. I suspect that there are ways to tweak the IO pipeline that could improve performance. For now though, let’s compare the IO patterns on the local NVMe drive during the TensorFlow and PyTorch runs. I was hoping to use blktrace for this, but unfortunately was unable to get any data from the virtual devices in the instance. I was able to collect more general statistics using collectl however.

    Disk Read Statistics During PyTorch 8 GPU run:

    Time      Name   KBytes  Merged   IOs  Size  Wait  QLen  SvcTim
    00:29:18  vda    761136       0  6746   113    58   431       0
    00:29:19  vda    752172       0  6648   113   112   810       0
    00:29:20  vda    747824       0  6595   113    84   604       0
    00:29:21  vda    735964       0  6583   112    73   551       0
    00:29:22  vda    695636       0  6237   112   102   760       0

    Disk Read Statistics During TensorFlow 8 GPU run:

    Time      Name   KBytes  Merged   IOs  Size  Wait  QLen  SvcTim
    00:38:45  vda   1081324       0  8440   128     0     7       0
    00:38:46  vda    927512       0  7241   128     0     7       0
    00:38:47  vda    913512       0  7130   128     0     7       0
    00:38:48  vda   1047444       0  8186   128     0     6       0
    00:38:49  vda    968776       0  7560   128     0     6       0


    When just looking at the IO sizes, both runs appear similar, but that doesn’t tell the whole story. It is likely that TensorFlow is doing much larger reads that are broken up into contiguous 128KB chunks by the block layer based on the underlying device’s max_sectors_kb setting. The tells here are the very low queue lengths and wait times for the TensorFlow run versus the PyTorch run. In both cases the device service times are low (0), but in the PyTorch case IOs are still backing up in the device queue.
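
    If you want to double-check that split size yourself, the block layer exposes it in sysfs; a one-liner sketch (the device name is assumed to match the vda device above):

    # Print the block layer's maximum request size for the virtual disk.
    with open("/sys/block/vda/queue/max_sectors_kb") as f:
        print("max_sectors_kb:", f.read().strip())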

    Interestingly, it appears that it may be possible to use Nvidia’s DALI (Data Loading Library) package to read TFRecords into PyTorch. I didn’t have time to attempt it, but potentially that could have a big effect on IO behavior and performance as well.
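
    As a rough sketch of what that might look like, based on my reading of the DALI documentation rather than anything I actually ran (paths and feature keys are assumed to match the TFRecord conversion earlier, and the .idx files would be generated once with DALI’s tfrecord2idx utility):

    from nvidia.dali import fn, pipeline_def
    import nvidia.dali.tfrecord as tfrec

    @pipeline_def(batch_size=256, num_threads=8, device_id=0)
    def imagenet_tfrecord_pipe():
        # Read TFRecord shards directly; DALI needs an index file per shard.
        inputs = fn.readers.tfrecord(
            path=["/data/tf_records/train-00000-of-01024"],
            index_path=["/data/tf_records/train-00000-of-01024.idx"],
            features={
                "image/encoded": tfrec.FixedLenFeature((), tfrec.string, ""),
                "image/class/label": tfrec.FixedLenFeature([1], tfrec.int64, -1),
            },
        )
        # Decode on the GPU ("mixed") to offload the JPEG work seen in the profiles.
        images = fn.decoders.image(inputs["image/encoded"], device="mixed")
        images = fn.resize(images, resize_shorter=256)
        return images, inputs["image/class/label"]

    pipe = imagenet_tfrecord_pipe()
    pipe.build()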

    Conclusion

    As I’ve been writing this post, I realize just how complicated it is to understand the performance characteristics of training neural networks. Even as we talk about metrics like images/second, the options that are used (batch size, for instance) can also affect convergence. It’s very difficult to come up with a common methodology that is always better than the others. I wonder if another metric, like time to reach a desired level of convergence, would be better in the end. Having said that, I am glad to have done this exercise as I learned some valuable things:

    1. Pre-processing data into a format like TFRecords on fast local storage is a big win from an IO perspective. It lets storage systems that have slow metadata performance succeed so long as they have enough sequential read throughput to keep the machine learning framework busy. This is a big win for many distributed file systems that may have substandard metadata performance (and even the good ones may still benefit).

    2. To train on a dataset like ImageNet, you need somewhere around 1-1.3GB/s of raw disk throughput to keep 8 V100 GPUs busy when training in fp16. For amp or fp32 the requirements are likely lower since the GPUs can’t work quite as fast. With modern GPUs that are faster than the V100, the disk throughput requirements could be significantly higher.

    3. Lambda’s local NVMe storage is likely fast enough to saturate 8 GPUs, even newer ones, so long as the rest of the IO path can keep up. The persistent storage appears to become a bottleneck with sufficient GPUs and TensorFlow private threads, though it can still function fairly well so long as TFRecords are used. A concern going forward is how to ensure that the data pipelines in TensorFlow and PyTorch are fast enough to keep the GPUs fed. The TensorFlow benchmark required a large number of private threads and showed potential evidence of contention at high thread counts. PyTorch did not appear to natively support TFRecords, but Nvidia DALI or other 3rd party code might help improve the IO path.

    4. If it’s necessary to train directly with images rather than TFRecords, it may not make sense to host them on shared file systems. It appears that TensorFlow and possibly PyTorch give users the ability to specify separate training data and work directories. If all operations against the training data are reads, it may be better to host datasets on read-only block device snapshots. For instance with Ceph, you could create a read/write RBD volume where you put a certain dataset, take a snapshot, and then map that snapshot as read-only on multiple instances that all need access to the same image set (see the sketch after this list).

    5. Even with a training set as large as ImageNet, Lambda’s instances have so much memory that eventually the entire dataset becomes cached. It was necessary to sync and drop caches before each test and keep tests short enough that they didn’t re-read the same data from buffer cache. I was able to watch as long running tests eventually stopped performing reads and got faster as time went on. This could make apples-to-apples comparison between different storage vendors difficult if not carefully controlled.

    6. I’m almost certainly missing additional tweaks that can help speed up both Tensorflow and PyTorch. This post shouldn’t be seen as the be-all/end-all for how to achieve high performance with these frameworks, but I hope it may at least help showcase some of the areas that are valuable to investigate when trying to train with real data and achieve high performance.
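
    Regarding point 4 above, here is a rough sketch of the snapshot idea using the python-rbd bindings. The pool and image names are made up, and a kernel client would use rbd map --read-only rather than librbd, but it shows the shape of the workflow:

    import rados
    import rbd

    # Connect to the cluster and open the pool holding the dataset volume.
    cluster = rados.Rados(conffile="/etc/ceph/ceph.conf")
    cluster.connect()
    ioctx = cluster.open_ioctx("datasets")

    # One-time step: snapshot the volume after staging the dataset on it.
    with rbd.Image(ioctx, "imagenet") as img:
        img.create_snap("v1")
        img.protect_snap("v1")

    # On each training instance: open the frozen snapshot read-only.
    with rbd.Image(ioctx, "imagenet", snapshot="v1", read_only=True) as snap:
        print("snapshot size:", snap.size())

    ioctx.close()
    cluster.shutdown()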

    This wraps up my initial work looking at Deep Learning IO behavior. I hope that next time I can come armed with a bit more knowledge about the internals of how PyTorch and Tensorflow work, focus a bit more on the quality of the training, find even larger datasets to work with, and maybe actually accomplish something useful rather than just play with ImageNet.

    Thanks for reading!

    ", + "url": "https://hpc.social/personal-blog/2022/an-initial-look-at-deep-learning-io-performance/", + + + + + + "date_published": "2022-11-28T00:00:00-07:00", + "date_modified": "2022-11-28T00:00:00-07:00", + + "author": "Mark Nelson's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/adam-s-weekly-update-2022-11-27/", + "title": "Adam’s weekly update, 2022-11-27", + "summary": null, + "content_text": "What’s newThe first thing that’s new is… this post! I’m going to try to do at least a weekly post on the blog now, just a general update and some links. This will hopefully help me get back into the habit of writing on the blog regularly, and maybe inspire me to write a bit more in general.I was off work this week for the Thanksgiving holiday, and traveled Michigan to visit my parents and my brother’s family. My mom has been struggling with some pretty major health issues this year, so it was really wonderful and reassuring to get to spend some time with her and my dad. I also finally got to meet my brother’s three-year-old son, who was born right before the pandemic started, and who I hadn’t managed to meet up until now.On the tech-related front, I used this week to take a break from Twitter (mostly), and to be honest… it was kinda refreshing! I had developed a pretty bad Twitter habit this year, doomscrolling for more time than I like to admit. While I really like Twitter and I’ve had some nice career boosts from it, it was also a time sink that was not entirely healthy.Admittedly, that time was somewhat replaced by playing around on the Fediverse / Mastodon. But with the lack of algorithmic suggestions, quote tweets, and other means of virality, that network so far feels a lot quieter and less time-consuming than Twitter. Tim Bray has a good post up which discusses some of the advantages and pitfalls of federated social media, and I can highly recommend reading that. I’m still a bit skeptical that it will be a practical “Twitter replacement” for most people, but so far I’m finding it pleasant.What I’m readingNonfiction book: Code, Second Edition, by Charles Petzold. This book walks through the process of building a working computer, starting with ideas like Morse code, then working up from logic gates on up. This is technically a re-read, as I read the first edition… 10+ years ago? But I’m getting a lot more out of it this time around, and really enjoying it.Fiction book: The Spare Man, by Mary Robinette Kowal. A cozy murder mystery on a luxury cruise to Mars. I’m only a few chapters in, but already greatly enjoying myself.“Hiding theory in practice”, by Fred Hebert. I’ve been reading a lot about safety engineering and its application to computing lately, but that community can sometimes get off into the weeds about points of theory that don’t have consensus in the broader computing community. This post has a good discussion of how to use the theory of safety engineering to guide decisions, without requiring that everyone working with you be handed a reading list.“Paper: Repentance as Rebuke: Betrayal and Moral Injury in Safety Engineering”, also by Fred Hebert. A discussion of a paper by Dekker et al which looks at the aftermath of the 737 MAX air disasters, and the public repentance of some of the engineers who were involved. Go read the post, it’s great. And I’m planning to read the original paper this week.“Cannon Lake: Intel’s Forgotten Generation”, from Chips and Cheese. 
Really I’ve been reading a bunch of the technical posts from Chips and Cheese lately, and they’re doing pretty good analyses of recent hardware. They’ve definitely earned that spot in my RSS reader.Glenn K Lockwood’s “SC’22 Recap”. I was sad to miss Supercomputing this year, though enough folks have come down with COVID that I don’t really regret the decision. But Glenn wrote up a really interesting recap post, with an interesting new viewpoint now that he’s working at Microsoft Azure. Among other things, he included a whole section titled The underwhelming, with the opening line “The biggest deal appears to be that exascale is here, and it turns out that it’s not that big of a deal.”Recent recipesBecause it was Thanksgiving, I did a lot of cooking this week! I’m not going to list everything I made, but a few of my favorites were:Cheesy Garlic Butter Rolls from Delish: Nothing special, but really tasty.Challah Stuffing from Smitten Kitchen: This recipe was a huge winner, with most of the family coming back for seconds, and then having more the next day for leftovers. It was really good, and is probably what I’ll make if I ever do stuffing again.Best Challah from Smitten Kitchen: I baked the bread that went into the stuffing, and it was really tasty on its own! This recipe makes two loaves, and I only needed one for the stuffing. So I also made french toast with it, which worked really nicely.Pet photosGotta have those pet photos.", + "content_html": "

    What’s new

    The first thing that’s new is… this post! I’m going to try to do at least a weekly post on the blog now, just a general update and some links. This will hopefully help me get back into the habit of writing on the blog regularly, and maybe inspire me to write a bit more in general.

    I was off work this week for the Thanksgiving holiday, and traveled to Michigan to visit my parents and my brother’s family. My mom has been struggling with some pretty major health issues this year, so it was really wonderful and reassuring to get to spend some time with her and my dad. I also finally got to meet my brother’s three-year-old son, who was born right before the pandemic started, and who I hadn’t managed to meet up until now.

    On the tech-related front, I used this week to take a break from Twitter (mostly), and to be honest… it was kinda refreshing! I had developed a pretty bad Twitter habit this year, doomscrolling for more time than I like to admit. While I really like Twitter and I’ve had some nice career boosts from it, it was also a time sink that was not entirely healthy.

    Admittedly, that time was somewhat replaced by playing around on the Fediverse / Mastodon. But with the lack of algorithmic suggestions, quote tweets, and other means of virality, that network so far feels a lot quieter and less time-consuming than Twitter. Tim Bray has a good post up which discusses some of the advantages and pitfalls of federated social media, and I can highly recommend reading that. I’m still a bit skeptical that it will be a practical “Twitter replacement” for most people, but so far I’m finding it pleasant.

    What I’m reading

    • Nonfiction book: Code, Second Edition, by Charles Petzold. This book walks through the process of building a working computer, starting with ideas like Morse code and then working up from logic gates. This is technically a re-read, as I read the first edition… 10+ years ago? But I’m getting a lot more out of it this time around, and really enjoying it.
    • Fiction book: The Spare Man, by Mary Robinette Kowal. A cozy murder mystery on a luxury cruise to Mars. I’m only a few chapters in, but already greatly enjoying myself.
    • “Hiding theory in practice”, by Fred Hebert. I’ve been reading a lot about safety engineering and its application to computing lately, but that community can sometimes get off into the weeds about points of theory that don’t have consensus in the broader computing community. This post has a good discussion of how to use the theory of safety engineering to guide decisions, without requiring that everyone working with you be handed a reading list.
    • “Paper: Repentance as Rebuke: Betrayal and Moral Injury in Safety Engineering”, also by Fred Hebert. A discussion of a paper by Dekker et al which looks at the aftermath of the 737 MAX air disasters, and the public repentance of some of the engineers who were involved. Go read the post, it’s great. And I’m planning to read the original paper this week.
    • “Cannon Lake: Intel’s Forgotten Generation”, from Chips and Cheese. Really I’ve been reading a bunch of the technical posts from Chips and Cheese lately, and they’re doing pretty good analyses of recent hardware. They’ve definitely earned that spot in my RSS reader.
    • Glenn K Lockwood's “SC'22 Recap”. I was sad to miss Supercomputing this year, though enough folks have come down with COVID that I don't really regret the decision. But Glenn wrote up a really interesting recap post, with a fresh new viewpoint now that he's working at Microsoft Azure. Among other things, he included a whole section titled The underwhelming, with the opening line “The biggest deal appears to be that exascale is here, and it turns out that it's not that big of a deal.”

    Recent recipes

    Because it was Thanksgiving, I did a lot of cooking this week! I’m not going to list everything I made, but a few of my favorites were:

    • Cheesy Garlic Butter Rolls from Delish: Nothing special, but really tasty.
    • Challah Stuffing from Smitten Kitchen: This recipe was a huge winner, with most of the family coming back for seconds, and then having more the next day for leftovers. It was really good, and is probably what I’ll make if I ever do stuffing again.
    • Best Challah from Smitten Kitchen: I baked the bread that went into the stuffing, and it was really tasty on its own! This recipe makes two loaves, and I only needed one for the stuffing. So I also made french toast with it, which worked really nicely.

    Pet photos

    Gotta have those pet photos.

    \"A
    \"A
    \"A
    ", + "url": "https://hpc.social/personal-blog/2022/adam-s-weekly-update-2022-11-27/", + + + + + + "date_published": "2022-11-27T15:28:16-07:00", + "date_modified": "2022-11-27T15:28:16-07:00", + + "author": "Thinking Out Loud" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/sc-22-recap/", + "title": "SC'22 Recap", + "summary": null, + "content_text": "The biggest annual conference in HPC, the SC conference, was recently held in Dallas, Texas in its second hybrid incarnation since being all-remote for the pandemic. This year attracted over 11,000 attendees which is much closer to the pre-pandemic high of 14,000 than last year's 7,000, and judging from the crushed conference rooms and busy expo floor, it looks like SC is not that much worse for wear.This year's conference quite different for me since I attended for my first time as a vendor, not a researcher or practitioner, and I spent most of my days behind closed doors talking to customers. I didn't get to attend any of the keynotes, BOFs, or panels to which I wasn't invited as a result, so I'm not really qualified to give an erudite summary of the conference or expo this year.So instead, I'm just writing down what I remember in order that I remember it and not necessarily in a coherent narrative form. I'm sure I missed a lot (for example, mixed precision seemed big this year, and I heard Jack Dongarra gave a fantastic Turing Award talk) so I encourage others to write their own recaps and share with the community!High-level themesI actually started writing an SC'21 recap last year which I never posted, and re-reading the intro was funny--you'd think nothing has changed in the last year.The underwhelmingThe biggest deal appears to be that exascale is here, and it turns out that it's not that big of a deal. China let the air out of the tires by debuting their exascale systems at SC'21, and not only did they thumb their nose at Top500 by not submitting, they debuted by winning a Gordon Bell prize instead. The first US exascale system, Frontier, was debuted at ISC this year leaving its showing at SC a bit deflated too. Frontier was featured in the Gordon Bell prize-winning paper this year, but that work required the use of four Top-10 systems, not just Frontier, painting the reality that one giant computer rarely stands on its own when it comes to advancing science.This isn't to say that deploying exascale systems isn't a noteworthy feat and worth commendation, but I felt like the hype over the last five years treated the achievement like an end state instead of a milestone. And now that we've passed the milestone, the community is grasping to figure out what comes next. So what is next?Quantum had a strong and growing presence at SC, as it has for the last few years. But the conclusion of the panel \"Quantum Computing: A Future for HPC Acceleration\" was that no, it's not close to being ready.Disaggregation and composability was another theme with growing momentum. And like quantum, there was a panel asking the same question: \"Does HPC need composability now?\" The answer, again, was no, not yet. More on that below.What about RISC-V? Surely that will revolutionize the field. 
As it turns out, the answer there is also that RISC-V is not ready to do anything useful for HPC yet. The list goes on of technologies and trends that people are trying to boost now that exascale is \"solved.\" The reality, I think, is that \"exascale\" will take years to actually mature since it appears to have a ton of technical debt that accumulated during the race to be first. US Exascale rests on the shoulders of AMD and Intel, two companies whose software stacks have not caught up to the market leader, so there will be a lot of thrashing around as development practices and optimization settle out around these systems. Struggling with code porting is not very exciting to computer science Ph.D.s, so I expect future SCs to mirror this one and bifurcate into two distinct tracks: those struggling to identify the next big thing in the research space, and those struggling to use the systems that were rushed to deployment. The unexpected: My SC experience was very biased since I didn't get out much, but two related themes kept popping up across different meetings and the sessions I did attend. Power efficiency is serious business now. It used to seem like people talked about the need for energy-efficient HPC in an abstract sense while continuing to jam more power into every rack without changing their approach to system design, facilities, and deployment models. That has hit a hard wall with energy prices soaring in Europe, though. The financial impacts of power-inefficient supercomputing have gone from a one-time capex cost to an ongoing opex cost that is putting many HPC facilities on an unsustainable cost trajectory. Even sites that aren't doing new deployments are facing sudden, sharp increases in their costs, and nobody has good answers about how they will keep the lights on. Cloud HPC is confusing. With only 15% of total HPC dollars winding up in the cloud, it's little surprise that most HPC folks are only peripherally aware of what HPC in the cloud really means. Worse yet, a subset of those folks are actively hostile towards the idea of running HPC workloads in the cloud. I spoke with my colleagues from all three major cloud service providers as well as my colleagues in DOE, NSF, and education throughout the week, and everyone painted this same general picture. There seems to be a mismatch between the expectations of on-prem HPC folks and cloud HPC folks. For example, I was asked why Windows doesn't support OpenMP very well, and after a bit of digging, I realized that the question really wasn't about using OpenMP on Windows as much as it was about using OpenMP in the cloud. There was a latent assumption that \"HPC in Microsoft's cloud\" must mean \"HPC on Windows\" which, for the record, is false--I don't even know how to use Windows anymore. Similarly, people decried the performance impacts of sharing HPC nodes with others in the cloud (they are not shared), overheads of virtualizing InfiniBand or GPUs (everyone uses PCIe passthrough or SR-IOV for HPC nodes), and other misconceptions. This isn't to say that cloud people aren't confused too; I heard stories about conversations that went sideways because cloud folks (not from my employer, thankfully!) didn't realize that the requirements of a traditional gov/edu HPC facility couldn't be neatly wrapped up into a single workload with a single solution, contrary to the case across many commercial AI shops.
And both sides are struggling to find models for partnership and engagement that mirror the traditional relationship between places like a DOE or NSF facility and a company like Cray. HPC departments are used to buying supercomputers and parallel file systems, while cloud providers sell computing and storage as a service. The distinction may seem trivial at the surface, but there's a large divide that becomes evident once both sides start trying to drill into the details of what a partnership would look like. Parallel I/O in Practice Tutorial: This was my fifth year contributing to the Parallel I/O in Practice Tutorial with my colleagues at Argonne and Google, and it was our first time doing it in-person since 2019. It felt really good to be back in front of people to opine about the perils of POSIX and the greatness of the Darshan I/O profiling tool, and this year I retired the material I used to present on burst buffers (since DataWarp and Infinite Memory Engine have lost relevance in HPC) and the TOKIO holistic I/O analysis framework (since it is no longer funded/maintained). In their stead, I presented material on benchmarking with IOR and mdtest that I debuted at LUG 2022 this year. I haven't gotten feedback yet on whether this change was a net positive one, but I think it went over well. Benchmarking I/O is really challenging if you don't understand how things like page cache really work in distributed systems, and walking through some benchmark examples concretizes a lot of abstract parallel file system concepts like locking and striping. And since benchmarking is a rabbit hole of arbitrary complexity, ending the tutorial with advanced benchmarking topics turned out to be a nice way to add buffer to the end of an eight-hour stretch of carefully timed presentations. It's very easy to skip over the nuances of analyzing mdtest outputs if attendees have a lot of questions about more important things at the end of the day. The most surprising observation of the tutorial is how many attendees aren't using MPI anymore. We got a lot of questions last year about task-oriented I/O, and this year had some great questions about trying to understand or tune the I/O performed by Python-based analytics frameworks. We decided to add support for Darshan to profile non-MPI applications back in 2019, which is now paying dividends by ensuring it is a relevant tool for these new analytics and AI workloads, and we'll probably have to give more attention to optimizing these workloads' I/O in the future. DAOS User Group: Monday morning was cold and rainy--a perfect day to attend the 2022 DAOS User Group which was held off-site at the Fairmont Hotel. Whether you particularly care about DAOS or not, the cross-community HPC I/O brain trust is guaranteed to be in attendance, and this year did not disappoint. In addition to the expected stakeholders from Intel and DOE, representatives from all three big CSPs were in attendance. Google Cloud, Seagate, and HPE/Cray were all on the agenda, painting a diversifying landscape of large HPC companies investing time into DAOS and the strength and willingness of the DAOS team to partner with all comers. Life after Optane: The question that opened up the meeting, of course, was \"what is the future of DAOS since Intel cancelled Optane?\" Kelsey Prantis had the official statement (I'll replace the grainy photo once the DUG slides are online...): The high-level project answer is that DAOS isn't going anywhere.
Aurora, by virtue of still having Optane DIMMs, will not be affected, and DAOS will maintain support for Optane until Intel drops its last Optane DIMMs (Crow Pass for Sapphire Rapids) from support life sometime towards the end of this decade. For new customers who aren't going to use Optane, the answer is \"Metadata on NVMe,\" a capability being codeveloped by Intel, HPE, and Google to implement a write-ahead log (WAL) and allow DAOS to use volatile DRAM instead of Optane. It will work like a file system journal in that a compact representation of writes will be committed to NVMe immediately after landing in DRAM, and then DAOS will asynchronously write back the properly serialized representation of that transaction after it is acknowledged. Johann Lombardi had a helpful cartoon that showed how this WAL will fit into DAOS. A key benefit of DAOS's implementation of this WAL is that it will be able to still service incoming writes while flushing old writes; although I don't fully grasp how this works, it is something enabled by the sophisticated I/O scheduler already implemented in DAOS. The complete implementation isn't expected to be released until Spring 2024, but it appears to touch only a few components of DAOS and doesn't affect anything above the VOS layer of the DAOS server. There was also mention of developing interoperability with new CXL-attached memory-semantic SSDs to keep the persistent memory capability of DAOS alive beyond Optane. I'm not sure if this would offer a performance benefit over the metadata-on-NVMe feature; early results show that metadata-on-NVMe actually delivers higher IOPS than Optane since the synchronous write path is much simpler without having to account for memory persistence. That said, I didn't really follow the full extent of options on the table for how DAOS metadata may work across different types of memory. DAOS in the flesh at Argonne: Kevin Harms presented an update on Aurora's massive 220 PB DAOS installation and laid out its configuration. There are 1,024 DAOS servers based on the Intel Coyote Pass server design, each sporting 2x Intel Xeon 5320 (Ice Lake) sockets, 2x DAOS engines (one per socket), 16x 32GB DDR4 DIMMs, 16x 512GB Optane DIMMs (Persistent Memory 200), 16x 15.36 TB Samsung PM1733 NVMe SSDs, and 2x 200 Gb/s Slingshot NICs. The total configuration is quoted at 220 PB usable, but Kevin pointed out that this assumes that every object is erasure coded at 16+2. Unlike virtually every other storage system out there, though, users can choose the data protection for their individual objects when they create them, meaning this 220 PB capacity is an upper limit to what users can do. Users with very hot, read-only objects may choose to replicate instead of erasure code, while others who are capacity-constrained may choose to erasure code everything at 16+2 at the cost of latency and IOPS. This flexibility is really powerful for users since they can tailor their object layout (\"object class\" in DAOS parlance) to match the needs of their workload. Argonne will be slicing up this DAOS system by giving each scientific project its own DAOS pool, and each pool will be assigned to only 80% of the available DAOS servers by default.
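As a sanity check, the quoted capacity is consistent with those hardware counts under 16+2 erasure coding (back-of-the-envelope arithmetic only):

```python
# Back-of-the-envelope check of Aurora's quoted 220 PB usable capacity.
servers = 1024
ssds_per_server = 16
tb_per_ssd = 15.36                       # Samsung PM1733

raw_pb = servers * ssds_per_server * tb_per_ssd / 1000
usable_pb = raw_pb * 16 / 18             # 16+2 erasure coding overhead
print(f"raw: {raw_pb:.1f} PB, usable at 16+2: {usable_pb:.1f} PB")
# raw: 251.7 PB, usable at 16+2: 223.7 PB -- right around the quoted 220 PB
```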
Assigning each pool to a subset of servers like this seems like a nice way of providing most of the storage system performance to every user, but offering more freedom to work around bad hardware, bad users, and other performance problems that plague file systems like Lustre that distribute everything across every single server equally. Finally, I noticed that Aurora will be using Samsung SSDs, not the Intel (now Solidigm) QLC NAND that appeared in all the DAOS slides floating around two years ago. I'm not sure what happened there, but the move from Solidigm QLC to Samsung TLC couldn't have been cheap. New features and contributions: DAOS is starting to pick up some truly valuable features that are being developed and contributed by third parties. Of note, croit has contributed a feature which allows DAOS to serve up NVMe over Fabrics targets, and Seagate contributed an S3 gateway for DAOS. Along with the DFS file system interface, DAOS now offers the trifecta of standard object, block, and file services just like Ceph. Unlike Ceph though, performance on DAOS is a first-class citizen. While croit made it clear that the NVMeoF support still has a ways to go to improve the way it does thread pooling and provides resilience, they showed 1.4 million IOPS from a single storage client using TCP over Ethernet with minimal client-side overhead. Intel is also developing multitenant support for DFUSE, allowing a single compute node to share a DAOS mount and let permissions be enforced through UID/GID just like a regular file system. Before this update, the FUSE-based nature of DAOS allowed any unprivileged user to mount their container (good), but only one FUSE agent could be alive on a single node at a time (not good), which prevented multiple users sharing a node from both mounting their own containers. DAOS also has some longer-term enhancements that I thought were interesting: expanding the range of POSIX calls supported by DAOS's intercept library to include metadata calls and memory-mapped I/O using userfaultfd; implementing collaborative caching, essentially reimplementing the Linux kernel page cache in userspace so that multiple processes can share cached DAOS pages; and supporting a computational storage paradigm by enabling offload of userspace eBPF scripts to DAOS servers. DAOS in a larger data center ecosystem: Dean Hildebrand from Google Cloud then gave an overview of Google's efforts in bringing DAOS into the cloud. He had some nice performance graphs and I'll link the full presentation here once it's uploaded (it's worth a watch), but the part I found the most insightful was how they are trying to decide where a technology like DAOS fits in the larger cloud storage ecosystem. He outlined two different ways DAOS could work in GCP: caching, where Google Cloud Storage (GCS) is the point of truth and DAOS is a cache; or tiering, where DAOS is a point of truth and GCS is an archive. He said they were leaning towards the caching model where data only lives ephemerally in DAOS, and personally, I think this is the right move since DAOS in the cloud is not resilient without Optane.
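To make the caching-versus-tiering distinction concrete, here is a minimal sketch of the two models (hypothetical dict-like interfaces of my own invention, not anything GCP or DAOS actually exposes):

```python
class DaosAsCache:
    """Caching model: GCS is the point of truth; DAOS holds ephemeral copies."""
    def __init__(self, daos, gcs):
        self.daos, self.gcs = daos, gcs
    def read(self, key):
        if key not in self.daos:
            self.daos[key] = self.gcs[key]   # fill the cache on a miss
        return self.daos[key]
    def write(self, key, value):
        self.daos[key] = value
        self.gcs[key] = value                # write-through: losing DAOS loses nothing

class DaosAsTier:
    """Tiering model: DAOS is the point of truth; GCS is the archive."""
    def __init__(self, daos, gcs):
        self.daos, self.gcs = daos, gcs
    def write(self, key, value):
        self.daos[key] = value               # data lives (only) in DAOS...
    def archive(self, key):
        self.gcs[key] = self.daos.pop(key)   # ...until explicitly tiered out
```

In the caching model, losing DAOS costs you re-reads from GCS; in the tiering model, it costs you data, which is why resilience matters so much more for tiering.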
However, this choice reflects a much larger tension in cloud storage for HPC: the centerpiece of every cloud's data story is a scalable, low-cost, low-performance object store, which is analogous to what on-prem HPC would call campaign, community, or project storage, but HPC demands higher performance than what these object stores can generally deliver. To bridge the gap between these two truths, auxiliary services must bolt on to the object layer and provide higher performance, at a higher cost, for the duration of I/O-intensive HPC jobs. Some choose to provide true tiering from object into a resilient layer of flash (like FSx Lustre and Weka do), while others project the contents of the object through a high-performance caching layer (like HPC Cache and File Cache) and are never meant to persistently hold data. This isn't rocket science, but I never thought deeply about the two models since campaign/community/project storage in on-prem HPC is usually fast enough to avoid needing caches or fine-grained tiering capabilities. John Bent also had a thought-provoking presentation about how Seagate's now-\"deprioritized\" CORTX object store, which once competed with DAOS as Mero, contains ideas that can complement DAOS. Whereas DAOS delivers high performance using NVMe, CORTX delivers great economics using HDDs, and their strengths are complementary to each other. While I don't fully grasp how a tiered (or caching!) system comprised of DAOS and CORTX could be implemented, John rightly pointed out that the same level of space efficiency can deliver higher data protection if multi-level erasure coding is used to stripe across durable block storage. His specific example was erasure coding at 8+1 across servers and 10+1 within servers to deliver both high efficiency and high durability. This could map to something like running DAOS atop something like CORVAULT, but I don't think all the necessary pieces are in place to realize such a harmonious coexistence yet. Of course, completely tossing Reed-Solomon for something more sophisticated (like VAST does with its locally decodable 150+4 scheme) obviates the need for multilevel erasure entirely. But DAOS has not gone down that route yet. And as with every talk John gives, there were lots of other interesting nuggets scattered throughout his presentation. Two of my favorites were these: A slide that pointed out that, when you buy something like Ceph as an appliance, you may be spending only 25% of the total cost on storage media and the rest is infrastructure, service, and support. This struck me as a bit on the low end, but some enterprisey NAS and midrange parallel file system appliances can go this low. Spending 60% to 90% on media is a lot nicer for the buyer (and companies like Seagate) if you can buy at scale or eschew the white-glove support, and John suggested that it's up to companies like Seagate to fix the software issues that require customers to pay for white-glove support in the first place. After all, the less someone spends on support and licenses, the more they can spend on Seagate hard drives. John's final slide pointed out that object stores were originally designed to get around the limitations of POSIX file systems, but as they've evolved over the last decade, they're starting to look a lot like file systems anyway since they require strong consistency, hierarchical namespaces, and familiar file semantics.
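Working through John's multi-level erasure example from above (simple arithmetic on the figures from the talk):

```python
# Space efficiency of multi-level erasure coding vs. flat schemes.
multi_level = (8 / 9) * (10 / 11)     # 8+1 across servers, 10+1 within
print(f"8+1 x 10+1 efficiency: {multi_level:.1%}")   # ~80.8%
print(f"flat 8+2 efficiency:   {8 / 10:.1%}")        # 80.0%
print(f"flat 16+2 efficiency:  {16 / 18:.1%}")       # ~88.9%
```

Roughly the same space efficiency as a flat 8+2, but the within-server code can repair individual drive failures locally without touching the network, while the across-server code still protects against losing a whole server.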
Has all the work put into developing super-fast object stores like DAOS over the last ten years really just brought us back full circle to parallel file systems? Companies like VAST and Weka have shown that maybe POSIX isn't as bad as the research community (myself included!) have claimed it to be; it was really just low-performance implementations that nobody wanted. Once John's talk is uploaded to the DUG 2022 website, I'll link it here. Like Dean Hildebrand's talk, it is well worth watching (but for wildly different reasons!) PDSW 2022: I had to duck out of the DAOS User Group early to run (through the rain) to the 7th International Parallel Data Systems Workshop (PDSW 2022) on Monday afternoon. Much to everyone's surprise, PDSW was only given a half day this year and everything felt a little compressed as a result. The organizers kept the work-in-progress (WIP) sessions, which can often be an interesting peek into what students are pursuing, but little A/V problems and the unforgiving schedule probably did a disservice to the up-and-comers who use the WIP track to lay the groundwork for future full-length papers. Hopefully SC'23 restores PDSW to its original full-day status. Splinters keynote from Arif Merchant at Google: The keynote presentation was given by Arif Merchant from Google about Splinters, the framework that Google Cloud uses to sample I/Os in a scalable way. The challenge they face is that it's impossible to trace and store every single I/O that hits Google's storage servers (D servers), but having an understanding of I/O patterns is essential for characterizing workload I/O behavior and planning for future infrastructure. In fact, this problem is so important that Google isn't the only cloud that's solved it! A lot of what Arif talked about is very similar to how Azure does its I/O tracing under the hood. I suppose it should not be a surprise that there are only so many ways to solve the challenge of sampling individual IOPS in a way that fairly represents the aggregate workload of a huge distributed storage system. One really smart thing Splinters does that I liked was sample along two different dimensions: not only do they evenly sample across all IOPS at a fixed rate (the obvious thing), but they also sample across files at a fixed rate. In this latter case of per-file sampling, they take a tiny fraction of files and capture every I/O for that file to get a complete picture of how individual files are being accessed. This file sampling fills the huge gap that exists when randomly sampling IOPS alone. Because different I/Os have different \"costs\" (for example, reading a 1 MiB file using a single 1 MiB read op or 256x 4 KiB read ops are functionally equivalent to an application), randomly sampling ops introduces systematic biases that can be difficult to back out after the data has been sampled, subsampled, aggregated, and reduced. Splinters' approach lets you see the workload from two different angles (and biases) and answer a much larger range of questions about what's really happening across thousands of storage servers. That said, it was interesting to hear Arif describe how Splinters evolved out of a different internal Google project but wound up outliving it. Splinters is also similar to, but slightly different from, their Dapper infrastructure, which also does scalable distributed system tracing. And he made overtures to F1, a scalable SQL database that is similar to (but not the same as) the SQL-like query interface that Splinters uses.
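My own illustration of what sampling along those two dimensions might look like (not Google's implementation; names and rates are invented):

```python
import hashlib, random

OP_RATE = 0.001      # keep 0.1% of all ops, sampled uniformly
FILE_RATE = 0.0001   # keep *every* op for 0.01% of files

def sample(op):
    # Dimension 1: uniform sample over individual ops.
    if random.random() < OP_RATE:
        return True
    # Dimension 2: deterministic sample over files, so a selected file
    # contributes its complete access history.
    h = int.from_bytes(hashlib.sha1(op["path"].encode()).digest()[:8], "big")
    return h / 2**64 < FILE_RATE

def trace(op_stream):
    return [op for op in op_stream if sample(op)]
```

The deterministic hash is the trick that makes the per-file view work: every storage server independently selects the same files, so a sampled file's history stays complete without any coordination between servers.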
I got the impression that new technologies come and go pretty quickly at Google, and there's a large appetite for creating new software systems outright rather than shoehorning an existing system into solving a new problem. I can't say one way is better than the other; I was just surprised at the contrast with my own experiences. Practical papers: PDSW had a healthy combination of both very-researchy papers and applied research papers this year. I could only stick around for the applied papers, and two left an impression. In the first, Jean Luca Bez presented Drishti, a tool that lives downstream of the Darshan I/O profiling library and finally does what the Darshan community has danced around for years--turning a Darshan log into an actionable set of recommendations on how to improve I/O performance. It does this by cataloguing a bunch of heuristics and using Darshan's new Python integrations to pore through a log and identify known-problematic I/O patterns. Like Jean Luca's DXT Explorer tool, Drishti has a slick user interface and greatly extends the usability and insights that can be pulled out of a Darshan log file. It probably won't win a Turing Award, but this sort of work is probably going to benefit scores of HPC end-users by making Darshan (and troubleshooting I/O problems) much more accessible to mere mortals for years to come. Adrian Jackson also presented a very tidy apples-to-apples comparison of DAOS and Lustre on the same hardware using both a systems-level benchmark and an application-inspired, object-oriented data model benchmark. The specific bake-off of a new curiosity (DAOS) and the decades-old incumbent (Lustre) is probably interesting to storage nerds, but I think the real novelty of the work is in its exploration of some uncomfortable realities that the HPC I/O community will have to face in the coming years: Does \"slow memory\" (nonvolatile Optane or CXL-attached memory SSDs) give actual benefit to existing file systems (like Lustre), or is rethinking the entire storage stack (like DAOS did) really necessary to unlock the performance of new hardware? And do applications need to rethink their approach to I/O to make use of post-POSIX storage systems like DAOS, or is performing I/O as you would on a file system (Lustre) on a post-POSIX storage system (DAOS) good enough? My take from the work is that, for simple I/O patterns like checkpoint/restart, you can get pretty far by just treating something like DAOS the same as you would a parallel file system (see the figure from Manubens et al., \"Performance Comparison of DAOS and Lustre for Object Data Storage Approaches\"). But if you want your data at rest to have the same data model as how it's handled within the application, you really ought to use a storage system that supports data models that are more expressive than a stream of bytes (which is what POSIX files are). The authors didn't do a perfect job of giving Lustre its fair shake since they chose to use (abuse) directories and files to represent their application's data model on-disk instead of developing an object-file model that file systems like Lustre handle a little better. But let's be real--HPC is full of applications that do the exact same thing and represent datasets on-disk using complex hierarchies of directories and files simply because that's the easiest way to map the application's representation of data into the standard file system model.
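Stepping back to Drishti for a moment: the heuristics in a tool like that can be refreshingly simple. A made-up example of the kind of rule such a tool might encode (not Drishti's actual rules or API; counter names are hypothetical):

```python
def check_small_reads(counters):
    """Flag the classic 'lots of tiny reads' antipattern from I/O counters."""
    total = counters.get("reads", 0)
    small = counters.get("reads_under_4k", 0)   # hypothetical counter name
    if total and small / total > 0.9:
        return ("More than 90% of reads are under 4 KiB; consider buffering, "
                "collective I/O, or larger request sizes.")
    return None
```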
Given how common that pattern is, storage systems that represent rich data models in a high-performance way should be really valuable to naive applications that map in-memory data structures directly to files and directories. Going back to John Bent's closing slide from his DAOS User Group talk, though, does any of this even matter since all answers lead back to parallel file systems? Maybe there's something to be learned about adding better back-door APIs that support more diverse data models than what POSIX file interfaces give us. The SC22 Expo: The expo is my favorite part of SC because it's when I get to talk to people one-on-one and learn about corners of the HPC industry that I would've never otherwise sought out. Much to my dismay, though, I had very little time to walk the floor this year--so little that I didn't get any swag. If you want to read up on what interesting technology was being showcased, I strongly recommend reading all the great content that Patrick Kennedy and his team at STH created covering the expo. That said, I did notice some curious trends about the show floor overall. The NVIDIA booth was notably absent this year (though they shared booth space with partners), and many of the usual top vendors had a significantly smaller presence on the expo floor. Just for fun, I compiled the top ten(ish) vendors by booth size: Weka.io (3,200 sqft); VAST Data, Department of Energy, Penguin Computing, HPE, and Microsoft (2,500 sqft); AWS (2,000 sqft); Google and TACC (1,600 sqft); and Supermicro, AMD, Intel, Dell, NASA, and Indiana University (1,500 sqft). I think it's amazing to see all-flash storage companies at the top of the list alongside all of the Big 3 cloud service providers. I may be reading too much into this, but this may mean that the money behind SC is shifting towards companies playing in the cloud-based AI space instead of traditional big iron for simulation. Or perhaps it's a sign that most of the traditional HPC players are taking a hard look at the return they get on a big booth given the current economic climate and pulled back this year. I did chat with a couple colleagues who completely opted out of a booth this year (for reference, SC'21 had 10% fewer exhibitor booths than SC'19), and the reasoning was consistent: they found more value in having staff meet with customers privately or attend the technical sessions and engage with people organically. Combined with a bit of bad taste left over from SC's high cost of hosting pandemic-era \"digital booths\" despite low return (did anyone visit digital booths at SC'20 or SC'21?), I can see why some vendors may have chosen to skip the expo this year. Whatever the reasons may be, I was a bit sad to see such a small presence from some of my favorites like IBM, Fujitsu, Atos, and NEC. Hopefully the SC Exhibits Committee (and the economy!) can find ways to bring back the pre-pandemic glory of the show floor. The expo wasn't all doom and gloom though! Even though I couldn't make my complete rounds this year, there were a couple of highlights for me. VAST's masterful marketing: Perhaps the splashiest vendor at SC was VAST Data, who had a brilliant marketing presence. First was the giant Vastronaut mascot that was the centerpiece of their booth. A quick search of Twitter shows just how many people seized the opportunity to take a selfie at their booth.
I would love to know how they transported that thing to and from the conference, but whatever the cost, I'll bet it was worth it. At the Grand Opening Gala on Monday, they also gave out delightfully tacky light-up cowboy hats that everyone seemed to be wearing (\"We were there! #sc22 #sc2022 @VAST_Data\" -- ntnu-hpc (@ntnuhpc), November 15, 2022). The subtle genius of this was that not only did people wear them during the gala and the Flop Gun-themed Beowulf Bash 2022 party later that night, but they had to wear them on their plane rides home since they were so inconveniently bulky. Proof in point, my wife (who doesn't work in tech) sent me a text message to confirm that she was waiting for me at the right luggage carousel at San Francisco Airport. I wonder how many innocent bystanders, traveling home for Thanksgiving on Thursday or Friday, saw the shiny cowboy hats at airports around the country and wondered what VAST was. The icing on the cake was VAST's CEO, Renen Hallak, parading around in an unmissable Chuck McGill-style space suit all week, clearly not taking himself too seriously and painting VAST as a work hard/play hard kind of company. Now, do flashy space suits and blinking cowboy hats alone mean VAST has a great product? I can't say**. But marketing is an art that I appreciate, and VAST hit some great notes this year. ** (Seriously, I'm not sure I wouldn't get in trouble for opining about another company here.) The Microsoft hardware bar: The only booth where I spent any appreciable time this year was my own employer's. I personally love booth duty and accosting strangers on the show floor, especially if there's something interesting at the booth to jumpstart a conversation. When I worked at SDSC it was a Raspberry Pi cluster, and at the Microsoft booth this year it was the \"hardware bar.\" In addition to the customary booth presentations with giveaways, swag desk, seating area, and a fun caricature artist, the physical servers that underpin the HPC nodes in Azure were on display. Microsoft contributes its hardware platform designs to the Open Compute Project so the physical hardware that runs in Azure data centers isn't entirely mysterious. Still, every cloud has its hardware secrets, so I was surprised to see these servers laid bare. The newest HPC node type (dubbed HBv4) on display was a node powered by AMD's Genoa processors just announced a few days earlier. This wasn't a display model, either; it had real DDR5 DRAM, a real NDR InfiniBand HCA, real PCIe Gen5, and a real big OCP mezzanine card with real big aluminum heat sinks and a big Microsoft sticker on top. A couple visitors commented on the way the heat piping for those Genoa CPUs was done, which I guess is unusual; rather than have a giant copper block on top of each socket, heat pipes connect the socket to massive aluminum heat sinks that are closer to the chassis inlets. In retrospect it makes sense; Genoa has a whopping twelve DDR5 DIMMs per socket, which leaves little extra room for heat sinks, and these 88+ core sockets have a staggering thermal design power. Another exotic piece of hardware on display was an \"ND MI200 v4\" server. It's logically similar to Azure's \"ND A100 v4\" server platform with two CPU sockets, eight SXM4 GPU sockets, eight 200G HDR InfiniBand HCAs, and a bunch of M.2 NVMes. But this specific server has eight MI200 GPUs on a common OAM baseboard and uses Infinity Fabric for GPU-to-GPU communication.
I've never seen an OAM-socketed anything in real life before, much less eight of them on a baseboard, so I thought this was pretty great to see in the flesh. The ND A100 v4 platform was also on display and looked very similar-but-different with its eight A100 GPUs and HGX baseboard. And unlike the MI200 variant, the general public can run on these nodes. I'm not sure what more I'm allowed to say, but my colleague Karl made a nice, quick video that runs through the entire Microsoft booth that's worth a watch, and more details can be had by contacting me or your favorite Microsoft account team privately. Of course, the hardware bar was just a way to lure people into the booth so I could achieve my real goal: meeting new folks. As I wrote before, one of my biggest realizations at SC this year is how generally confused people are about what HPC in the cloud really means--both people who come from traditional on-prem HPC and people who come from traditional enterprisey cloud. I found myself surprising many of the people with whom I spoke on the show floor with factoids that I have taken for granted. For example: Linux is the most common OS on these HPC node types. While you probably(?) can run Windows if you want on this stuff, I think only a few niche markets do this. The usage model for an HPC cluster in the cloud can be the same as on-prem. You can have login nodes, Slurm, home directories, parallel file systems, and all that. Jobs don't have to be containerized or turned into a VM image. The InfiniBand coming out of these nodes is real InfiniBand with real OFED that supports real mpich/mvapich/OpenMPI. It's the same stuff as in on-prem supercomputers. And nodes are assembled into full-bisection fat tree InfiniBand clusters just like normal. There's no noisy neighbor problem on compute nodes because HPC node types aren't shared between users. When you run a VM on an HPC node, you get the whole thing. Just like on large supercomputers. There's no horrible loss of performance due to running in a VM. Virtualization extensions, PCIe passthrough, and SR-IOV bypass the hypervisor for most things. Inside your VM, you see real Zen cores and real Mellanox HCAs, not virtualized devices. My takeaway impression is that a lot of traditional HPC folks looked at the cloud five or ten years ago, had a sour experience, and haven't paid attention since. In those last five years, though, AI has changed the game. Massive demand for the latest CPUs and accelerators, funded by live-fast-die-young venture capital, has given cloud vendors tremendous financial incentive to catch up to on-prem levels of performance efficiency for AI workloads. And it just so happens that infrastructure that's good for AI is also good for traditional modeling and simulation. SCinet! One of the unexpected highlights of my SC this year arose from a chance encounter with a former coworker from NERSC, Ron Kumar, who gave me a whirlwind tour of SCinet. I have to confess great ignorance around SCinet in general; I always saw it as a weird technological proof of concept that the strange networking people at work would go off and do in the weeks leading up to the actual conference. I knew they did some impressive wide-area transfer demos (like the petabyte-in-a-day demo at SC'16), but I didn't really get the significance. So what is SCinet?
It's this yellow bundle of cables dangling from the ceiling. The yellow cables are 144-core fiber trunks that bring over a terabit per second of bandwidth into the convention center from the Internet via the national research backbones like ESnet and Internet2 and distribute many terabits per second of capacity throughout the SC conference venue. For comparison, most HPC centers in the US only have a tenth of SCinet's wide-area bandwidth at best since 400G infrastructure is still rolling out. Most attendees may be familiar with the row of expensive-looking networking racks behind a glass wall towards the back of the expo, which is where those yellow cables dangling from the ceiling end. What I didn't realize is that if you go around to the back of the giant walled area behind this glass display, there's a security checkpoint that gates entry into a massive network operations center (NOC) full of laptops, spools of fiber, meeting rooms, and busily working teams in charge of all the lower layers of the networking stack. The process to get into the NOC involves an escort and being tagged in with a tamper-proof wristband, and I learned on the tour that there's millions upon millions of dollars worth of high-end networking equipment in those racks. If you look closely, you can see a security camera at the end of the aisle that speaks to this; that camera was one of many. Behind the pretty public-facing side of the SCinet racks is a mess of fiber and cables. I guess if you have to tear all this down after just a few weeks, there's no point in investing days in dressing it all up nicely! I particularly enjoyed the fiber panels in the third rack that appear to be affixed to the rack post with shoe laces. This year, SCinet did a neat proof-of-concept where they demonstrated three 400G routers from three vendors (Juniper, Arista, and Cisco?) all talking the same protocol to handle what I assume is the core routing for everything in the convention center. I wish I remembered exactly what was going on here, but I know enough about networking to know that, despite there being standard protocols for coordinating between networking gear, each vendor does their own implementation that is rarely easy to get interoperability from. If anyone out there knows the details of this achievement, please let me know so I can explain this a little better! In addition to networking nerd-level demonstrations, SCinet also serves up all the wifi across the convention center. That is why there were tripods with access points scattered around, and why astute attendees may have noticed janky networking equipment scattered about the venue. Again, I get it: for a network infrastructure that's only going to last a week, I don't think it's a good use of anyone's time or money to nicely dress all the networking. One last factoid I didn't know until this year was that exhibitors can request 100 Gb/s network drops into their individual booths for demos (or downloading the latest version of a PowerPoint presentation really fast).
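For a sense of scale, even that petabyte-in-a-day demo from SC'16 requires a sustained rate that would saturate most sites' entire WAN (simple arithmetic):

```python
petabyte_bits = 1e15 * 8        # one petabyte expressed in bits
seconds_per_day = 24 * 3600
print(f"{petabyte_bits / seconds_per_day / 1e9:.1f} Gb/s sustained")
# ~92.6 Gb/s -- which SCinet's multi-terabit WAN absorbs with room to spare
```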
The end result of supporting both a vast wifi network and 100G fiber across the show floor is that there was a lot of fiber going into the single row of SCinet equipment. Finally, when I posted some of these photos online during the conference, my colleague Bilel was kind enough to post a slide from the SC22 opening presentation that had the speeds and feeds of what I had toured: \"Candy Culhane shared SCinet facts #SC22 #HPC: 5.01 Tb/s of WAN capacity; $70M in HW, SW, and services provided by 29 SCinet contributors; 175 volunteers from 80 volunteer organizations; more than 450 wireless access points deployed; 29 network research exhibition proposals; 11.7 miles of fiber; 2,384 fiber patches.\" -- Bilel Hadri (@mnoukhiya), November 16, 2022. If you know anyone involved with SCinet, I highly recommend seeing if you can get a tour at the next SC. Even as a relative networking novice, I walked away with a much greater appreciation for the annual achievement of building SCinet. And who knows? Once I get bored of this whole storage thing, maybe I'll try getting into high-performance networking. Composability panel: This year I was invited to participate in a panel titled \"Smackdown! Does HPC Need Composability Now?\" moderated by Addison Snell and Dan Olds from Intersect360 Research. This panel was...different. Unlike the traditional SC panel where panelists take turns presenting slides and saying erudite things, this panel had two teams of panelists, and my team only had one slide to present. The ground rules included \"personal attacks are allowed,\" and needless to say, the panel was about equal parts entertainment and technical discourse. That's not a bad thing, though. Addison and Dan did a phenomenal job of pulling their respective teams together and leading discussion in a format that both brought forward the key pros and cons of composability in HPC while poking fun at the thinly veiled, ego-driven personalities that often make up these sorts of panels. Rather than politely dancing around issues like sacrificing memory bandwidth by putting accelerators at the far end of a PCIe bus or gaining higher utilization by allowing users to mix and match CPU, NICs, and GPUs, us panelists were free to shoot straight (or perhaps a bit hyperbolically) and call each other out on our hidden agendas. I hope it goes without saying that all of us panelists were in on the format and don't actually think people on the other side are dumb. By wrapping technical arguments in snarky comments, we could keep the level of discussion accessible to a wide audience, drive home the key points from both sides, and ensure that we weren't losing audience members who don't care about the PhD-level details as much as they want to hear what their peers are thinking about this exciting new space. I got some feedback afterwards that I didn't seem to hold back, so if anyone did take anything I said seriously, I am very sorry! On a technical level, what was the outcome? After both sides argued their case, it turned out that there was about a 60/40 split between people who felt composability wasn't required yet and those who felt it was. Even among panelists, many of us were a lot less convinced about our respective positions than we let on during the panel itself. I got a chuckle when I realized that I wasn't the only one who, when invited to be on the panel, asked \"what side do you want me to argue?\" I honestly could have gone either way because the dust has not yet settled.
Dan Stanzione, director of TACC, gave the truest answer to the question of \"will composability help HPC\" up front--\"it depends.\" Maybe this is a growth opportunity, or maybe it's a lukewarm reception. Either way, composable technologies are hitting the market regardless of whether you think they'll be useful or not. AMD Genoa supports CXL 1.1 with extensions for memory pooling, Samsung has memory-semantic SSDs, and everyone and their mother is working on photonics to get higher bandwidths and lower latencies over longer distances. This makes it easier for people to dip their toes in the water to see if composability makes sense, and I think that's what a lot of people will wind up doing in the coming years. Customer meetings: Unlike in years past, my SC experience this year was dominated by customer meetings. I've been on the customer side of the table plenty of times, but I was surprised to find that it was actually more fun to be on the vendor side for a change. I'm part salesman at heart, so I found it personally gratifying to end a meeting with people nodding along rather than scratching their heads. I learned as a customer that it's very easy for vendors to go way off the rails and waste everyone's time, so I was grateful to have avoided the awkward confusion that punctuates those kinds of meetings. I also went into the week worrying that I'd be sitting in the same room, hearing the same pitch and the same jokes, and answering the same questions all week. Thankfully, I work with some great field, business, and product teams who set up interesting conversations rather than rote recitations of boring roadmap slides. Approaching the same topics from different angles helped me figure out how all the pieces of what I'm working on fit together to make a complete picture too; there weren't nearly as many opportunities to do this in the DOE world since the end-users of the HPC systems on which I worked aren't told anything until all the design decisions have already been made. A few personal notes: This SC was significant to me at a variety of levels; it was the first time I'd gotten on an airplane since February 2020, the first time I'd traveled since starting a new job at a new company, and the first time I'd met any of my new coworkers outside of the structure of a Teams call. During the pandemic I realized that getting out into the world and talking to people from all corners of HPC was my favorite part of my job. Not being able to go to events like SC and maintain that sense of community involvement dramatically impacted my level of professional satisfaction for the last two years, so I'm glad I was able to finally go this year. Though customer meetings were a lot more fun than I expected them to be, I still felt bummed that I could spend so little time walking the expo, talking to folks, and attending all the BOFs normally on my must-attend list. Compounding this was my personal choice to not dine indoors and consequently miss out on almost all other chances to catch up with old friends and colleagues. I also decided to leave SC a day earlier than I usually do to reduce my risk of getting sick, which didn't help either. There's never enough time at SC, but this year was particularly pressed. I say all this not to complain, but to say how much I appreciated the people who went out of their way to come accost me during the precious few hours I actually had on the exhibit floor.
Some I'd not seen since SC'19, and some I'd never actually met since we only started working together mid-pandemic. The conference is busy for everyone, so giving me a slice of your time was very meaningful. That sense of community membership is why I go to SC, it's why I still work in this business, and it's why I try to contribute whatever I can to whoever wants it, whether it be a student, engineer, salesperson, or marketer.", + "content_html": "

    The biggest annual conference in HPC, the SC conference, was recently held in Dallas, Texas in its second hybrid incarnation since being all-remote for the pandemic. This year's event attracted over 11,000 attendees, which is much closer to the pre-pandemic high of 14,000 than last year's 7,000, and judging from the crushed conference rooms and busy expo floor, it looks like SC is not that much worse for wear.

    This year's conference was quite different for me since I attended for the first time as a vendor, not a researcher or practitioner, and I spent most of my days behind closed doors talking to customers. I didn't get to attend any of the keynotes, BOFs, or panels to which I wasn't invited as a result, so I'm not really qualified to give an erudite summary of the conference or expo this year.

    So instead, I'm just writing down what I remember in the order that I remember it, and not necessarily in a coherent narrative form. I'm sure I missed a lot (for example, mixed precision seemed big this year, and I heard Jack Dongarra gave a fantastic Turing Award talk), so I encourage others to write their own recaps and share with the community!

    High-level themes

    I actually started writing an SC'21 recap last year which I never posted, and re-reading the intro was funny--you'd think nothing has changed in the last year.

    The underwhelming

    The biggest deal appears to be that exascale is here, and it turns out that it's not that big of a deal. China let the air out of the tires by debuting their exascale systems at SC'21, and not only did they thumb their nose at Top500 by not submitting, they debuted by winning a Gordon Bell prize instead. The first US exascale system, Frontier, debuted at ISC this year, leaving its showing at SC a bit deflated too. Frontier was featured in the Gordon Bell prize-winning paper this year, but that work required the use of four Top-10 systems, not just Frontier, painting the reality that one giant computer rarely stands on its own when it comes to advancing science.

    This isn't to say that deploying exascale systems isn't a noteworthy feat and worth commendation, but I felt like the hype over the last five years treated the achievement like an end state instead of a milestone. And now that we've passed the milestone, the community is grasping to figure out what comes next. So what is next?

    Quantum had a strong and growing presence at SC, as it has for the last few years. But the conclusion of the panel \"Quantum Computing: A Future for HPC Acceleration\" was that no, it's not close to being ready.

    Disaggregation and composability were another theme with growing momentum. And like quantum, there was a panel asking the same question: \"Does HPC need composability now?\" The answer, again, was no, not yet. More on that below.

    What about RISC-V? Surely that will revolutionize the field. As it turns out, the answer there is also that RISC-V is not ready to do anything useful for HPC yet.

    The list goes on of technologies and trends that people are trying to boost now that exascale is \"solved.\" The reality, I think, is that \"exascale\" will take years to actually mature since it appears to have a ton of technical debt that accumulated during the race to be first. US Exascale rests on the shoulders of AMD and Intel, two companies whose software stacks have not caught up to the market leader, so there will be a lot of thrashing around as development practices and optimization settle out around these systems.

    Struggling with code porting is not very exciting to computer science Ph.D.s, so I expect future SCs to mirror this one and bifurcate into two distinct tracks: those struggling to identify the next big thing in the research space, and those struggling to use the systems that were rushed to deployment.

    The unexpected

    My SC experience was very biased since I didn't get out much, but two related themes kept popping up across different meetings and the sessions I did attend.

    Power efficiency is serious business now. It used to seem like people talked about the need for energy-efficient HPC in an abstract sense while continuing to jam more power into every rack without changing their approach to system design, facilities, and deployment models. That has hit a hard wall with energy prices soaring in Europe, though. The financial impacts of power-inefficient supercomputing have gone from a one-time capex cost to an ongoing opex cost that is putting many HPC facilities on an unsustainable cost trajectory. Even sites that aren't doing new deployments are facing sudden, sharp increases in their costs, and nobody has good answers about how they will keep the lights on.
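To put rough numbers on that shift (an illustrative sketch with made-up but plausible figures; leadership-class systems can draw on the order of 20 MW):

```python
# Illustrative only: annual electricity cost of a 20 MW system at
# different energy prices. All numbers are hypothetical.
megawatts = 20
hours_per_year = 24 * 365
for price_per_kwh in (0.05, 0.15, 0.30):            # $/kWh
    annual_cost = megawatts * 1000 * hours_per_year * price_per_kwh
    print(f"${price_per_kwh:.2f}/kWh -> ${annual_cost / 1e6:.0f}M/year")
# $0.05/kWh -> $9M/year
# $0.15/kWh -> $26M/year
# $0.30/kWh -> $53M/year
```

A tripling of energy prices turns into tens of millions of dollars of extra opex per year, every year, for the same machine.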

    Cloud HPC is confusing. With only 15% of total HPC dollars winding up in the cloud, it's little surprise that most HPC folks are only peripherally aware of what HPC in the cloud really means. Worse yet, a subset of those folks are actively hostile towards the idea of running HPC workloads in the cloud. I spoke with my colleagues from all three major cloud service providers as well as my colleagues in DOE, NSF, and education throughout the week, and everyone painted this same general picture.

    There seems to be a mismatch between the expectations of on-prem HPC folks and cloud HPC folks. For example, I was asked why Windows doesn't support OpenMP very well, and after a bit of digging, I realized that the question really wasn't about using OpenMP on Windows as much as it was about using OpenMP in the cloud. There was a latent assumption that \"HPC in Microsoft's cloud\" must mean \"HPC on Windows\" which, for the record, is false--I don't even know how to use Windows anymore. Similarly, people decried the performance impacts of sharing HPC nodes with others in the cloud (they are not shared), overheads of virtualizing InfiniBand or GPUs (everyone uses PCIe passthrough or SR-IOV for HPC nodes), and other misconceptions.
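For what it's worth, these are easy things to spot-check from inside an HPC VM. A quick sketch of the kind of check I mean (standard Linux sysfs/procfs paths, though details vary by node type and image):

```python
import pathlib

# With SR-IOV / PCIe passthrough, the guest sees real devices, not
# emulated ones. On an InfiniBand-equipped node you'd expect real HCAs:
ib = pathlib.Path("/sys/class/infiniband")
if ib.exists():
    for dev in sorted(ib.iterdir()):
        print("InfiniBand device:", dev.name)    # e.g. mlx5_0, mlx5_1, ...

# And /proc/cpuinfo reports the actual host silicon, not a generic vCPU:
for line in pathlib.Path("/proc/cpuinfo").read_text().splitlines():
    if line.startswith("model name"):
        print(line)
        break
```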

    This isn't to say that cloud people aren't confused too; I heard stories about conversations that went sideways because cloud folks (not from my employer, thankfully!) didn't realize that the requirements of a traditional gov/edu HPC facility couldn't be neatly wrapped up into a single workload with a single solution, contrary to the case across many commercial AI shops. And both sides are struggling to find models for partnership and engagement that mirror the traditional relationship between places like a DOE or NSF facility and a company like Cray. HPC departments are used to buying supercomputers and parallel file systems, while cloud providers sell computing and storage as a service. The distinction may seem trivial at the surface, but there's a large divide that becomes evident once both sides start trying to drill into the details of what a partnership would look like.

    Parallel I/O in Practice Tutorial

    This was my fifth year contributing to the Parallel I/O in Practice Tutorial with my colleagues at Argonne and Google, and it was our first time doing it in-person since 2019. It felt really good to be back in front of people to opine about the perils of POSIX and the greatness of the Darshan I/O profiling tool, and this year I retired the material I used to present on burst buffers (since DataWarp and Infinite Memory Engine have lost relevance in HPC) and the TOKIO holistic I/O analysis framework (since it is no longer funded/maintained). In their stead, I presented material on benchmarking with IOR and mdtest that I debuted at LUG 2022 this year.

    I haven't gotten feedback yet on whether this change was a net positive one, but I think it went over well. Benchmarking I/O is really challenging if you don't understand how things like page cache really work in distributed systems, and walking through some benchmark examples concretizes a lot of abstract parallel file system concepts like locking and striping. And since benchmarking is a rabbit hole of arbitrary complexity, ending the tutorial with advanced benchmarking topics turned out to be a nice way to add buffer to the end of an eight-hour stretch of carefully timed presentations. It's very easy to skip over the nuances of analyzing mdtest outputs if attendees have a lot of questions about more important things at the end of the day.
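As a tiny illustration of why page cache confuses benchmarks, compare a write that merely lands in DRAM with one that is forced out to storage (a minimal single-node sketch; parallel file systems layer striping and locking on top of this):

```python
import os, time

data = os.urandom(256 * 1024 * 1024)                 # 256 MiB of junk

def write_mbps(path, sync):
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    start = time.perf_counter()
    os.write(fd, data)
    if sync:
        os.fsync(fd)          # force dirty pages out of the page cache
    elapsed = time.perf_counter() - start
    os.close(fd)
    return len(data) / elapsed / 1e6

print(f"buffered: {write_mbps('/tmp/ior.dat', sync=False):7.0f} MB/s")
print(f"fsynced:  {write_mbps('/tmp/ior.dat', sync=True):7.0f} MB/s")
# The first number mostly measures memcpy into DRAM; only the second
# says anything about the storage underneath.
```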

    The most surprising observation of the tutorial was how many attendees aren't using MPI anymore. We got a lot of questions last year about task-oriented I/O, and this year we got some great questions about trying to understand or tune the I/O performed by Python-based analytics frameworks. The decision to add support to Darshan for profiling non-MPI applications back in 2019 is now paying dividends by ensuring it remains a relevant tool for these new analytics and AI workloads, and we'll probably have to give more attention to optimizing these workloads' I/O in the future.

    DAOS User Group

    Monday morning was cold and rainy--a perfect day to attend the 2022 DAOS User Group which was held off-site at the Fairmont Hotel.

    Whether you particularly care about DAOS or not, the cross-community HPC I/O brain trust is guaranteed to be in attendance, and this year did not disappoint. In addition to the expected stakeholders from Intel and DOE, representatives from all three big CSPs were in attendance, and Google Cloud, Seagate, and HPE/Cray were all on the agenda--a sign of both the diversifying landscape of large HPC companies investing time in DAOS and the strength and willingness of the DAOS team to partner with all comers.

    Life after Optane

    The question that opened up the meeting, of course, was \"what is the future of DAOS since Intel cancelled Optane?\" Kelsey Prantis had the official statement (I'll replace the grainy photo once the DUG slides are online...):

    The high-level project answer is that DAOS isn't going anywhere. Aurora, by virtue of still having Optane DIMMs, will not be affected, and DAOS will maintain support for Optane until Intel drops its last Optane DIMMs (Crow Pass for Sapphire Rapids) from support life sometime towards the end of this decade.

    For new customers who aren't going to use Optane, the answer is \"Metadata on NVMe,\" a capability being codeveloped by Intel, HPE, and Google that implements a write-ahead log (WAL) to let DAOS use volatile DRAM instead of Optane. It works like a file system journal: a compact representation of each write is committed to NVMe immediately after landing in DRAM, the write is then acknowledged, and DAOS asynchronously writes back the properly serialized representation of the transaction afterwards. Johann Lombardi had a helpful cartoon that showed how this WAL will fit into DAOS:

    A key benefit of DAOS's implementation of this WAL is that it will still be able to service incoming writes while flushing old ones; although I don't fully grasp how this works, it is something enabled by the sophisticated I/O scheduler already implemented in DAOS.
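
    Here is a toy sketch of that acknowledgement flow in Python--my own illustration of the journaling pattern described above, not actual DAOS code or its API:

        # Toy write-ahead log in the spirit of the metadata-on-NVMe design.
        dram_state = {}  # volatile landing zone (the role Optane DIMMs used to play)
        nvme_wal = []    # compact, append-only journal persisted on NVMe
        nvme_data = {}   # final, fully serialized home of each transaction

        def write(key, value):
            dram_state[key] = value        # 1. land the write in volatile DRAM
            nvme_wal.append((key, value))  # 2. persist a compact journal record to NVMe
            return "ack"                   # 3. acknowledge: the journal survives power loss

        def flush():
            # Later, off the critical path: write back the fully serialized
            # representation and retire the journal entries it covers.
            while nvme_wal:
                key, value = nvme_wal.pop(0)
                nvme_data[key] = value

        write("oid.42", b"metadata update")
        flush()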

    The complete implementation isn't expected to be released until Spring 2024, but it appears to touch only a few components of DAOS and doesn't affect anything above the VOS layer of the DAOS server.

    There was also mention of adding support for new CXL-attached memory-semantic SSDs to keep the persistent memory capability of DAOS alive beyond Optane. I'm not sure if this would offer a performance benefit over the metadata-on-NVMe feature; early results show that metadata-on-NVMe actually delivers higher IOPS than Optane since the synchronous write path is much simpler when it doesn't have to account for memory persistence. That said, I didn't really follow the full extent of the options on the table for how DAOS metadata may work across different types of memory.

    DAOS in the flesh at Argonne

    Kevin Harms presented an update on Aurora's massive 220 PB DAOS installation and laid out its configuration. There are 1,024 DAOS servers based on the Intel Coyote Pass server design, each sporting

    • 2x Intel Xeon 5320 (Ice Lake) sockets
    • 2x DAOS engines (one per socket)
    • 16x 32GB DDR4 DIMMs
    • 16x 512GB Optane DIMMs (Persistent Memory 200)
    • 16x 15.36 TB Samsung PM1733 NVMe SSDs
    • 2x 200 Gb/s Slingshot NICs

    The total configuration is quoted at 220 PB usable, but Kevin pointed out that this assumes that every object is erasure coded at 16+2. Unlike virtually every other storage system out there, though, users can choose the data protection for their individual objects when they create them, meaning this 220 PB capacity is an upper limit to what users can do. Users with very hot, read-only objects may choose to replicate instead of erasure code, while others who are capacity-constrained may choose to erasure code everything at 16+2 at the cost of latency and IOPS. This flexibility is really powerful for users since they can tailor their object layout (\"object class\" in DAOS parlance) to match the needs of their workload.
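
    For what it's worth, a back-of-envelope check of that number against the hardware list above roughly holds up: 1,024 servers x 16 SSDs x 15.36 TB comes to about 251.7 PB of raw NVMe, and multiplying by the 16/18 (~89%) space efficiency of 16+2 erasure coding gives roughly 223.7 PB--right in line with the quoted 220 PB once a bit of system overhead is subtracted.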

    Argonne will be slicing up this DAOS system by giving each scientific project its own DAOS pool, and each pool will be assigned to only 80% of the available DAOS servers by default. This seems like a nice way of providing most of the storage system's performance to every user while retaining the freedom to work around bad hardware, bad users, and the other performance problems that plague file systems like Lustre, which distribute everything across every single server equally.

    Finally, I noticed that Aurora will be using Samsung SSDs, not the Intel (now Solidigm) QLC NAND that appeared in all the DAOS slides floating around two years ago. I'm not sure what happened there, but the move from Solidigm QLC to Samsung TLC couldn't have been cheap.

    New features and contributions

    DAOS is starting to pick up some truly valuable features that are being developed and contributed by third parties. Of note, croit has contributed a feature which allows DAOS to serve up NVMe over Fabrics targets, and Seagate contributed an S3 gateway for DAOS. Along with the DFS file system interface, DAOS now offers the trifecta of standard object, block, and file services just like Ceph. Unlike Ceph though, performance on DAOS is a first-class citizen. While croit made it clear that the NVMeoF support still has a ways to go to improve the way it does thread pooling and provides resilience, they showed 1.4 million IOPS from a single storage client using TCP over Ethernet with minimal client-side overhead.

    Intel is also developing multitenant support for DFUSE, allowing a single compute node to share a DAOS mount with permissions enforced through UID/GID just like a regular file system. Before this update, the FUSE-based nature of DAOS allowed any unprivileged user to mount their container (good), but only one FUSE agent could be alive on a single node at a time (not good), which prevented multiple users sharing a node from each mounting their own containers.

    DAOS also has some longer-term enhancements that I thought were interesting:

    • expanding the range of POSIX calls supported by DAOS's intercept library to include metadata calls and memory-mapped I/O using userfaultfd
    • implementing collaborative caching - essentially reimplementing the Linux kernel page cache in userspace so that multiple processes can share cached DAOS pages
    • supporting a computational storage paradigm by enabling offload of userspace eBPF scripts to DAOS servers

    DAOS in a larger data center ecosystem

    Dean Hildebrand from Google Cloud then gave an overview of Google's efforts in bringing DAOS into the cloud. He had some nice performance graphs and I'll link the full presentation here once it's uploaded (it's worth a watch), but the part I found the most insightful was how they are trying to decide where a technology like DAOS fits in the larger cloud storage ecosystem. He outlined two different ways DAOS could work in GCP:

    1. Caching: Google Cloud Storage (GCS) is the point of truth and DAOS is a cache
    2. Tiering: DAOS is a point of truth, and GCS is an archive

    He said they were leaning towards the caching model where data only lives ephemerally in DAOS, and personally, I think this is the right move since DAOS in the cloud is not resilient without Optane. However, this choice reflects a much larger tension in cloud storage for HPC:

    1. The centerpiece of every cloud's data story is a scalable, low-cost, low-performance object store which is analogous to what on-prem HPC would call campaign, community, or project storage.
    2. HPC demands higher performance than these object stores can generally deliver, though.

    To bridge the gap between these two truths, auxiliary services must bolt onto the object layer and provide higher performance, at a higher cost, for the duration of I/O-intensive HPC jobs. Some provide true tiering from object storage into a resilient layer of flash (like FSx for Lustre and Weka do), while others project the contents of the object store through a high-performance caching layer (like HPC Cache and File Cache) that is never meant to hold data persistently.
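
    A minimal sketch of the difference between the two models, using Python dictionaries as stand-ins for the fast layer and the object store--entirely my own illustration, not any real service's API:

        # Toy write paths for the two models; "fast" stands in for something
        # like DAOS and "object_store" for something like GCS.
        fast, object_store, pending_tiering = {}, {}, []

        def cache_model_write(path, data):
            fast[path] = data          # ephemeral copy, purely for performance
            object_store[path] = data  # write-through: the object store stays the truth

        def tier_model_write(path, data):
            fast[path] = data             # the fast tier is the truth for now...
            pending_tiering.append(path)  # ...so it must survive until tiered down

        def tier_down():
            while pending_tiering:
                path = pending_tiering.pop()
                object_store[path] = fast[path]  # archive back to the object store

    In the caching model, losing the fast layer loses nothing; in the tiering model, the fast layer holds the only copy of recent data, which is why it has to be resilient.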

    This isn't rocket science, but I never thought deeply about the two models since campaign/community/project storage in on-prem HPC is usually fast enough to avoid needing caches or fine-grained tiering capabilities.

    John Bent also had a thought-provoking presentation about how Seagate's now-\"deprioritized\" CORTX object store, which once competed with DAOS as Mero, contains ideas that can complement DAOS:

    Whereas DAOS delivers high performance using NVMe, CORTX delivers great economics using HDDs, and their strengths are complementary to each other. While I don't fully grasp how a tiered (or caching!) system comprised of DAOS and CORTX could be implemented, John rightly pointed out that the same level of space efficiency can deliver higher data protection if multi-level erasure coding is used to stripe across durable block storage. His specific example was erasure coding at 8+1 across servers and 10+1 within servers to deliver both high efficiency and high durability. This could map to something like running DAOS atop something like CORVAULT, but I don't think all the necessary pieces are in place to realize such a harmonious coexistence yet.
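
    To put rough numbers on that: a flat 8+2 code has 8/10 = 80% space efficiency and tolerates exactly two arbitrary failures, while the two-level 8+1 x 10+1 scheme has (8/9) x (10/11) = about 81% efficiency yet can lose an entire server and still absorb a drive failure behind every surviving server, since the inner 10+1 code repairs single-drive losses locally without triggering a cross-server rebuild. (These are my back-of-envelope numbers, not John's.)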

    Of course, completely tossing Reed-Solomon for something more sophisticated (like VAST does with its locally decodable 150+4 scheme) obviates the need for multilevel erasure entirely. But DAOS has not gone down that route yet.

    And as with every talk John gives, there were lots of other interesting nuggets scattered throughout his presentation. Two of my favorites were:

    • A slide that pointed out that, when you buy something like Ceph as an appliance, you may be spending only 25% of the total cost on storage media and the rest is infrastructure, service, and support. This struck me as a bit on the low end, but some enterprisey NAS and midrange parallel file system appliances can go this low. Spending 60% to 90% on media is a lot nicer for the buyer (and companies like Seagate) if you can buy at scale or eschew the white-glove support, and John suggested that it's up to companies like Seagate to fix the software issues that require customers to pay for white-glove support in the first place.  After all, the less someone spends on support and licenses, the more they can spend on Seagate hard drives.
    • John's final slide pointed out that object stores were originally designed to get around the limitations of POSIX file systems, but as they've evolved over the last decade, they're starting to look a lot like file systems anyway since they require strong consistency, hierarchical namespaces, and familiar file semantics. Has all the work put into developing super-fast object stores like DAOS over the last ten years really just brought us back full circle to parallel file systems?  Companies like VAST and Weka have shown that maybe POSIX isn't as bad as the research community (myself included!) have claimed it to be; it was really just low-performance implementations that nobody wanted.

    Once John's talk is uploaded to the DUG 2022 website, I'll link it here. Like Dean Hildebrand's talk, it is well worth watching (but for wildly different reasons!).

    PDSW 2022

    I had to duck out of the DAOS User Group early to run (through the rain) to the 7th International Parallel Data Systems Workshop (PDSW 2022) on Monday afternoon.


    Much to everyone’s surprise, PDSW was only given a half day this year and everything felt a little compressed as a result. The organizers kept the work-in-progress (WIP) sessions, which can often be an interesting peek into what students are pursuing, but little A/V problems and the unforgiving schedule probably did a disservice to the up-and-comers who use the WIP track to lay the groundwork for future full-length papers. Hopefully SC’23 restores PDSW to its original full-day status.

    Splinters keynote from Arif Merchant at Google

    The keynote presentation was given by Arif Merchant from Google about Splinters, the framework that Google Cloud uses to sample I/Os in a scalable way. The challenge they face is that it's impossible to trace and store every single I/O that hits Google's storage servers (D servers), but having an understanding of I/O patterns is essential for characterizing workload I/O behavior and planning for future infrastructure. In fact, this problem is so important that Google isn't the only cloud that's solved it!

    A lot of what Arif talked about is very similar to how Azure does its I/O tracing under the hood. I suppose it should not be a surprise that there are only so many ways to solve the challenge of sampling individual IOPS in a way that fairly represents the aggregate workload of a huge distributed storage system. One really smart thing Splinters does that I liked was to sample along two different dimensions: not only do they evenly sample across all IOPS at a fixed rate (the obvious thing), but they also sample across files at a fixed rate. In this latter case of per-file sampling, they take a tiny fraction of files and capture every I/O for those files to get a complete picture of how individual files are being accessed.

    This file sampling fills the huge gap that exists when randomly sampling IOPS alone. Because different I/Os have different \"costs\" (for example, reading a 1 MiB file as a single 1 MiB read op or as 256x 4 KiB read ops is functionally equivalent to an application), randomly sampling ops introduces systematic biases that can be difficult to back out after the data has been sampled, subsampled, aggregated, and reduced. Splinters' approach lets you see the workload from two different angles (and two different biases) and answer a much larger range of questions about what's really happening across thousands of storage servers.
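
    The gist of that two-dimensional sampling, as I understood it, might look something like this toy sketch--my own illustration with made-up rates, not Splinters' actual implementation:

        import hashlib

        OP_RATE = 1_000     # keep roughly 1 in 1,000 ops overall
        FILE_RATE = 10_000  # follow roughly 1 in 10,000 files exhaustively

        def bucket(key, width):
            # Stable hash so every storage server makes the same decision
            return int(hashlib.md5(key.encode()).hexdigest(), 16) % width

        def should_trace(op_id, file_id):
            op_sampled = bucket(op_id, OP_RATE) == 0        # dimension 1: uniform over ops
            file_sampled = bucket(file_id, FILE_RATE) == 0  # dimension 2: all ops for a few files
            return op_sampled or file_sampled

        print(should_trace("op-12345", "file-789"))

    Because the per-file dimension keys on the file rather than the op, every I/O against a sampled file is captured, which preserves the access patterns that uniform op sampling destroys.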

    That said, it was interesting to hear Arif describe how Splinters evolved out of a different internal Google project but wound up outliving it. Splinters is also similar to, but slightly different from, their Dapper infrastructure, which also does scalable distributed system tracing. And he made reference to F1, a scalable SQL database that is similar to (but not the same as) the SQL-like query interface that Splinters uses. I got the impression that new technologies come and go pretty quickly at Google, and there's a large appetite for creating new software systems outright rather than shoehorning an existing system into solving a new problem. I can't say one way is better than the other; I was just surprised at the contrast with my own experiences.

    Practical papers

    PDSW had a healthy combination of both very-researchy papers and applied research papers this year. I could only stick around for the applied papers, and two left an impression.

    In the first, Jean Luca Bez presented Drishti, a tool that lives downstream of the Darshan I/O profiling library and finally does what the Darshan community has danced around for years--turning a Darshan log into an actionable set of recommendations on how to improve I/O performance. It does this by cataloguing a bunch of heuristics and using Darshan's new Python integrations to pore through a log and identify known-problematic I/O patterns. Like Jean Luca's DXT Explorer tool, Drishti has a slick user interface and greatly extends the usability and insights that can be pulled out of a Darshan log file. It probably won't win a Turing Award, but this sort of work is going to benefit scores of HPC end-users by making Darshan (and troubleshooting I/O problems) much more accessible to mere mortals for years to come.
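
    To give a flavor of what a heuristic catalog like that might look like--this is my own toy illustration with invented counter names and thresholds, not Drishti's actual rules:

        # Toy Drishti-flavored checks over counters pulled from an I/O profile.
        observed = {
            "writes": 1_000_000,
            "writes_under_4k": 980_000,   # hypothetical counter names
            "shared_file_ranks": 512,
            "stripe_count": 8,
        }

        def check_small_writes(c):
            if c["writes"] and c["writes_under_4k"] / c["writes"] > 0.9:
                return ("Over 90% of writes are smaller than 4 KiB; consider "
                        "batching or collective buffering to issue larger requests.")

        def check_stripe_contention(c):
            if c["shared_file_ranks"] > 8 * c["stripe_count"]:
                return ("Many ranks per stripe on a shared file; consider raising "
                        "the stripe count to spread locking across more servers.")

        checks = (check_small_writes, check_stripe_contention)
        for msg in filter(None, (check(observed) for check in checks)):
            print(msg)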

    Adrian Jackson also presented a very tidy apples-to-apples comparison of DAOS and Lustre on the same hardware using both a systems-level benchmark and an application-inspired, object-oriented data model benchmark. The specific bake-off of a new curiosity (DAOS) and the decades-old incumbent (Lustre) is probably interesting to storage nerds, but I think the real novelty of the work is in its exploration of some uncomfortable realities that the HPC I/O community will have to face in the coming years:

    • Does \"slow memory\" (nonvolatile Optane or CXL-attached memory SSDs) give actual benefit to existing file systems (like Lustre), or is rethinking the entire storage stack (like DAOS did) really necessary to unlock the performance of new hardware?
    • Do applications need to rethink their approach to I/O to make use of post-POSIX storage systems like DAOS, or is performing I/O as you would on a file system (Lustre) on a post-POSIX storage system (DAOS) good enough?

    My take from the work is that, for simple I/O patterns like checkpoint/restart, you can get pretty far by just treating something like DAOS the same as you would a parallel file system:

    But if you want your data at rest to have the same data model as how it's handled within the application, you really ought to use a storage system that supports data models that are more expressive than a stream of bytes (which is what POSIX files are).

    The authors didn't do a perfect job of giving Lustre its fair shake since they chose to use (abuse) directories and files to represent their application's data model on-disk instead of developing an object-file model that file systems like Lustre handle a little better. But let's be real--HPC is full of applications that do the exact same thing and represent datasets on-disk using complex hierarchies of directories and files simply because that's the easiest way to map the application's representation of data into the standard file system model. In that sense, storage systems that represent rich data models in a high-performance way should be really valuable to naive applications that map in-memory data structures directly to files and directories.

    Going back to John Bent's closing slide from his DAOS User Group talk, though, does any of this even matter since all answers lead back to parallel file systems? Maybe there's something to be learned about adding better back-door APIs that support more diverse data models than what POSIX file interfaces give us.

    The SC22 Expo

    The expo is my favorite part of SC because it's when I get to talk to people one-on-one and learn about corners of the HPC industry that I would've never otherwise sought out. Much to my dismay, though, I had very little time to walk the floor this year--so little that I didn't get any swag. If you want to read up on what interesting technology was being showcased, I strongly recommend reading all the great content that Patrick Kennedy and his team at STH created covering the expo.

    That said, I did notice some curious trends about the show floor overall.

    The NVIDIA booth was notably absent this year (though they shared booth space with partners), and many of the usual top vendors had a significantly smaller presence on the expo floor. Just for fun, I compiled the top ten(ish) vendors by booth size:

    1. Weka.io (3,200 sqft)
    2. VAST Data, Department of Energy, Penguin Computing, HPE, and Microsoft (2,500 sqft)
    3. AWS (2,000 sqft)
    4. Google and TACC (1,600 sqft)
    5. Supermicro, AMD, Intel, Dell, NASA, and Indiana University (1,500 sqft)

    I think it's amazing to see all-flash storage companies at the top of the list alongside all of the Big 3 cloud service providers. I may be reading too much into this, but this may mean that the money behind SC is shifting towards companies playing in the cloud-based AI space instead of traditional big iron for simulation. Or perhaps it's a sign that most of the traditional HPC players are taking a hard look at the return they get on a big booth given the current economic climate and pulled back this year.

    I did chat with a couple colleagues who completely opted out of a booth this year (for reference, SC'21 had 10% fewer exhibitor booths than SC'19), and the reasoning was consistent: they found more value in having staff meet with customers privately or attend the technical sessions and engage with people organically. Combined with a bit of bad taste left over from SC's high cost of hosting pandemic-era \"digital booths\" despite low return (did anyone visit digital booths at SC'20 or SC'21?), I can see why some vendors may have chosen to skip the expo this year.

    Whatever the reasons may be, I was a bit sad to see such a small presence from some of my favorites like IBM, Fujitsu, Atos, and NEC. Hopefully the SC Exhibits Committee (and the economy!) can find ways to bring back the pre-pandemic glory of the show floor.

    The expo wasn't all doom and gloom though! Even though I couldn't make my complete rounds this year, there were a couple of highlights for me.

    VAST's masterful marketing

    Perhaps the splashiest vendor at SC was VAST Data who had a brilliant marketing presence. First was the giant Vastronaut mascot that was the centerpiece of their booth:

    A quick search of Twitter shows just how many people seized the opportunity to take a selfie at their booth. I would love to know how they transported that thing to and from the conference, but whatever the cost, I'll bet it was worth it.

    At the Grand Opening Gala on Monday, they also gave out delightfully tacky light-up cowboy hats that everyone seemed to be wearing:

    We were there! #sc22 #sc2022 @VAST_Data pic.twitter.com/fWhuSgBfpL

    — ntnu-hpc (@ntnuhpc) November 15, 2022

    The subtle genius of this was that not only did people wear them during the gala and the Flop Gun-themed Beowulf Bash 2022 party later that night, but they had to wear them on their plane rides home since they were so inconveniently bulky. Case in point: my wife (who doesn't work in tech) sent me this text message to confirm that she was waiting for me at the right luggage carousel at San Francisco Airport:

    I wonder how many innocent bystanders, traveling home for Thanksgiving on Thursday or Friday, saw the shiny cowboy hats at airports around the country and wondered what VAST was.

    The icing on the cake was VAST's CEO, Renen Hallak, parading around in an unmissable Chuck McGill-style space suit all week, clearly not taking himself too seriously and painting VAST as a work hard/play hard kind of company. Now, do flashy space suits and blinking cowboy hats alone mean VAST has a great product? I can't say**. But marketing is an art that I appreciate, and VAST hit some great notes this year.

    ** (Seriously, I'm not sure I wouldn't get in trouble for opining about another company here.)

    The Microsoft hardware bar

    The only booth where I spent any appreciable time this year was my own employer's. I personally love booth duty and accosting strangers on the show floor, especially if there's something interesting at the booth to jumpstart a conversation. When I worked at SDSC it was a Raspberry Pi cluster, and at the Microsoft booth this year it was the \"hardware bar.\"

    In addition to the customary booth presentations with giveaways, swag desk, seating area, and a fun caricature artist, the physical servers that underpin the HPC nodes in Azure were on display. Microsoft contributes its hardware platform designs to the Open Compute Project so the physical hardware that runs in Azure data centers isn't entirely mysterious. Still, every cloud has its hardware secrets, so I was surprised to see these servers laid bare.

    The newest HPC node type (dubbed HBv4) on display was a node powered by AMD's Genoa processors just announced a few days earlier:

    This wasn't a display model, either; it had real DDR5 DRAM, a real NDR InfiniBand HCA, real PCIe Gen5, and a real big OCP mezzanine card with real big aluminum heat sinks and a big Microsoft sticker on top. A couple of visitors commented on the way the heat piping for those Genoa CPUs was done, which I guess is unusual; rather than having a giant copper block on top of each socket, heat pipes connect the socket to massive aluminum heat sinks that are closer to the chassis inlets. In retrospect it makes sense; Genoa has a whopping twelve DDR5 DIMMs per socket which leaves little extra room for heat sinks, and these 88+ core sockets have a staggering thermal design power.

    Another exotic piece of hardware on display was an \"ND MI200 v4\" server:

    It's logically similar to Azure's \"ND A100 v4\" server platform with two CPU sockets, eight SXM4 GPU sockets, eight 200G HDR InfiniBand HCAs, and a bunch of M.2 NVMes. But this specific server has eight MI200 GPUs on a common OAM baseboard and uses Infinity Fabric for GPU-to-GPU communication. I've never seen an OAM-socketed anything in real life before, much less eight of them on a baseboard, so I thought this was pretty great to see in the flesh.

    The ND A100 v4 platform was also on display and looked very similar-but-different with its eight A100 GPUs and HGX baseboard:

    And unlike the MI200 variant, the general public can run on these nodes.

    I'm not sure what more I'm allowed to say, but my colleague Karl made a nice, quick video that runs through the entire Microsoft booth that's worth a watch, and more details can be had by contacting me or your favorite Microsoft account team privately.

    Of course, the hardware bar was just a way to lure people into the booth so I could achieve my real goal: meeting new folks. As I wrote before, one of my biggest realizations at SC this year is how generally confused people are about what HPC in the cloud really means--both people who come from traditional on-prem HPC and people who come from traditional enterprisey cloud. I found myself surprising many of the people with whom I spoke on the show floor with factoids that I have taken for granted. For example,

    • Linux is the most common OS on these HPC node types. While you probably(?) can run Windows if you want on this stuff, I think only a few niche markets do this.
    • The usage model for an HPC cluster in the cloud can be the same as on-prem. You can have login nodes, Slurm, home directories, parallel file systems, and all that. Jobs don't have to be containerized or turned into a VM image.
    • The InfiniBand coming out of these nodes is real InfiniBand with real OFED that supports real mpich/mvapich/OpenMPI. It's the same stuff as in on-prem supercomputers. And nodes are assembled into full-bisection fat tree InfiniBand clusters just like normal.
    • There's no noisy neighbor problem on compute nodes because HPC node types aren't shared between users. When you run a VM on an HPC node, you get the whole thing. Just like on large supercomputers.
    • There's no horrible loss of performance due to running in a VM. Virtualization extensions, PCIe passthrough, and SR-IOV bypass the hypervisor for most things. Inside your VM, you see real Zen cores and real Mellanox HCAs, not virtualized devices.

    My takeaway impression is that a lot of traditional HPC folks looked at the cloud five or ten years ago, had a sour experience, and haven't paid attention since. In those last five years, though, AI has changed the game. Massive demand for the latest CPUs and accelerators, funded by live-fast-die-young venture capital, has given cloud vendors tremendous financial incentive to catch up to on-prem levels of performance efficiency for AI workloads. And it just so happens that infrastructure that's good for AI is also good for traditional modeling and simulation.

    SCinet!

    One of the unexpected highlights of my SC this year arose from a chance encounter with a former coworker from NERSC, Ron Kumar, who gave me a whirlwind tour of SCinet.

    I have to confess great ignorance around SCinet in general; I always saw it as a weird technological proof of concept that the strange networking people at work would go off and do in the weeks leading up to the actual conference. I knew they did some impressive wide-area transfer demos (like the petabyte-in-a-day demo at SC'16), but I didn't really get the significance.

    So what is SCinet? It's this yellow bundle of cables dangling from the ceiling.


    The yellow cables are 144-core fiber trunks that bring over a terabit per second of bandwidth into the convention center from the Internet via the national research backbones like ESnet and Internet2 and distribute many terabits per second of capacity throughout the SC conference venue. For comparison, most HPC centers in the US only have a tenth of SCinet’s wide-area bandwidth at best since 400G infrastructure is still rolling out.

    Most attendees may be familiar with the row of expensive-looking networking racks behind a glass wall towards the back of the expo which is where those yellow cables dangling from the ceiling end. Here's a photo from inside that glass wall:

    What I didn't realize is that if you go around to the back of the giant walled area behind this glass display, there's a security checkpoint that gates entry into a massive network operations center (NOC) full of laptops, spools of fiber, meeting rooms, and busily working teams in charge of all the lower layers of the networking stack.

    The process to get into the NOC involves an escort and being tagged in with a tamper-proof wristband, and I learned on the tour that there's millions upon millions of dollars worth of high-end networking equipment in the racks shown above. If you look closely, you can see a security camera at the end of the aisle that speaks to this; that camera was one of many.

    Behind the pretty public-facing side of the SCinet racks is a mess of fiber and cables:

    I guess if you have to tear all this down after just a few weeks, there's no point in investing days in dressing it all up nicely! I particularly enjoyed the fiber panels in the third rack that appear to be affixed to the rack post with shoe laces.

    This year, SCinet did do a neat proof-of-concept where they demonstrated three 400G routers from three vendors (Juniper, Arista, and Cisco?) all talking the same protocol to handle what I assume is the core routing for everything in the convention center:

    I wish I remembered exactly what was going on here, but I know enough about networking to know that, despite there being standard protocols for coordinating between networking gear, each vendor does their own implementation that is rarely easy to get interoperability from. If anyone out there knows the details of this achievement, please let me know so I can explain this a little better!

    In addition to networking nerd-level demonstrations, SCinet also serves up all the wifi across the convention center. That is why there were tripods with access points scattered around, and why astute attendees may have noticed janky-looking networking equipment in odd corners that looked like this:

    Again, I get it: for a network infrastructure that's only going to last a week, I don't think it's a good use of anyone's time or money to nicely dress all the networking.

    One last factoid I didn't know until this year was that exhibitors can request 100 Gb/s network drops into their individual booths for demos (or downloading the latest version of a PowerPoint presentation really fast). The end result of supporting both a vast wifi network and 100G fiber across the show floor is that there was a lot of fiber going into the single row of SCinet equipment:

    Finally, when I posted some of these photos online during the conference, my colleague Bilel was kind enough to post a slide from the SC22 opening presentation that had the speeds and feeds of what I had toured:

    Candy Culhane shared Scinet facts #SC22 #HPC

    5.01 Tb/s of WAN capacity
    $70M in HW & SW, & services provided by 29 SCinet contrib.
    175 volunteers from 80 vol. organiz.
    > 450 wireless deployed
    29 network research exhibition proposals
    11.7 miles of fiber
    2384 fiber patch https://t.co/JtPhjVHZJd pic.twitter.com/kwGl5Ydqp5

    — Bilel Hadri (@mnoukhiya) November 16, 2022

    If you know anyone involved with SCinet, I highly recommend seeing if you can get a tour at the next SC. Even as a relative networking novice, I walked away with a much greater appreciation for the annual achievement of building SCinet. And who knows? Once I get bored of this whole storage thing, maybe I'll try getting into high-performance networking.

    Composability panel

    This year I was invited to participate in a panel titled \"Smackdown! Does HPC Need Composability Now?\" moderated by Addison Snell and Dan Olds from Intersect360 Research. This panel was...different. Unlike the traditional SC panel where panelists take turns presenting slides and saying erudite things, this panel had two teams of panelists. And my team only had one slide to present:

    The ground rules included \"personal attacks are allowed,\" and needless to say, the panel was about equal parts entertainment and technical discourse. That's not a bad thing, though.

    Addison and Dan did a phenomenal job of pulling their respective teams together and leading discussion in a format that both brought forward the key pros and cons of composability in HPC and poked fun at the thinly veiled, ego-driven personalities that often make up these sorts of panels. Rather than politely dancing around issues like sacrificing memory bandwidth by putting accelerators at the far end of a PCIe bus or gaining higher utilization by allowing users to mix and match CPUs, NICs, and GPUs, we panelists were free to shoot straight (or perhaps a bit hyperbolically) and call each other out on our hidden agendas.

    I hope it goes without saying that all of us panelists were in on the format and don't actually think the people on the other side are dumb. By wrapping technical arguments in snarky comments, we could keep the level of discussion accessible to a wide audience, drive home the key points from both sides, and ensure that we weren't losing audience members who don't care about the PhD-level details as much as they want to hear what their peers are thinking about this exciting new space. I got some feedback afterwards that I didn't seem to hold back, so if anyone did take anything I said seriously, I am very sorry!

    On a technical level, what was the outcome?

    It turns out that, after both sides argued their case, the audience was split about 60/40 between people who felt composability isn't required yet and those who felt it is. Even among panelists, many of us were a lot less convinced of our respective positions than we let on during the panel itself. I got a chuckle when I realized that I wasn't the only one who, when invited to be on the panel, asked \"what side do you want me to argue?\" I honestly could have gone either way because the dust has not yet settled. Dan Stanzione, director of TACC, gave the truest answer to the question of \"will composability help HPC\" up front--\"it depends.\" Maybe this is a growth opportunity, or maybe it's a lukewarm reception.

    Either way, composable technologies are hitting the market regardless of whether you think they'll be useful or not.  AMD Genoa supports CXL 1.1 with extensions for memory pooling, Samsung has memory-semantic SSDs, and everyone and their mother is working on photonics to get higher bandwidths and lower latencies over longer distances. This makes it easier for people to dip their toes in the water to see if composability makes sense, and I think that's what a lot of people will wind up doing in the coming years.

    Customer meetings

    Unlike in years past, my SC experience this year was dominated by customer meetings. I've been on the customer side of the table plenty of times, but I was surprised to find that it was actually more fun to be on the vendor side for a change. I'm part salesman at heart, so I found it personally gratifying to end a meeting with people nodding along rather than scratching their heads. I learned as a customer that it's very easy for vendors to go way off the rails and waste everyone's time, so I was grateful to have avoided the awkward confusion that punctuates those kinds of meetings.

    I also went into the week worrying that I'd be sitting in the same room, hearing the same pitch and the same jokes, and answering the same questions all week. Thankfully, I work with some great field, business, and product teams who set up interesting conversations rather than rote recitations of boring roadmap slides. Approaching the same topics from different angles helped me figure out how all the pieces of what I'm working on fit together to make a complete picture too; there weren't nearly as many opportunities to do this in the DOE world since the end-users of the HPC systems on which I worked aren't told anything until all the design decisions have already been made.

    A few personal notes

    This SC was significant to me on a variety of levels; it was the first time I'd gotten on an airplane since February 2020, the first time I'd traveled since starting a new job at a new company, and the first time I'd met any of my new coworkers outside of the structure of a Teams call. During the pandemic I realized that getting out into the world and talking to people from all corners of HPC were my favorite parts of my job. Not being able to go to events like SC and maintain that sense of community involvement dramatically impacted my level of professional satisfaction over the last two years, so I'm glad I was able to finally go this year.

    Though customer meetings were a lot more fun than I expected them to be, I still felt bummed that I could spend so little time walking the expo, talking to folks, and attending all the BOFs normally on my must-attend list. Compounding this was my personal choice to not dine indoors and consequently miss out on almost all other chances to catch up with old friends and colleagues. I also decided to leave SC a day earlier than I usually do to reduce my risk of getting sick which didn't help either. There's never enough time at SC, but this year was particularly pressed.

    I say all this not to complain, but to say how much I appreciated the people who went out of their way to come accost me during the precious few hours I actually had on the exhibit floor. Some I'd not seen since SC'19, and some I'd never actually met since we only started working together mid-pandemic. The conference is busy for everyone, so giving me a slice of your time was very meaningful. That sense of community membership is why I go to SC, it's why I still work in this business, and it's why I try to contribute whatever I can to whomever wants it whether it be a student, engineer, salesperson, or marketer.

    ", + "url": "https://hpc.social/personal-blog/2022/sc-22-recap/", + + + + + + "date_published": "2022-11-24T02:00:00-07:00", + "date_modified": "2022-11-24T02:00:00-07:00", + + "author": "Glenn K. Lockwood's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/converged-computing/", + "title": "Converged Computing", + "summary": null, + "content_text": "For many years, there has been a battle between cloud and HPC. The cloud side of the equation says “micro services, cloud native!”and the HPC side says “too expensive!” Conversations often don’t progress because both sides are up-in-arms and focused on why they cannot work together. At best, we might get access to cloud from an HPC center,or an company might present a product as branded for “HPC.” But it’s not truly collaborative in the way that I’d like.I’ll also step back and comment that (I do not believe) folks (myself included) on the HPC side have done enoughto sit at the table. For example, we haven’t been a voice in the Open Containers Initiative (although I’ve tried), nor have we been present (historically) for conferences that are more focused around cloud native technologies.There is no pointing fingers or fault here - it’s just a matter of two different cultures, and it’s been challenging figuring out how to talk to one another, and how to work together. I’ve tried my best to be involved, to the best of my ability, in small ways on both sides. But I’m only one person. This isn’t to say there haven’t been small collaborations, but I believe we can do more.Change is ComingI think this is going to change. The reason is because both sides of the equation have started to realize we have similar goals,and it’s not about creating hybrid environments – having both pancakes and waffles for breakfast – but rather convergence – recognizing that pancakes and waffles are both kinds of breakfast cakes, and we can take features that we like of each to create a breakfast cake that will make everyone happy.The idea of “Converged Computing” comes from my amazing team (see Dan’s talk at KubeCon here) and is the idea that technologies from HPC can be integrated into more traditionally cloud approaches to produce a solution thatsolves problems on both sides. Explicitly for these projects, it means testing the Flux Framework scheduler alongside Kubernetes. Do we still want portable workflows that can move from an HPC environment to cloud? Of course.However, the niche or gradient that I’m interested in is the space that lives between these two worlds.While I won’t go into huge detail (this would be more appropriate for a talk) the lab openly works on Flux Framework, a resource manager that (in my opinion) is one of the coolest projects coming out of our space. I started working with these teams a few months ago, and am bringing my excitement and vision for (what I hope to be) a future where we are actively developing alongside other Kubernetes projects, and our work is well-known and established in this space.What does that mean? Let me share some cool work under development. This is all being done publicly on GitHub, so there isno issue to talk about it! My first year or so at the lab I was hired under a research project, and although I learned a lot, I haven’t felt inspired and driven until starting this work. Let’s talk about some of it! 🎉️The Flux OperatorIf you aren’t familiar with Kubernetes Operators, let’s step back and talk about a human operator. 
If you are a syadmin managing appswith associated services and databases on a cluster, you often had to do maintenance or update tasks like increasing a storage volume,or modifying a service to a new user need. As this pattern has emerged as a common thing, they have come up with the concept of a Kubernetes Operator - an actual controller you install to your cluster that can automate this. In simple terms, after you install an operator to your cluster,you can hand it a desired state (represented in a yaml configuration file) and the operator will do whatever it takes to reach that state. What does that means in the context of Flux? The Flux Operator is interested in creatingwhat we are calling a “Mini Cluster,” illustrated below.In Kubernetes object terms this is an Indexed Job, a few config maps, secrets, and a RESTFul API and user interface that I designed exposed as a service. You can read more about our current design here.This Mini Cluster is generated from a “custom resource definition” or CRD (the yaml you provide), and it can take these parameters. Concetually, you as the user own the Mini Cluster and can submit jobs to it (either via the web interface or the API) until you are done. When you are done, you can bring down the cluster.We are excited for this work because in the next months (to a bit longer) we are going to be testing different kinds of workloads running using Flux alongside this Mini Cluster, but on Kubernetes! I’ve started a small repository of dummy examples that I’m extending quickly atrse-ops/flux-hpc and please open an issue there if you have a suggestion.Stay Tuned!Stay tuned for more work in this space! I’ve been doing a ton of programming in Go, Python, and workingon a wide range of technologies, and fairly quickly, and I am very much in my happy place. Please come and join us! ❤️", + "content_html": "

    For many years, there has been a battle between cloud and HPC. The cloud side of the equation says “micro services, cloud native!” and the HPC side says “too expensive!” Conversations often don’t progress because both sides are up-in-arms and focused on why they cannot work together. At best, we might get access to cloud from an HPC center, or a company might present a product as branded for “HPC.” But it’s not truly collaborative in the way that I’d like.

    I’ll also step back and comment that (I do not believe) folks (myself included) on the HPC side have done enough to sit at the table. For example, we haven’t been a voice in the Open Containers Initiative (although I’ve tried), nor have we been present (historically) at conferences that are more focused around cloud native technologies. There is no pointing fingers or fault here - it’s just a matter of two different cultures, and it’s been challenging figuring out how to talk to one another, and how to work together. I’ve tried my best to be involved, to the best of my ability, in small ways on both sides. But I’m only one person. This isn’t to say there haven’t been small collaborations, but I believe we can do more.

    Change is Coming

    I think this is going to change. The reason is that both sides of the equation have started to realize we have similar goals, and it’s not about creating hybrid environments – having both pancakes and waffles for breakfast – but rather convergence – recognizing that pancakes and waffles are both kinds of breakfast cakes, and we can take the features that we like from each to create a breakfast cake that will make everyone happy. The idea of “Converged Computing” comes from my amazing team (see Dan’s talk at KubeCon here) and is the idea that technologies from HPC can be integrated into more traditional cloud approaches to produce a solution that solves problems on both sides. Explicitly for these projects, it means testing the Flux Framework scheduler alongside Kubernetes. Do we still want portable workflows that can move from an HPC environment to cloud? Of course. However, the niche or gradient that I’m interested in is the space that lives between these two worlds.

    While I won’t go into huge detail (this would be more appropriate for a talk), the lab openly works on Flux Framework, a resource manager that (in my opinion) is one of the coolest projects coming out of our space. I started working with these teams a few months ago, and am bringing my excitement and vision for (what I hope to be) a future where we are actively developing alongside other Kubernetes projects, and our work is well-known and established in this space. What does that mean? Let me share some cool work under development. This is all being done publicly on GitHub, so there is no issue with talking about it! My first year or so at the lab I was hired under a research project, and although I learned a lot, I hadn’t felt inspired and driven until starting this work. Let’s talk about some of it! 🎉️

    The Flux Operator

    If you aren’t familiar with Kubernetes Operators, let’s step back and talk about a human operator. If you are a sysadmin managing apps with associated services and databases on a cluster, you often have to do maintenance or update tasks like increasing a storage volume, or modifying a service for a new user need. As this pattern has emerged as a common thing, the Kubernetes community has come up with the concept of an Operator - an actual controller you install to your cluster that can automate this. In simple terms, after you install an operator to your cluster, you can hand it a desired state (represented in a yaml configuration file) and the operator will do whatever it takes to reach that state. What does that mean in the context of Flux? The Flux Operator is interested in creating what we are calling a “Mini Cluster,” illustrated below.

    In Kubernetes object terms this is an Indexed Job, a few config maps, secrets, and a RESTful API and user interface that I designed, exposed as a service. You can read more about our current design here.

    This Mini Cluster is generated from a “custom resource definition” or CRD (the yaml you provide), and it can take these parameters. Conceptually, you as the user own the Mini Cluster and can submit jobs to it (either via the web interface or the API) until you are done. When you are done, you can bring down the cluster.
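
    As a rough illustration of what driving this from the Kubernetes Python client could look like--the group/version and spec fields below are best-guess placeholders on my part, so check the Flux Operator's documentation for the real CRD schema:

        from kubernetes import client, config

        config.load_kube_config()
        api = client.CustomObjectsApi()

        # Hypothetical MiniCluster custom resource; field names are illustrative.
        minicluster = {
            "apiVersion": "flux-framework.org/v1alpha1",
            "kind": "MiniCluster",
            "metadata": {"name": "demo", "namespace": "flux-operator"},
            "spec": {
                "size": 4,  # number of pods (Flux brokers) in the Indexed Job
                "containers": [{
                    "image": "ghcr.io/rse-ops/lammps:latest",  # example app image
                    "command": "lmp -in in.lj",
                }],
            },
        }

        # Submitting a custom resource uses the standard CustomObjectsApi call
        api.create_namespaced_custom_object(
            group="flux-framework.org",
            version="v1alpha1",
            namespace="flux-operator",
            plural="miniclusters",
            body=minicluster,
        )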

    We are excited about this work because in the next months (or a bit longer) we are going to be testing different kinds of workloads that run using Flux inside this Mini Cluster, but on Kubernetes! I’ve started a small repository of dummy examples that I’m extending quickly at rse-ops/flux-hpc, and please open an issue there if you have a suggestion.

    Stay Tuned!

    Stay tuned for more work in this space! I’ve been doing a ton of programming in Go and Python, working on a wide range of technologies (and fairly quickly), and I am very much in my happy place. Please come and join us! ❤️

    ", + "url": "https://hpc.social/personal-blog/2022/converged-computing/", + + + + + + "date_published": "2022-11-18T08:30:00-07:00", + "date_modified": "2022-11-18T08:30:00-07:00", + + "author": "Vanessasaurus" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/ceph-osd-cpu-scaling-part-1/", + "title": "Ceph OSD CPU Scaling - Part 1", + "summary": null, + "content_text": "Last summer we had a user that hit some performance issues based on a recommendation to use 2 cores per OSD in their systems. I wanted to provide some data for the community and wrote up a blog post on the ceph.io website. Please take a look!", + "content_html": "

    Last summer we had a user who hit some performance issues based on a recommendation to use 2 cores per OSD in their systems. I wanted to provide some data for the community and wrote up a blog post on the ceph.io website. Please take a look!

    ", + "url": "https://hpc.social/personal-blog/2022/ceph-osd-cpu-scaling-part-1/", + + + + + + "date_published": "2022-11-08T00:00:00-07:00", + "date_modified": "2022-11-08T00:00:00-07:00", + + "author": "Mark Nelson's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/containerize-it-baby/", + "title": "Containerize It, Baby!", + "summary": null, + "content_text": "I’ve just submit my entry to the HPC Guru Elevator Pitch Contest for the Supercomputing 2022 conference!I’m fairly sure (like many of these contests) it will be a politically correct winner - someone that is best appealingto the conference, but I’ll take a stand right now that I think my submission is tops in terms of creativityand excited energy! I mean, there is just no alternative when it comes to technologies I’m excited about. Containerize it, baby!Mic Drop! 🎙️Regardless of the outcome of this contest, I feel like I’ve already won - I’ve had so much fun making this and sharing with the community! 🎉️", + "content_html": "

    I’ve just submitted my entry to the HPC Guru Elevator Pitch Contest for the Supercomputing 2022 conference!

    I’m fairly sure that (like many of these contests) the winner will be a politically correct one - someone whose entry best appeals to the conference - but I’ll take a stand right now that I think my submission is tops in terms of creativity and excited energy! I mean, there is just no alternative when it comes to technologies I’m excited about.

    Containerize it, baby!

    Mic Drop! 🎙️

    Regardless of the outcome of this contest, I feel like I’ve already won - I’ve had so much fun making this and sharing with the community! 🎉️

    ", + "url": "https://hpc.social/personal-blog/2022/containerize-it-baby/", + + + + + + "date_published": "2022-11-03T09:30:00-06:00", + "date_modified": "2022-11-03T09:30:00-06:00", + + "author": "Vanessasaurus" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/happy-living-close-ish-to-the-metal/", + "title": "happy living close (-ish) to the metal", + "summary": null, + "content_text": "For various reasons, I’ve been doing a little bit of career introspection lately. One of the interesting realizations to come out of this is that, despite in practice doing mostly software work, I’ve been happiest when my work involved a strong awareness of the hardware I was running on.I suppose it shouldn’t be a surprise, exactly, but I hadn’t exactly thought about it in those terms before! Before I got into computing, I got a bachelors degree in physics, and got through much of a PhD in materials science. While I wasn’t building computers directly, I was definitely working regularly on hardware, building experimental apparatus involving various combinations of vacuum chambers, lasers, exotic microscopes, custom electronics, and microfluidics.In terms of my computing career, I’ve generally worked in the area of “high-performance computing”, a buzzword that means I’ve focused on building fast parallel systems aimed at researchers. It’s a sub-field that lends itself to awareness of hardware: even as a new baby sysadmin, I was staring at motherboard block diagrams and thinking about the performance differences between different PCIe topologies. And because HPC is one of the areas that took the longest to embrace cloud computing, I spent a lot of years doing work in datacenters. Most of my work would usually involve writing code, doing configuration management, and managing Linux systems… but on a regular basis I’d head into a big loud room full of air conditioners and server racks, carrying a screwdriver.Amusingly, my relatively recent stint at a hyperscaler was the first time I had worked on computers, but didn’t have my office in the same building as the computers I was running! Even there I was at least somewhat cognizant of hardware specifics, and one of my early projects was performance testing on the Bryce Canyon storage node, to see if it was ready for use in a large-scale distributed filesystem.And these days, at NVIDIA, I’m enjoying being even closer to the metal. (At least conceptually; I still work remote…) I spend my days thinking about datacenter requirements, cable lengths, firmware upgrades, hardware health checks, and application performance tests on large clusters. And I love getting to play with these shiny toys.Anyway, this is just a ramble. But a useful one. While I’d be the first to admit that cloud has its place, and I use it for some personal projects, I really enjoy understanding the hardware I run on. I have trouble thinking of computers as remote abstractions with no underlying detail. They are pleasingly physical in my mind, even if they’re thousands of miles away.", + "content_html": "

    For various reasons, I’ve been doing a little bit of career introspection lately. One of the interesting realizations to come out of this is that, despite in practice doing mostly software work, I’ve been happiest when my work involved a strong awareness of the hardware I was running on.

    I suppose it shouldn’t be a surprise, exactly, but I hadn’t exactly thought about it in those terms before! Before I got into computing, I got a bachelors degree in physics, and got through much of a PhD in materials science. While I wasn’t building computers directly, I was definitely working regularly on hardware, building experimental apparatus involving various combinations of vacuum chambers, lasers, exotic microscopes, custom electronics, and microfluidics.

    In terms of my computing career, I’ve generally worked in the area of “high-performance computing”, a buzzword that means I’ve focused on building fast parallel systems aimed at researchers.

    It’s a sub-field that lends itself to awareness of hardware: even as a new baby sysadmin, I was staring at motherboard block diagrams and thinking about the performance differences between different PCIe topologies.

    And because HPC is one of the areas that took the longest to embrace cloud computing, I spent a lot of years doing work in datacenters. Most of my work would usually involve writing code, doing configuration management, and managing Linux systems… but on a regular basis I’d head into a big loud room full of air conditioners and server racks, carrying a screwdriver.

    Amusingly, my relatively recent stint at a hyperscaler was the first time I worked on computers but didn’t have my office in the same building as the computers I was running! Even there I was at least somewhat cognizant of hardware specifics, and one of my early projects was performance testing on the Bryce Canyon storage node, to see if it was ready for use in a large-scale distributed filesystem.

    And these days, at NVIDIA, I’m enjoying being even closer to the metal. (At least conceptually; I still work remote…) I spend my days thinking about datacenter requirements, cable lengths, firmware upgrades, hardware health checks, and application performance tests on large clusters. And I love getting to play with these shiny toys.

    Anyway, this is just a ramble. But a useful one. While I’d be the first to admit that cloud has its place, and I use it for some personal projects, I really enjoy understanding the hardware I run on. I have trouble thinking of computers as remote abstractions with no underlying detail. They are pleasingly physical in my mind, even if they’re thousands of miles away.

    ", + "url": "https://hpc.social/personal-blog/2022/happy-living-close-ish-to-the-metal/", + + + + + + "date_published": "2022-11-02T00:18:17-06:00", + "date_modified": "2022-11-02T00:18:17-06:00", + + "author": "Thinking Out Loud" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/the-web-services-i-self-host/", + "title": "The web services I self-host", + "summary": null, + "content_text": "Why self-host anything?In a lot of ways, self-hosting web services is signing up for extra pain. Most useful web services are available in SaaS format these days, and most people don’t want to be a sysadmin just to use chat, email, or read the news.In general, I decide to self-host a service if one of two things is true:Self-hosting is going to add a capability that’s difficult to find in a SaaS alternative. That might be privacy, or extra compute, or just an extra degree of customization that I want.I find it interesting or amusing to self-host it! I have been a professional sysadmin, and ran production web services for over a decade. So I enjoy messing around with servers, and can have a fair amount of fun with this.Infrastructure and general toolingRight now my self-hosted services are hosted on Oracle Cloud Infrastructure, for a very simple reason: OCI includes a very generous Always Free tier, which doesn’t even ask for a credit card! So I’m confident I’m not going to accidentally spend any money. I use ARM Ampere A1 Compute instances for service hosting.The individual services are mostly managed using Docker Compose files, though a few are just running bare-metal. I have so far managed to resist the urge to put everything in Kubernetes.Everything is backed up on a regular basis using Tarsnap.I also use Tailscale to provide a VPN between my cloud servers and my various client devices (phone, laptop, tablet). If a service needs to be exposed to the public Internet to function, I do that… but otherwise, everything is only exposed within the Tailscale VPN, so that only my own devices can access them. This is both a lovely convenience (not having to manage as many DNS records), and provides an extra degree of security by hiding services that no one else needs to access.Services that I self-hostRSS reader: Despite the demise of Google Reader back in the mists of time, I’ve been a consistently heavy user of RSS feed since at least 2008. At times I’ve used commercial products such as Feedly, but these days I self-host the aggregator using FreshRSS. I use FreshRSS partly because it’s pretty easy to spin up and administer, and partly because it’s compatible with Reeder, a Mac and iOS app that I generally use to actually read my feeds.Fediverse instance: I run a self-hosted instance on the Fediverse ensemble of social networking sites. The best-known tool for this is Mastodon, but I currently use the Pleroma server, mostly because it seemed less painful to set up and configure. I run my own instance partly out of curiosity, and partly because I didn’t strongly resonate with any particular topic-specific server that’s already out there.IRC bouncer: I’m not on IRC very much these days, but I do like to avoid losing messages, and sometimes want to be logged into the same channels on different physical clients. So I run a ZNC server to maintain persistence.Matrix server: Matrix is a decentralized messaging platform that supports end-to-end encryption. Think of it as being a little like the Fediverse, but for chat rather than microblogging. 
This falls pretty squarely in the category of “I find this amusing to run”, because I mostly chat with less-nerdy folks on other, commercial platforms.Git server: I run a Gitea server which I use to mirror my own repos, as well as a variety of other open source repos. This is mostly to ensure that I have an up-to-date backup of repos I care about, independent of Github or whatever provider.Jupyter notebooks: I keep a persistent Jupyter notebook instance running for random code experiments and as a tiny development playground. This runs on its own VM where I also do other random software development, and it’s separate from the other services mostly so I don’t take down all my personal infra with an accidental OOM from a big build.Software package repository: I run an instance of Nexus Repository OSS, mostly to cache Docker images and other content that run the rest of the services above!Services where I use managed hosting but don’t own the serverThis website! My regular website and this blog run on a shared hosting provider, mostly through inertia. (I’ve used the same hosting provider for web hosting since around 2008.)Email: In theory it’s an open, federated system similar to the Fediverse. In practice, the combination of spam and the growth of large providers makes it increasingly painful to run a server yourself. This post from Carlos Fenollosa does a good job of describing the difficulties.I do, however, run all my email through my own domain, though it’s hosted via Google Apps GSuite Google Workspace. I also back up my inbox locally on a regular basis. That means that if Google ever decides to remove my account, charge obnoxious costs, or otherwise misbehave, my email address is at least portable to other providers.", + "content_html": "

    Why self-host anything?

    In a lot of ways, self-hosting web services is signing up for extra pain. Most useful web services are available in SaaS format these days, and most people don’t want to be a sysadmin just to use chat, email, or read the news.

    In general, I decide to self-host a service if one of two things is true:

    • Self-hosting is going to add a capability that’s difficult to find in a SaaS alternative. That might be privacy, or extra compute, or just an extra degree of customization that I want.
    • I find it interesting or amusing to self-host it! I have been a professional sysadmin, and ran production web services for over a decade. So I enjoy messing around with servers, and can have a fair amount of fun with this.

    Infrastructure and general tooling

    Right now my self-hosted services are hosted on Oracle Cloud Infrastructure, for a very simple reason: OCI includes a very generous Always Free tier, which doesn’t even ask for a credit card! So I’m confident I’m not going to accidentally spend any money. I use ARM Ampere A1 Compute instances for service hosting.

    The individual services are mostly managed using Docker Compose files, though a few are just running bare-metal. I have so far managed to resist the urge to put everything in Kubernetes.
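
    For a concrete flavor, one of these Compose files looks roughly like the sketch below (the FreshRSS image is the official one, but the port binding and volume paths here are assumptions rather than my exact setup):

    # Sketch: define one service in docker-compose.yml, then bring it up.
    cat > docker-compose.yml <<'EOF'
    services:
      freshrss:
        image: freshrss/freshrss:latest
        ports:
          - \"127.0.0.1:8080:80\"
        volumes:
          - ./freshrss-data:/var/www/FreshRSS/data
        restart: unless-stopped
    EOF
    docker compose up -d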

    Everything is backed up on a regular basis using Tarsnap.
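
    In practice that kind of backup is just a cron job running something along these lines (a sketch; the key file, cache directory, and data paths are assumptions):

    # Nightly Tarsnap archive of service data (paths and naming are assumptions).
    tarsnap --keyfile /root/tarsnap.key \
            --cachedir /usr/local/tarsnap-cache \
            -c -f services-$(date +%Y-%m-%d) \
            /srv/services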

    I also use Tailscale to provide a VPN between my cloud servers and my various client devices (phone, laptop, tablet). If a service needs to be exposed to the public Internet to function, I do that… but otherwise, everything is only exposed within the Tailscale VPN, so that only my own devices can access them. This is both a lovely convenience (not having to manage as many DNS records), and provides an extra degree of security by hiding services that no one else needs to access.
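
    As a sketch of what “only exposed within the Tailscale VPN” can look like in practice (the container and port are examples, not my actual stack):

    # Bind a service to this machine's Tailscale address only, so it is
    # reachable from my own devices but never from the public Internet.
    TS_IP=$(tailscale ip -4)   # prints the node's Tailscale IPv4 address
    docker run -d -p ${TS_IP}:8888:8888 jupyter/base-notebook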

    Services that I self-host

    • RSS reader: Despite the demise of Google Reader back in the mists of time, I’ve been a consistently heavy user of RSS feeds since at least 2008. At times I’ve used commercial products such as Feedly, but these days I self-host the aggregator using FreshRSS. I use FreshRSS partly because it’s pretty easy to spin up and administer, and partly because it’s compatible with Reeder, a Mac and iOS app that I generally use to actually read my feeds.
    • Fediverse instance: I run a self-hosted instance on the Fediverse ensemble of social networking sites. The best-known tool for this is Mastodon, but I currently use the Pleroma server, mostly because it seemed less painful to set up and configure. I run my own instance partly out of curiosity, and partly because I didn’t strongly resonate with any particular topic-specific server that’s already out there.
    • IRC bouncer: I’m not on IRC very much these days, but I do like to avoid losing messages, and sometimes want to be logged into the same channels on different physical clients. So I run a ZNC server to maintain persistence.
    • Matrix server: Matrix is a decentralized messaging platform that supports end-to-end encryption. Think of it as being a little like the Fediverse, but for chat rather than microblogging. This falls pretty squarely in the category of “I find this amusing to run”, because I mostly chat with less-nerdy folks on other, commercial platforms.
    • Git server: I run a Gitea server which I use to mirror my own repos, as well as a variety of other open source repos. This is mostly to ensure that I have an up-to-date backup of repos I care about, independent of Github or whatever provider.
    • Jupyter notebooks: I keep a persistent Jupyter notebook instance running for random code experiments and as a tiny development playground. This runs on its own VM where I also do other random software development, and it’s separate from the other services mostly so I don’t take down all my personal infra with an accidental OOM from a big build.
    • Software package repository: I run an instance of Nexus Repository OSS, mostly to cache Docker images and other content that run the rest of the services above!

    Services where I use managed hosting but don’t own the server

    • This website! My regular website and this blog run on a shared hosting provider, mostly through inertia. (I’ve used the same hosting provider for web hosting since around 2008.)
    • Email: In theory it’s an open, federated system similar to the Fediverse. In practice, the combination of spam and the growth of large providers makes it increasingly painful to run a server yourself. This post from Carlos Fenollosa does a good job of describing the difficulties.

      I do, however, run all my email through my own domain, though it’s hosted via Google Workspace (formerly Google Apps, then G Suite). I also back up my inbox locally on a regular basis. That means that if Google ever decides to remove my account, charge obnoxious costs, or otherwise misbehave, my email address is at least portable to other providers.
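
    A sketch of that kind of local inbox backup, using isync/mbsync as one common tool for the job (the post doesn’t say which tool is actually used, and the account setup in ~/.mbsyncrc is assumed):

    # Pull the whole IMAP mailbox down to a local Maildir.
    # Assumes ~/.mbsyncrc defines the account and channels; -a syncs them all.
    mbsync -a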

    ", + "url": "https://hpc.social/personal-blog/2022/the-web-services-i-self-host/", + + + + + + "date_published": "2022-10-30T21:59:55-06:00", + "date_modified": "2022-10-30T21:59:55-06:00", + + "author": "Thinking Out Loud" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/qemu-kvm-ceph-librbd-performance/", + "title": "QEMU/KVM + Ceph Librbd Performance", + "summary": null, + "content_text": "Checkout my blog post at the ceph.io website about tuning QEMU/KVM for high performance with librbd. We got over 123K random read IOPs with 16K IOs from a single VM!", + "content_html": "

    Check out my blog post on the ceph.io website about tuning QEMU/KVM for high performance with librbd. We got over 123K random read IOPS with 16K IOs from a single VM!
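
    For context, a number like that is typically measured with fio inside the guest, along these lines (a sketch; the device path, queue depth, and job count are assumptions, not the post’s exact rig):

    # 16K random reads against a librbd-backed virtio disk, from inside the VM.
    fio --name=randread-16k --filename=/dev/vdb --direct=1 \
        --rw=randread --bs=16k --ioengine=libaio --iodepth=64 \
        --numjobs=4 --group_reporting --time_based --runtime=60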

    ", + "url": "https://hpc.social/personal-blog/2022/qemu-kvm-ceph-librbd-performance/", + + + + + + "date_published": "2022-10-24T01:00:00-06:00", + "date_modified": "2022-10-24T01:00:00-06:00", + + "author": "Mark Nelson's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/dashboards-for-learning-data-visualizations/", + "title": "Dashboards for Learning Data Visualizations", + "summary": null, + "content_text": "Creating dashboards and data visualizations are a favorite past time of mine. Also, I jump at any chance to learn a new technology. That is why I have spent the last couple of months building dashboards and data visualizations for various projects while learning several web technologies.Through these dashboards, I have learned many new technologies: React and NextJS Mapping libraries such as Leaflet and Mapbox CSS libraries such as TailwindCSS Data access JS clients for Elasticsearch and Prometheus Website hosting service Vercel Data Visualization library D3.jsGP-ARGO DashboardThe Great Plains Augmented Regional Gateway to the Open Science Grid (GP-ARGO) is a regional collaboration of 16 campuses hosting computing that is made available to the OSG. My goal with the GP-ARGO dashboard was to show who is using the resources, as well as give high level overview of the region and sites hosting GP-ARGO resources.The metrics are gathered from OSG’s GRACC Elasticsearch. The list of projects are also from GRACC, and the bar graph in the bottom right are from OSG is simply an iframe to a grafana panel from GRACC.Technologies used: React, NextJS, Leaflet, ElasticsearchRepo: GP-ARGO MapOSDF WebsiteMy next website was the Open Science Data Federation landing page. I was more bold in the design of the OSDF page. I took heavy inspiration from other technology websites such as the Mapbox website and the Lens website. The theme is darker and it was also my first experience with the TailwindCSS library. Additionally, I learned the CSS flexbox layout techniques.The spinning globe is using the Globe.gl library. The library is great to create visualizations to show distribution throughout the world. On the globe I added “transfers” between the OSDF origins and caches. Each origin sends transfers to every cache in the visualization, though it’s all just animation. There is no data behind the transfers, it’s only for visual effect. Also, on the globe, each cache location is labeled. The globe can be rotated and zoomed with your mouse.The number of bytes read and files read is gathered using the Elasticsearch client querying GRACC, the OSG’s accounting service. The OSG gathers statistics on every transfer a cache or origin perform. Additionally, we calculate the rate of data transfers and rate of files being read using GRACC.One unique feature of the OSDF website is the resiliency of the bytes read and files read metrics. We wanted to make sure that the metrics would be shown even if a data component has failed. The metrics are gathered in 3 different ways for resiliency: If all components are working correctly, the metrics are downloaded from the OSG’s Elasticsearch instance. If OSG Elasticsearch has failed, the dashboard pulls saved metrics from NRP’s S3 storage. The metrics are saved everytime they are succesfully gathered from Elasticsearch, so they should be fairly recent. The metrics are gathered and saved on each website build. The metrics are static and immediatly available upon website load. 
If all else fails, these saved static metrics are always available, even if they may be old.Technologies used: React, NextJS, Globe.glRepo: OSDF WebsiteNRP DashboardThe National Research Platform dashboard is largely similar to the GP-ARGO dashboard. It uses the same basic framework and technologies. But, the data acquisition is different.The metrics shown are the number of gpus allocated, number of pod running, and the number of active research groups. The metrics are gathered from the NRP’s prometheus server on-demand. The graph in the background of the metric is generated with D3.js.Technologies used: React, NextJS, D3.js, Prometheus, TailwindCSSRepo: NRP Map AppPNRP WebsiteThe Prototype National Research Platform is a NSF research platform. The dashboard is also in prototype stage as the PNRP hardware is not fully delivered and operational yet.The dashboard is my first experience with a large map from Mapbox. I used a React binding to interface with the Mapbox service. Also, when you click on a site, it zooms into the building where the PNRP hardware will be hosted.The transfer metrics come from the NRP’s prometheus which shows the bytes moving into and out of the node. The transfer metrics are for cache nodes nearby the sites, but once PNRP hardware becomes operational the transfer metrics will show the site’s cache.Technologies Used: React, NextJS, Mapbox, TailwindCSS, PrometheusRepo: NRP Website", + "content_html": "

    Creating dashboards and data visualizations is a favorite pastime of mine. Also, I jump at any chance to learn a new technology. That is why I have spent the last couple of months building dashboards and data visualizations for various projects while learning several web technologies.

    Through these dashboards, I have learned many new technologies:

    • React and NextJS
    • Mapping libraries such as Leaflet and Mapbox
    • CSS libraries such as TailwindCSS
    • Data access JS clients for Elasticsearch and Prometheus
    • Website hosting service Vercel
    • Data Visualization library D3.js

    GP-ARGO Dashboard

    The Great Plains Augmented Regional Gateway to the Open Science Grid (GP-ARGO) is a regional collaboration of 16 campuses hosting computing that is made available to the OSG. My goal with the GP-ARGO dashboard was to show who is using the resources, as well as give a high-level overview of the region and the sites hosting GP-ARGO resources.

    The metrics are gathered from OSG’s GRACC Elasticsearch. The list of projects is also from GRACC, and the bar graph in the bottom right is simply an iframe to a Grafana panel from GRACC.

    Technologies used: React, NextJS, Leaflet, Elasticsearch

    Repo: GP-ARGO Map

    \"GP-ARGO\"

    OSDF Website

    My next website was the Open Science Data Federation landing page. I was more bold in the design of the OSDF page. I took heavy inspiration from other technology websites such as the Mapbox website and the Lens website. The theme is darker and it was also my first experience with the TailwindCSS library. Additionally, I learned the CSS flexbox layout techniques.

    The spinning globe is using the Globe.gl library. The library is great to create visualizations to show distribution throughout the world. On the globe I added “transfers” between the OSDF origins and caches. Each origin sends transfers to every cache in the visualization, though it’s all just animation. There is no data behind the transfers, it’s only for visual effect. Also, on the globe, each cache location is labeled. The globe can be rotated and zoomed with your mouse.

    The number of bytes read and files read is gathered using the Elasticsearch client querying GRACC, the OSG’s accounting service. The OSG gathers statistics on every transfer a cache or origin performs. Additionally, we calculate the rate of data transfers and the rate of files being read using GRACC.
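
    As a rough sketch of the kind of aggregation behind those numbers (the endpoint, index, and field names are illustrative guesses; the dashboard itself goes through the JS client):

    # Sum bytes read over the last day from a GRACC-style Elasticsearch index.
    curl -s -H 'Content-Type: application/json' \
      'https://gracc.example.org/q/gracc.osg.summary/_search' -d '{
        \"size\": 0,
        \"query\": { \"range\": { \"EndTime\": { \"gte\": \"now-1d\" } } },
        \"aggs\": { \"bytes_read\": { \"sum\": { \"field\": \"BytesRead\" } } }
      }'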

    One unique feature of the OSDF website is the resiliency of the bytes read and files read metrics. We wanted to make sure that the metrics would be shown even if a data component has failed. The metrics are gathered in 3 different ways for resiliency:

    1. If all components are working correctly, the metrics are downloaded from the OSG’s Elasticsearch instance.
    2. If OSG Elasticsearch has failed, the dashboard pulls saved metrics from NRP’s S3 storage. The metrics are saved every time they are successfully gathered from Elasticsearch, so they should be fairly recent.
    3. The metrics are gathered and saved on each website build. The metrics are static and immediately available upon website load. If all else fails, these saved static metrics are always available, even if they may be old. (A rough sketch of this fallback chain follows the list.)
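
    Roughly, that fallback chain comes down to logic like this (the URLs and file path are placeholders, not the real endpoints):

    # Try Elasticsearch first, then the S3 copy, then the static build-time file.
    METRICS=$(curl -sf 'https://gracc.example.org/summary') \
      || METRICS=$(curl -sf 'https://s3.example.org/osdf/metrics.json') \
      || METRICS=$(cat ./static/metrics.json)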

    Technologies used: React, NextJS, Globe.gl

    Repo: OSDF Website

    \"OSDF\"

    NRP Dashboard

    The National Research Platform dashboard is largely similar to the GP-ARGO dashboard. It uses the same basic framework and technologies. But, the data acquisition is different.

    The metrics shown are the number of GPUs allocated, the number of pods running, and the number of active research groups. The metrics are gathered from the NRP’s Prometheus server on demand. The graph in the background of the metric is generated with D3.js.
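
    To give a flavor of that on-demand gathering, the query is roughly of this shape against the Prometheus HTTP API (the server URL and metric/label names are placeholders):

    # Ask Prometheus how many GPUs are currently requested across pods.
    curl -s 'https://prometheus.example.org/api/v1/query' \
      --data-urlencode 'query=sum(kube_pod_container_resource_requests{resource=\"nvidia_com_gpu\"})'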

    Technologies used: React, NextJS, D3.js, Prometheus, TailwindCSS

    Repo: NRP Map App

    \"NRP

    PNRP Website

    The Prototype National Research Platform is an NSF research platform. The dashboard is also at the prototype stage, as the PNRP hardware is not yet fully delivered and operational.

    The dashboard is my first experience with a large map from Mapbox. I used a React binding to interface with the Mapbox service. Also, when you click on a site, it zooms into the building where the PNRP hardware will be hosted.

    The transfer metrics come from the NRP’s Prometheus, which shows the bytes moving into and out of the node. The transfer metrics are for cache nodes near the sites, but once the PNRP hardware becomes operational the transfer metrics will show each site’s cache.

    Technologies Used: React, NextJS, Mapbox, TailwindCSS, Prometheus

    Repo: NRP Website

    \"PNRP

    ", + "url": "https://hpc.social/personal-blog/2022/dashboards-for-learning-data-visualizations/", + + + + + + "date_published": "2022-09-14T06:00:00-06:00", + "date_modified": "2022-09-14T06:00:00-06:00", + + "author": "Derek Weitzel's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/tunel-apps-for-hpc/", + "title": "Tunel- Apps for HPC", + "summary": null, + "content_text": "A few months ago I was talking about ssh tunnels. The reason was because I was looking for a solution to deploy apps (like a Jupyter notebook) onto HPC.After an adventure I got it working, and it came down a relatively simple set of commands that I needed to just write into my app logic and forget about.The reason for this was working on my new personal project, tunel. Tunel is named for what it does. “Tunel” is an elegant derivation of “tunnel” and will do exactly that - create a tunnel between your local workstation and an HPC cluster. In short, tunel will provide a collection of “apps” that are easy to deploy to HPC. There are concepts called launchers, and examples are singularity, slurm, or htcondor. And we can add more! It’s the job of a launcher to take a an app recipe (a definition in yaml plus helper scripts that can be customized on the fly by the user) and get it running, whatever that means (run a job? a container? monitor something? something else?). For the most part, most apps that I’ve developing have web interfaces, as they have historically been the most challenging thing to get easily working on HPC. As a quick example, to run a jupyter notebook via Singularity on my login node, after I install tunel and have my ssh connection defined as “osg” I can do:$ tunel run-app osg singularity/socket/jupyter --jupyterlab=true The name “singularity/socket/jupyter” is the unique identifier (and path) to the recipe and config, and I can provide custom arguments as shown above. And although this is the “singularity” launcher, we can do the same kind of interaction with a slurm launcher, going one level deeper to run the notebook on a node after we submit a job!And in my typical way of doing things, I have automation that generates a table and documentation for each of these apps. Check them out here!. I’m mostly working on singularity an HTCondor apps at the moment because I use the open science grid (OSG) for development, as this is a personal project. Thanks to Matthew West for showing me OSG - I was pretty handicapped to develop before finding it!Django template with a socket?This kind of framework can be powerful if I develop a bunch of custom apps, but it’s much more powerful if I can enable YOU to easily do that too! Thus, I knew one of the first tasks I wanted to do is create a template, likely in each of Flask, Django, and FastAPI, that would plug immediately into Tunel. And while I have much work left to do, last night and this evening I figured out a technical issue that is going to empower us to make so many cool things and I wanted to share! Let’s talk about the problem, what I tried, and what ultimately worked.Traditional Setup with uwsgi and nginxIf you look at a family of Python + web interface apps, you’ll find this uwsgi guy in the middle (I don’t know the correct pronunciation but I say YOU-SKI). It’s a fairly rich tool, but in layman’s terms I think of it as a middleman between Python and a traditional web server. But actually, you don’t technically need the web server - and this is where things start to get interesting. 
For a traditonal setup, you might find a nginx (a web server) configuration file that looks like this.# the upstream component nginx needs to connect toupstream django { server unix:///tmp/tunel-django.sock;}# configuration of the serverserver { # the port your site will be served on listen 8000; charset utf-8; server_name localhost; client_max_body_size 10024M; client_body_buffer_size 10024M; client_body_timeout 120; ... location ~* \\.(php|aspx|myadmin|asp)$ { deny all; } location /static/ { autoindex on; alias /var/www/static/; } # Finally, send all non-media requests to the Django server. location / { uwsgi_pass django; uwsgi_max_temp_file_size 10024m; include /code/scripts/nginx/uwsgi_params.par; }}I’ve made a lot of web apps, and whether I use docker-compose with separate containers or a single one, I usually have to write a nginx configuration. The above gets started in the container entrypoint with my app calling uwsgi, and defining that same socket:$ uwsgi --socket=${socket} /code/scripts/uwsgi.iniAnd of course things happen before that, but that’s the main last line. The uwsgi.ini is a configuration filethat makes it easier to define settings.[uwsgi]master = trueprocesses = 4threads = 4py-autoreload = 1#socket = :3031chdir = /code/post-buffering = truelog-date = truemax-requests = 5000http-timeout = 3600000socket-timeout = 120chmod-socket = 666wsgi-file = tuneldjango/wsgi.pyignore-sigpipe = trueignore-write-errors = truedisable-write-exception = truebuffer-size=32768Without going into huge detail, the above says that the app that I wrote (in Python) is listening on that socket, so requests to the web server will either be directed to some static file, filtered out, or sent to our application. And we typically want to use nginx because it’s really good at serving static files and handling traffic.But now let’s step back. If you look under the server in the config above, you’ll notice we are servingcontent on port 8000. This is why I can open the browser to localhost and that port and see my application.But as we know with headless HPC, there are no ports. I can’t use this. So this was my first predicament, last night. I had created this application and it ran locally, but I needed to somehow get the entire thing routed through a tunneled socket to take a next step.Uwsgi Only?I’ll skip over the many hours of things that I tried and failed. I really liked having nginx so I first wanted to somehow send it to the user via a socket, but that never worked. I had an idea to just map the original socket and then have a second container on the host for nginx, but I decided that was too complex. What would up working is realizing that uwsgi can serve http directly, and that came down to a single addition to its config:listen=200protocol=httpOnce I did that, I tried the same technique to map the socket being written to directly to a port via the ssh tunnel, and boum I saw a page! But it was really ugly, because it had no style. This is where I was like OHNO I need nginx for static. But then I found this page and it was a message from the heavens - I could define the same static and media URls using uwsgi directly! That looked like this:$ uwsgi --socket=${socket} --static-map /static=/code/static /code/scripts/uwsgi-standalone.iniAt this point I held my breath, re-ran my app, and wow! There it was - my entire app being served by a container running on a remote machine, only accessible to me through a physical socket. And guess what? 
I added a file browser, and it even worked to upload a dinosaur picture! Here is the entire page for the app - you can see there are many flags you can add and customize to interact. While it’s only accessible to you and there isn’t need for any kind of login, I did add the default username/password login to Django, and require it for logging in to the file browser. Of course I will eventually need this to be more formally security audited, but at least I don’t have anything interesting on my OSG home to be worried about. And is using just uwsgi a performance issue? I think probably not since the expected use case is only once person.A Future for AppsThis is just the beginning - my plan is to put together a list of use cases for a GUI on a cluster, and then just package them into the core template apps for the developer user to easilyc customize. I have big plans for working on this, and honestly I’m so excited that I find I’m staying up way too late and just egging for the work day to end so I can continue. This idea is so powerful, because it’s using existing technologies to deploy containerized apps on HPC, where you don’t need any special permission. Just to show y’all, here is what it looks like to launch my app template:$ tunel run-app osg singularity/socket/tunel-django --tag=dev --pullI added the pull flag and a custom tag because I am actively developing, and my workflow is to quickly rebuild, push, and then run that command. That then shows me the ssh tunnel command that will immediately connect me to my app on a port in my browser.$ ssh -NT -L 7789:/../tunel/singularity/singularity/socket/tunel-django/singularity-socket-tunel-django.sock sochat1@osgAnd that’s seriously it. You as the developer user are empowered to make and deploy apps, and they have interfaces, and you don’t need to do something silly like open a port or actually deploy a web server. It’s so stupidly easy - I’m looking around at all these complex web app setups that people have made for HPC over the years and I wonder why they aren’t doing something simpler. Maybe it’s just a space of development that people gave up on, or there are some security things I’m missing. Either way, I’m going to charge forward working on this! It’s too simple, and the idea is to beautiful to do anything else by this point.", + "content_html": "

    A few months ago I was talking about ssh tunnels. The reason was that I was looking for a solution to deploy apps (like a Jupyter notebook) onto HPC. After an adventure I got it working, and it came down to a relatively simple set of commands that I needed to just write into my app logic and forget about. The reason for this was working on my new personal project, tunel.

    Tunel is named for what it does. “Tunel” is an elegant derivation of “tunnel” and will do exactly that - create a tunnel between your local workstation and an HPC cluster.

    In short, tunel will provide a collection of “apps” that are easy to deploy to HPC. There are concepts called launchers, and examples are singularity, slurm, or htcondor. And we can add more! It’s the job of a launcher to take an app recipe (a definition in yaml plus helper scripts that can be customized on the fly by the user) and get it running, whatever that means (run a job? a container? monitor something? something else?). For the most part, the apps I’ve been developing have web interfaces, as they have historically been the most challenging thing to get easily working on HPC. As a quick example, to run a jupyter notebook via Singularity on my login node, after I install tunel and have my ssh connection defined as “osg” I can do:

    $ tunel run-app osg singularity/socket/jupyter --jupyterlab=true 

    The name “singularity/socket/jupyter” is the unique identifier (and path) to the recipe and config, and I can provide custom arguments as shown above. And although this is the “singularity” launcher, we can do the same kind of interaction with a slurm launcher, going one level deeper to run the notebook on a node after we submit a job! And in my typical way of doing things, I have automation that generates a table and documentation for each of these apps. Check them out here!

    I’m mostly working on Singularity and HTCondor apps at the moment because I use the open science grid (OSG) for development, as this is a personal project. Thanks to Matthew West for showing me OSG - I was pretty handicapped to develop before finding it!

    Django template with a socket?

    This kind of framework can be powerful if I develop a bunch of custom apps, but it’s much more powerful if I can enable YOU to easily do that too! Thus, I knew one of the first tasks I wanted to do was create a template, likely in each of Flask, Django, and FastAPI, that would plug immediately into Tunel. And while I have much work left to do, last night and this evening I figured out a technical issue that is going to empower us to make so many cool things, and I wanted to share! Let’s talk about the problem, what I tried, and what ultimately worked.

    Traditional Setup with uwsgi and nginx

    If you look at a family of Python + web interface apps, you’ll find this uwsgi guy in the middle (I don’t know the correct pronunciation but I say YOU-SKI). It’s a fairly rich tool, but in layman’s terms I think of it as a middleman between Python and a traditional web server. But actually, you don’t technically need the web server - and this is where things start to get interesting. For a traditional setup, you might find a nginx (a web server) configuration file that looks like this.

    # the upstream component nginx needs to connect to
    upstream django {
        server unix:///tmp/tunel-django.sock;
    }

    # configuration of the server
    server {
        # the port your site will be served on
        listen      8000;
        charset     utf-8;
        server_name           localhost;
        client_max_body_size 10024M;
        client_body_buffer_size 10024M;
        client_body_timeout 120;
        ...
        location ~* \\.(php|aspx|myadmin|asp)$ {
          deny all;
        }
        location /static/ {
            autoindex on;
            alias /var/www/static/;
        }
        # Finally, send all non-media requests to the Django server.
        location / {
            uwsgi_pass  django;
            uwsgi_max_temp_file_size 10024m;
            include /code/scripts/nginx/uwsgi_params.par;
        }
    }

    I’ve made a lot of web apps, and whether I use docker-compose with separate containers or a single one, I usually have to write a nginx configuration. The above gets started in the container entrypoint with my app calling uwsgi, and defining that same socket:

    $ uwsgi --socket=${socket} /code/scripts/uwsgi.ini

    And of course things happen before that, but that’s the main last line. The uwsgi.ini is a configuration file that makes it easier to define settings.

    [uwsgi]
    master = true
    processes = 4
    threads = 4
    py-autoreload = 1
    #socket = :3031
    chdir = /code/
    post-buffering = true
    log-date = true
    max-requests = 5000
    http-timeout = 3600000
    socket-timeout = 120
    chmod-socket = 666
    wsgi-file = tuneldjango/wsgi.py
    ignore-sigpipe = true
    ignore-write-errors = true
    disable-write-exception = true
    buffer-size=32768

    Without going into huge detail, the above says that the app that I wrote (in Python) is listening on that socket, so requests to the web server will either be directed to some static file, filtered out, or sent to our application. And we typically want to use nginx because it’s really good at serving static files and handling traffic.

    But now let’s step back. If you look under the server in the config above, you’ll notice we are serving content on port 8000. This is why I can open the browser to localhost and that port and see my application. But as we know with headless HPC, there are no ports. I can’t use this. So this was my first predicament, last night. I had created this application and it ran locally, but I needed to somehow get the entire thing routed through a tunneled socket to take a next step.

    Uwsgi Only?

    I’ll skip over the many hours of things that I tried and failed. I really liked having nginx, so I first wanted to somehow send it to the user via a socket, but that never worked. I had an idea to just map the original socket and then have a second container on the host for nginx, but I decided that was too complex. What wound up working was realizing that uwsgi can serve http directly, and that came down to a single addition to its config:

    listen=200
    protocol=http

    Once I did that, I tried the same technique to map the socket being written to directly to a port via the ssh tunnel, and boum I saw a page! But it was really ugly, because it had no style. This is where I was like OHNO I need nginx for static. But then I found this page and it was a message from the heavens - I could define the same static and media URLs using uwsgi directly! That looked like this:

    $ uwsgi --socket=${socket} --static-map /static=/code/static /code/scripts/uwsgi-standalone.ini

    At this point I held my breath, re-ran my app, and wow!

    There it was - my entire app being served by a container running on a remote machine, only accessible to me through a physical socket. And guess what? I added a file browser, and it even worked to upload a dinosaur picture!

    Here is the entire page for the app - you can see there are many flags you can add and customize to interact.

    While it’s only accessible to you and there isn’t a need for any kind of login, I did add the default username/password login to Django, and require it for logging in to the file browser. Of course I will eventually need this to be more formally security audited, but at least I don’t have anything interesting on my OSG home to be worried about. And is using just uwsgi a performance issue? I think probably not, since the expected use case is only one person.

    A Future for Apps

    This is just the beginning - my plan is to put together a list of use cases for a GUI on a cluster, and then just package them into the core template apps for the developer user to easily customize. I have big plans for working on this, and honestly I’m so excited that I find I’m staying up way too late and just egging for the work day to end so I can continue. This idea is so powerful, because it’s using existing technologies to deploy containerized apps on HPC, where you don’t need any special permission. Just to show y’all, here is what it looks like to launch my app template:

    $ tunel run-app osg singularity/socket/tunel-django --tag=dev --pull

    I added the pull flag and a custom tag because I am actively developing, and my workflow is to quickly rebuild, push, and then run that command. That then shows me the ssh tunnel command that will immediately connect me to my app on a port in my browser.

    $ ssh -NT -L 7789:/../tunel/singularity/singularity/socket/tunel-django/singularity-socket-tunel-django.sock sochat1@osg

    And that’s seriously it. You as the developer user are empowered to make and deploy apps, and they have interfaces, and you don’t need to do something silly like open a port or actually deploy a web server. It’s so stupidly easy - I’m looking around at all these complex web app setups that people have made for HPC over the years and I wonder why they aren’t doing something simpler. Maybe it’s just a space of development that people gave up on, or there are some security things I’m missing. Either way, I’m going to charge forward working on this! It’s too simple, and the idea is too beautiful to do anything else by this point.

    ", + "url": "https://hpc.social/personal-blog/2022/tunel-apps-for-hpc/", + + + + + + "date_published": "2022-08-04T13:30:00-06:00", + "date_modified": "2022-08-04T13:30:00-06:00", + + "author": "Vanessasaurus" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/ceph-rocksdb-tuning-deep-dive/", + "title": "Ceph RocksDB Tuning Deep-Dive", + "summary": null, + "content_text": "See my post on the Ceph.io blog about tuning RocksDB in Ceph!", + "content_html": "

    See my post on the Ceph.io blog about tuning RocksDB in Ceph!

    ", + "url": "https://hpc.social/personal-blog/2022/ceph-rocksdb-tuning-deep-dive/", + + + + + + "date_published": "2022-07-25T01:00:00-06:00", + "date_modified": "2022-07-25T01:00:00-06:00", + + "author": "Mark Nelson's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/the-utility-vs-the-professional-services-firm/", + "title": "The Utility vs the Professional Services Firm", + "summary": null, + "content_text": "As research computing and data becomes more complex and diverse, we need more professional services firms and fewer utilties(Note: This post is adapted from #127 of the Research Computing Teams Newsletter)I get to talk with a lot of research computing and data teams - software, data, and systems. Sometimes in these conversations it’s pretty clear that some teams, or the team and their funder, or a team and I, are talking a bit past each other. And that’s usually because they or we are (currently) operating with very different mental models of how they operate.Some research computing and data teams are operating as Utilities, and see the world through that lens; a growing number are operating as Professional Services Firms. Others are moving from one to the other, and are at different places along that very abrupt transition. Some kinds of groups (like bioinformatics cores) are much more likely to already be operating in service mode, while others (like research compute infrastructure teams) are more likely to still think of themselves as utilities. It varies from place to place, though, depending on local conditions. But they’re very different models! Utility service and professional services delivery are very different, and require different funding, management, and career development models. Image credit: left and right.Utilities, like power companies or garbage collection or municipal potable water, were really the only sensible role models for the first decades of research computing and data teams. Those teams were entirely about operating large equipment purchased from vendors. Costs were mostly a big capital expense. Everyone who needed the utility needed the same thing - undifferentiated flops and bytes, or 60Hz 120VAC. Because everyone needed the same thing, economies of scale led to natural monopolies; the most reasonable provision model was for the local jurisdiction/institution to own or control a single operator. Differentiation or strategy, or gaining new customers, weren’t meaningful discussion topics. The only thing that really makes a difference is scale, which leads to mergers. Innovation happens slowly, top-down, at the industry-wide scale and usually from the vendors (“hey, did you hear about those new gas compressors Dyneco announced?”), and diffuses outwards. Employees take pride in and the organization values operational skill and things ticking along smoothly. Customers value reliability. The only thing that matters for any individual operator is to operate effectively and to provide the standard service with the right amount of cost: high enough to absorb the available subsidy, low enough to not go broke. If a customer needs something other than what the utility provides, rather than that being a market opportunity, it’s either an inconvenience or an irrelevance. The power company or the water utility or the old phone monopoly just doesn’t serve that need.Professional Service Firms — say engineering firms, or architects, or consultancies — are very different beasts. 
They might very well have significant capital investment in specialized equipment, but their main selling point and their biggest cost is expertise. Competing for and retaining that expertise, and developing that expertise in house and amongst their clients, are principal concerns. As part of a “full-service” offering they they likely have some fairly standard services they offer at the low end, where operating cost and efficiency is vital. But what the organization values, and the employees enjoy, is at the high-touch end — getting deeply involved with the client work, and being as much a collaborator or partner or “trusted advisor” as a service provider. Different clients want very different things, and that high-touch high-expertise work is specialized and labour intensive, so the firms themselves need a clear focus; they can’t meet all needs. Clients can go elsewhere, so there is redundancy and competition, but less than you’d think at a distance. In civil engineering a geotechnical firm is complementary, not competing, with one that specializes in water resource engineering.As in the rest of our lives, in research computing we need to have utilities. As research data management matures, institutional or regional data depositories become mature and “enterprise” enough to become utilities, likely run by IT or the Library. Teaching or CI/CD or MLOps resources for data science or software development are likely best served by this model. The closer the operations are to standard, something that can be run by IT, the more likely it is to be a utility. But one has to be careful. Utilies are commodoties: they tend to get merged together wherever feasible, since scale matters and it’s all undifferentiated commodity provision.As research computing becomes broader and faster changing and more diverse, we need more and more professional services firms, too; nimble groups specialized to particular needs and ready to adapt as those needs change. As even infrastructure is becoming less one-size-fits-all, and methods for making use of computing and data for diverse fields grow more complex and expertise intensive, the preconditions for the utility model are met in fewer situations than used to be.A lot of research computing teams are interested in providing something more like professional services, but were created in the Utility model, and are stuck there by their funders. The institutional or external funders still have this very specific (and to their mind time tested and successful) operating model in their plans. Utilities are funded very differently than professional services firms. At utility scale, it doesn’t make sense to outsource things, or develop non-standard services (who wants non-standard power coming into their house!) Funders requirements on eligible expenses may focus almost entirely on the capital spend, and not on operating funding that’s needed to make effective use of the capital, or to be more agile in how services are delivered.Even those teams who aren’t being held back by funders and who want to make the switch to professional services from their original utility model find it a hard transition. There’s no obvious, incremental path to go from providing a standard, stable commodity to changing, specialized, bundles of expertise. Utilities operate very differently from professional services firms. They value different things. The models for staff growth are different. 
So they have to be managed quiet differently, and there’s no clear path internally from A to B.Besides funding, and internal considerations, utilities and professional services firms are also percieved and valued by their clients very differently. Utilities’ existing customers don’t want change, and new customers aren’t yet interested in getting advanced app software development suggestions from what they perceive to still be the mobile telephony provider.But research computing and data is changing, increasingly quickly, and the utility approach only meets a piece of these growing needs. Navigating the transition isn’t going to be easy, for RCD teams, leaders, or funders; but expressing it clearly and talking about it more will maybe mean we’re not talking past each other so often.", + "content_html": "

    As research computing and data becomes more complex and diverse, we need more professional services firms and fewer utilities

    (Note: This post is adapted from #127 of the Research Computing Teams Newsletter)

    I get to talk with a lot of research computing and data teams - software, data, and systems. Sometimes in these conversations it’s pretty clear that some teams, or the team and their funder, or a team and I, are talking a bit past each other. And that’s usually because they or we are (currently) operating with very different mental models of how they operate.

    Some research computing and data teams are operating as Utilities, and see the world through that lens; a growing number are operating as Professional Services Firms. Others are moving from one to the other, and are at different places along that very abrupt transition. Some kinds of groups (like bioinformatics cores) are much more likely to already be operating in service mode, while others (like research compute infrastructure teams) are more likely to still think of themselves as utilities. It varies from place to place, though, depending on local conditions. But they’re very different models!

    \"Utility
    Utility service and professional services delivery are very different, and require different funding, management, and career development models. Image credit: left and right.

    Utilities, like power companies or garbage collection or municipal potable water, were really the only sensible role models for the first decades of research computing and data teams. Those teams were entirely about operating large equipment purchased from vendors. Costs were mostly a big capital expense. Everyone who needed the utility needed the same thing - undifferentiated flops and bytes, or 60Hz 120VAC. Because everyone needed the same thing, economies of scale led to natural monopolies; the most reasonable provision model was for the local jurisdiction/institution to own or control a single operator. Differentiation or strategy, or gaining new customers, weren’t meaningful discussion topics. The only thing that really makes a difference is scale, which leads to mergers. Innovation happens slowly, top-down, at the industry-wide scale and usually from the vendors (“hey, did you hear about those new gas compressors Dyneco announced?”), and diffuses outwards. Employees take pride in and the organization values operational skill and things ticking along smoothly. Customers value reliability. The only thing that matters for any individual operator is to operate effectively and to provide the standard service with the right amount of cost: high enough to absorb the available subsidy, low enough to not go broke. If a customer needs something other than what the utility provides, rather than that being a market opportunity, it’s either an inconvenience or an irrelevance. The power company or the water utility or the old phone monopoly just doesn’t serve that need.

    Professional Service Firms — say engineering firms, or architects, or consultancies — are very different beasts. They might very well have significant capital investment in specialized equipment, but their main selling point and their biggest cost is expertise. Competing for and retaining that expertise, and developing that expertise in house and amongst their clients, are principal concerns. As part of a “full-service” offering they likely have some fairly standard services they offer at the low end, where operating cost and efficiency is vital. But what the organization values, and the employees enjoy, is at the high-touch end — getting deeply involved with the client work, and being as much a collaborator or partner or “trusted advisor” as a service provider. Different clients want very different things, and that high-touch high-expertise work is specialized and labour intensive, so the firms themselves need a clear focus; they can’t meet all needs. Clients can go elsewhere, so there is redundancy and competition, but less than you’d think at a distance. In civil engineering a geotechnical firm is complementary, not competing, with one that specializes in water resource engineering.

    As in the rest of our lives, in research computing we need to have utilities. As research data management matures, institutional or regional data depositories become mature and “enterprise” enough to become utilities, likely run by IT or the Library. Teaching or CI/CD or MLOps resources for data science or software development are likely best served by this model. The closer the operations are to standard, something that can be run by IT, the more likely it is to be a utility. But one has to be careful. Utilities are commodities: they tend to get merged together wherever feasible, since scale matters and it’s all undifferentiated commodity provision.

    As research computing becomes broader and faster changing and more diverse, we need more and more professional services firms, too; nimble groups specialized to particular needs and ready to adapt as those needs change. As even infrastructure is becoming less one-size-fits-all, and methods for making use of computing and data for diverse fields grow more complex and expertise intensive, the preconditions for the utility model are met in fewer situations than they used to be.

    A lot of research computing teams are interested in providing something more like professional services, but were created in the Utility model, and are stuck there by their funders. The institutional or external funders still have this very specific (and to their mind time tested and successful) operating model in their plans. Utilities are funded very differently than professional services firms. At utility scale, it doesn’t make sense to outsource things, or develop non-standard services (who wants non-standard power coming into their house!) Funders’ requirements on eligible expenses may focus almost entirely on the capital spend, and not on the operating funding that’s needed to make effective use of the capital, or to be more agile in how services are delivered.

    Even those teams who aren’t being held back by funders and who want to make the switch to professional services from their original utility model find it a hard transition. There’s no obvious, incremental path to go from providing a standard, stable commodity to changing, specialized, bundles of expertise. Utilities operate very differently from professional services firms. They value different things. The models for staff growth are different. So they have to be managed quite differently, and there’s no clear path internally from A to B.

    Besides funding, and internal considerations, utilities and professional services firms are also perceived and valued by their clients very differently. Utilities’ existing customers don’t want change, and new customers aren’t yet interested in getting advanced app software development suggestions from what they perceive to still be the mobile telephony provider.

    But research computing and data is changing, increasingly quickly, and the utility approach only meets a piece of these growing needs. Navigating the transition isn’t going to be easy, for RCD teams, leaders, or funders; but expressing it clearly and talking about it more will maybe mean we’re not talking past each other so often.

    ", + "url": "https://hpc.social/personal-blog/2022/the-utility-vs-the-professional-services-firm/", + + + + + + "date_published": "2022-07-03T01:00:00-06:00", + "date_modified": "2022-07-03T01:00:00-06:00", + + "author": "Jonathan Dursi's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/ssh-tunnels/", + "title": "SSH Tunnels", + "summary": null, + "content_text": "Today I want to talk about ssh tunnels. Very abstractly, we would want to use an sshtunnel to securely send information. In the case of HPC, you are probably familiar with ssh,(Secure Shell or Secure Socket Shell) when you login to your node. You might do something like this:$ ssh dinosaur@server.address.eduOr if you have a proper setup in your ~/.ssh/config (with a named server) you might just do:$ ssh dinosaurI like to use ssh connection multiplexingso the connection is kept alive for a bit, but I won’t go into detail becausethis post isn’t specifically about the details of ssh. The use case I’m interested in (and the thingthat HPC is very bad at) is how to deploy something interactive on an HPC cluster.SSH Tunnel with PortsGiven that a cluster has exposed ports (either the login node, or both the login node and compute nodes)creating a tunnel is fairly straight forward! In the past I created a tool called forward to handle all the manual steps to get this working, meaning: Show the user how to set up their ~/.ssh/config (once) Define (once) parameters like a port, memory, GPUs, and if the cluster has isolated nodes Start any number of provided apps that come with forward (e.g., jupyter, singularity, etc.)An interaction using forward might look like any of the following:# Run a Singularity container that already exists on your resource (recommended)bash start-node.sh singularity-run /scratch/users/vsochat/share/pytorch-dev.simg# Execute a custom command to the same Singularity containerbash start-node.sh singularity-exec /scratch/users/vsochat/share/pytorch-dev.simg echo \"Hello World\"# Run a Singularity container from a url, `docker://ubuntu`bash start-node.sh singularity-run docker://ubuntu# Execute a custom command to the same containerbash start-node.sh singularity-exec docker://ubuntu echo \"Hello World\"# To start a jupyter notebook in a specific directory ON the cluster resourcebash start.sh jupyter <cluster-dir># To start a jupyter notebook with tensorflow in a specific directorybash start.sh py3-tensorflow <cluster-dir>Note that the last set of commands are pertaining to notebooks, which is where these tunnels come into play!A notebook is going to be run on a compute node that looks something like the following:$ jupyter notebook --no-browser --port=$PORTAnd if you ran this with a Singularity container, you’d also want to bind jovyan’s home to be the user’s, along with the jupyter config directory:$ singularity exec --home ${HOME} \\ --bind ${HOME}/.local:/home/jovyan/.local \\ --bind ${HOME}/.jupyter:/home/jovyan/.jupyter \\ datascience_notebook.sif jupyter notebook --no-browser --port=$PORT --ip 0.0.0.0As we described earlier here,there are subtle differences between making a tunnel (with a port) given that you have isolated nodes (or not).You can determine this based on your ability to ssh into a non-login node (meaning where your job is running) from “the outside world”that is your computer. If you cannot, your nodes are isolated, which we will discuss next.Isolated NodesLet’s say that we need to create a tunnel (using ports) to an isolated node. 
This means that we are basically goingto establish a tunnel to the login node, and then from the login node another one to the compute node.We might use a command that looks like this:$ ssh -L $PORT:localhost:$PORT ${RESOURCE} ssh -L $PORT:localhost:$PORT -N \"$MACHINE\" &In the command above, the first half (ssh -L $PORT:localhost:$PORT ${RESOURCE}) is executed on the local machine, which establishes a port forwarding to the login node. The “-L” in the above (from the man pages) : Specifies that connections to the given TCP port or Unix socket on the local (client) host are to be forwarded to thegiven host and port, or Unix socket, on the remote side.This works by allocating a socket to listen to either a TCPport on the local side, optionally bound to the specifiedbind_address, or to a Unix socket. Whenever a connection ismade to the local port or socket, the connection is for‐warded over the secure channel, and a connection is made toeither host port hostport, or the Unix socket remote_socket,from the remote machine.Or in layman’s terms: Forward whatever is running on the second port on my resource to my local machine.Since we are forwarding ports, this would require minimally the login node to expose ports.The next line ssh -L $PORT:localhost:$PORT -N \"$MACHINE\" & is a second command run from the login node, and port forwards it to the compute node, since you can only access the compute node from the login nodes.You’ll notice it looks just like the first, and this works because ssh commands can be chained.The -N says “don’t execute a remote command (and just forward the port).”Finally, the last $MACHINE is the node that the jupyter notebook is running on.Not IsolatedFor HPCs where the compute node is not isolated from the outside world the ssh command for port forwarding first establishes a connection the login node, but then continues to pass on the login credentials to the compute node to establish a tunnel between the localhost and the port on the compute node. The ssh command in this case utilizes the flag -K that forwards the login credentials to the compute node:$ ssh \"$DOMAINNAME\" -l $FORWARD_USERNAME -K -L $PORT:$MACHINE:$PORT -N &I’m not sure in practice how common this is anymore. At least at my current employer it’s not even the casethat ports are exposed on the login node! It’s probably better that way, because in cases where you do get ports it’s sort of a “pick a port above this range and hope that no other user picks the same one!” It’s messy. So let’s talk about the case of not having ports exposed next, since this was the entire reason I wanted to write this post!SSH Tunnel with SocketMore than a year ago, I had this realization that a lot of people at Stanford used the “forward” tool, and just for notebooks (and thiswas before they were available via Open OnDemand, which is what I’d recommend to a Stanford user at this point). I decided I wanted to make a new open source tool, “tunel” (an elegant derivation of “tunnel”) vsoch/tunel to make it easyto run what I call “apps” on an HPC cluster. Are there better ways of exposing user interfaces on HPC? Yes, indeed. But not everyonehas easy access. It was also a stubborn “I want this to work” proof of concept. 
This new tool would be like forward, but a little nicer.Because I, along with every other HPC developer and user, wishes we could have nice things 😭️.At this time I had just started a new role at a national lab, and I realized that none of my old techniques for launchingthe job worked because of the lack of exposed ports. Thinking this was impossible, I abandoned it for a year. But then this last week I found this! I was motivated! I was excited! The basic launch command of the notebook looks like this:$ jupyter notebook --sock /tmp/test.sock --no-browserAnd then with a different looking tunnel, we could forward this socket to the host, and map it to a port! My excitement was then brought downby what led to two days of struggling. I first tried my entire tunel workflow, meaning launching a job on a node,and then running that command, and providing the instruction to the user to create the tunnel as follows:$ ssh -L 8888:/tmp/test.sock -N user@this_hostThat didn’t work (and remember this socket was created on the isolated node, that’s important to remember for later). So I started looking at the socket with “nc” - “arbitrary TCP and UDP connections and listens” from the login node. The “-U” below is for UNIX sockets:$ nc -U /tmp/test.sockAnd from the head node I saw:Ncat: Connection refused.So then I knew I needed a simpler, dummier example. I got rid of tunel and just ran the notebook command on the head node.Dear reader, it still did not work. I opened an issue and asked Twitter for help. Someone else on Twitter reported that it worked for them, and that (in my opinion) is the challenge and story of HPC - given the huge differences in setups, it’s hard to reproduce what another person does unless you scope to a very specificenvironment or technology and hugely go out of your way to do it. I’m always grateful when someone tries to help, but when the ultimate answer is just“But it works on my machine!” I (and I think all of us) are like:(╯°□°)╯︵ ┻━┻🤣️Please know that is intended to be funny, and I really am grateful for the attempt to help! Anyway, the first night I was devastated because I was so excited about the possibility of this working! But of course (as it usually does) my quasi-sadness turned again into relentless stubborn-ness, and for my SaturdayI embarked on trying everything. I call this the stubborn brute force approach, and it actually leads to some pretty good outcomes?Socket from Login NodeFirst from the login node, I started reading about flags in detail, again from the man pages. It occurred to me that the suggested command included “-L” (discussed earlier) but there were a ton of other flags to try, and maybe I need them for my setup? The command that wound up working (after much trial and error) was just:# Running on login node$ ssh -NT -L 8888:/tmp/test.sock user@serverAnd here again was the suggested command:$ ssh -L 8888:/tmp/test.sock -N user@this_hostSo they are very similar - and the main difference is the -T is to “Disable pseudo-terminal allocation.”So I suspect (also based on the version of ssl I’m using) that without the flag, you might be making a request for a pty to the server(more details here) and then it could abort. Adding the flag just skips this, because we don’t need that - we just need the simple forward. And yes, this indeed feels very specific to your ssh setup, version of ssh, and server configuration. 
Of course, this was only the beginning of figuring things out, because I had no idea how to get this working from one level deeper - an isolated compute node.Socket with Isolated NodesRemember that when we created the socket on the isolated node and we tried this out from the login node:$ nc -U /tmp/test.sockAnd the result was this:Ncat: Connection refused.My spidey senses were telling me that this should work. Indeed, when I ssh into the isolated node from the login node,that same command allowed me to connect (meaning it hung / there was no error output). So my first task, I decided, was to tryand “forward” this socket to the login node. Again, back to the man pages! I wound up with something like this (run from the login node):$ ssh isolated-node -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sockThe above is again using -L but instead of a port (which aren’t exposed) we are using a socket! It’s kind of neat you can switch out those two. When I tried the same nc command from the loginnode, we had progress (no connection refused message!) 🎉️ And then I moved this up one level to see if I could make this same request from my local machine, sort of combining the first command that worked with the login node notebook with this one. That looked like this (and yes this took more trial and error):$ ssh -NT user@server ssh isolated-node -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sockAnd to confirm it was working, I’d ssh into the server and again run that nc command to ensure that the newly forwarded socket was readable fromthe login node. After this, again with more trial and error, I tried running a second command to just forward that (now working socket) to my host.That eventually looked like this:# And another for the local socket$ ssh -NT -L 8899:/home/dinosaur/login-node.sock user@serverAnd then (all together now!) I tried putting them together.$ ssh -NT -L 8899:/home/dinosaur/login-node.sock user@server ssh isolated-node \\ -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sockAnd then I spent some time integrating it into tunel, and surprise! the first implementation didn’t work. The first bug was that I needed to clean up old sockets each time the “same” app was run (determined by the job name and organizational namespace so the user can only run one of a particular interactive app at once, and not forget about previous runs). The second issue was about opening the tunnel - it didn’t seem to work if the process exited and/or it was run in a subshell (that also probably exits). I realized that (for the time being) running this connection step on behalf of the user, since it’s something the user should have more control over, probably wasn’t the right way to go. If the user hasn’t added something like an rsa key to ~/.ssh/authorized_keys on their clusters, it would also ask for a password interactively, making it harder for me to manage. So for simplicity sake, and assuming that we really should put the user in control of deciding when to start/stop the tunnel, I simply print the full ssh command in the terminal and let them copy paste it. A successful connection might then prompt them for their password for that second ssh, which (by default) I don’t think is carrying forward auth from the first.So that was my adventure! Mind you, this entire adventure was only about two days, and that included time to write this post, so I still have lots in front of me to work on. 
However, with these updated commands (and some nice tweaks from Python’s rich library) I quickly had a nice set of commands to run and stop an app with an interactive jupyter notebook, and using sockets on isolated nodes!$ tunel run-app server slurm/socket/jupyter$ tunel stop-app server slurm/socket/jupyterAs a sidenote, one thing I like about rich is that it puts the aesthetic as a first class citizen.So many tools just don’t consider this, and I love that with rich I can think about colors, presentation,and even animations like spinners!Getting a socket working means I’ll be able to continue working on this library (hooray!) so if you have ideas or requests for appsyou’d like to run on HPC, assuming just this basic technology, please give me a ping and I’d love to chat and support them.I’m also going to be requesting an allocation on the Open Science Grid, which hopefully will give me other kinds of clustersto test on. I hope this was interesting to read, thanks for doing that!", + "content_html": "

    Today I want to talk about ssh tunnels. Very abstractly, we use an ssh tunnel to send information securely. In the case of HPC, you are probably familiar with ssh (Secure Shell, or Secure Socket Shell) from logging in to your node. You might do something like this:

    $ ssh dinosaur@server.address.edu

    Or if you have a proper setup in your ~/.ssh/config (with a named server) you might just do:

    $ ssh dinosaur

    I like to use ssh connection multiplexing so the connection is kept alive for a bit, but I won’t go into detail because this post isn’t specifically about the details of ssh. The use case I’m interested in (and the thing that HPC is very bad at) is how to deploy something interactive on an HPC cluster.
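    In case connection multiplexing is new to you, here is a minimal sketch of what it can look like in ~/.ssh/config - the host alias and control path below are hypothetical, so adapt them to your own setup:

    # ~/.ssh/config (illustrative sketch; the alias and control path are made up)
    Host dinosaur
        HostName server.address.edu
        User dinosaur
        # Reuse one authenticated connection for subsequent ssh sessions
        ControlMaster auto
        ControlPath ~/.ssh/control-%r@%h:%p
        # Keep the control connection alive for 10 minutes after the last session
        ControlPersist 10m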

    SSH Tunnel with Ports

    Given that a cluster has exposed ports (either the login node, or both the login node and compute nodes), creating a tunnel is fairly straightforward! In the past I created a tool called forward to handle all the manual steps to get this working, meaning:

    1. Show the user how to set up their ~/.ssh/config (once)
    2. Define (once) parameters like a port, memory, GPUs, and whether the cluster has isolated nodes
    3. Start any number of provided apps that come with forward (e.g., jupyter, singularity, etc.)

    An interaction using forward might look like any of the following:

    # Run a Singularity container that already exists on your resource (recommended)
    bash start-node.sh singularity-run /scratch/users/vsochat/share/pytorch-dev.simg

    # Execute a custom command to the same Singularity container
    bash start-node.sh singularity-exec /scratch/users/vsochat/share/pytorch-dev.simg echo \"Hello World\"

    # Run a Singularity container from a url, `docker://ubuntu`
    bash start-node.sh singularity-run docker://ubuntu

    # Execute a custom command to the same container
    bash start-node.sh singularity-exec docker://ubuntu echo \"Hello World\"

    # To start a jupyter notebook in a specific directory ON the cluster resource
    bash start.sh jupyter <cluster-dir>

    # To start a jupyter notebook with tensorflow in a specific directory
    bash start.sh py3-tensorflow <cluster-dir>

    Note that the last set of commands pertain to notebooks, which is where these tunnels come into play! A notebook is going to be run on a compute node with a command that looks something like the following:

    $ jupyter notebook --no-browser --port=$PORT

    And if you ran this with a Singularity container, you’d also want to bind jovyan’s home to be the user’s, along with the jupyter config directory:

    $ singularity exec --home ${HOME} \\
        --bind ${HOME}/.local:/home/jovyan/.local \\
        --bind ${HOME}/.jupyter:/home/jovyan/.jupyter \\
        datascience_notebook.sif jupyter notebook --no-browser --port=$PORT --ip 0.0.0.0

    As we described earlier here, there are subtle differences in making a tunnel (with a port) depending on whether you have isolated nodes. You can determine this based on your ability to ssh into a non-login node (meaning the node where your job is running) from “the outside world,” that is, your computer. If you cannot, your nodes are isolated, which we will discuss next.
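    A quick way to test is to try reaching a compute node directly from your laptop - the hostname below is hypothetical, so substitute one of your cluster’s compute nodes:

    # From your laptop: if this hangs, times out, or is refused, your nodes are isolated
    $ ssh dinosaur@compute-0-1.cluster.edu hostname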

    Isolated Nodes

    Let’s say that we need to create a tunnel (using ports) to an isolated node. This means that we are basically going to establish a tunnel to the login node, and then from the login node another one to the compute node. We might use a command that looks like this:

    $ ssh -L $PORT:localhost:$PORT ${RESOURCE} ssh -L $PORT:localhost:$PORT -N \"$MACHINE\" &

    In the command above, the first half (ssh -L $PORT:localhost:$PORT ${RESOURCE}) is executed on the local machine, which establishes port forwarding to the login node. The “-L” in the above means (from the man pages):

    Specifies that connections to the given TCP port or Unix socket on the local (client) host are to be forwarded to the given host and port, or Unix socket, on the remote side. This works by allocating a socket to listen to either a TCP port on the local side, optionally bound to the specified bind_address, or to a Unix socket. Whenever a connection is made to the local port or socket, the connection is forwarded over the secure channel, and a connection is made to either host port hostport, or the Unix socket remote_socket, from the remote machine.

    Or in layman’s terms:

    Forward whatever is running on the second port on my resource to my local machine.

    Since we are forwarding ports, this minimally requires the login node to expose ports. The next part, ssh -L $PORT:localhost:$PORT -N \"$MACHINE\" &, is a second command run from the login node, and it forwards the port on to the compute node, since you can only access the compute node from the login nodes. You’ll notice it looks just like the first, and this works because ssh commands can be chained. The -N says “don’t execute a remote command (just forward the port).” Finally, the last $MACHINE is the node that the jupyter notebook is running on.
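    To make that concrete, here is the same command with hypothetical values filled in - the port, login node address, and compute node name are all made up for illustration:

    # 8888 is the notebook port, login.cluster.edu is the login node,
    # and compute-0-1 is the compute node where the notebook runs
    $ ssh -L 8888:localhost:8888 login.cluster.edu ssh -L 8888:localhost:8888 -N compute-0-1 &

    # Then point your local browser at http://localhost:8888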

    Not Isolated

    For HPC sites where the compute node is not isolated from the outside world, the ssh command for port forwarding first establishes a connection to the login node, and then passes the login credentials on to the compute node to establish a tunnel between the localhost and the port on the compute node. The ssh command in this case uses the -K flag, which forwards the login (GSSAPI) credentials to the compute node:

    $ ssh \"$DOMAINNAME\" -l $FORWARD_USERNAME -K -L  $PORT:$MACHINE:$PORT -N  &

    I’m not sure in practice how common this is anymore. At least at my current employer it’s not even the case that ports are exposed on the login node! It’s probably better that way, because in cases where you do get ports it’s sort of “pick a port above this range and hope that no other user picks the same one!” It’s messy. So let’s talk about the case of not having ports exposed next, since this was the entire reason I wanted to write this post!

    SSH Tunnel with Socket

    More than a year ago, I had the realization that a lot of people at Stanford used the “forward” tool, and just for notebooks (and this was before they were available via Open OnDemand, which is what I’d recommend to a Stanford user at this point). I decided I wanted to make a new open source tool, “tunel” (an elegant derivation of “tunnel”) vsoch/tunel, to make it easy to run what I call “apps” on an HPC cluster. Are there better ways of exposing user interfaces on HPC? Yes, indeed. But not everyone has easy access. It was also a stubborn “I want this to work” proof of concept. This new tool would be like forward, but a little nicer. Because I, along with every other HPC developer and user, wish we could have nice things 😭️.

    At this time I had just started a new role at a national lab, and I realized that none of my old techniques for launching the job worked because of the lack of exposed ports. Thinking this was impossible, I abandoned it for a year. But then this last week I found this! I was motivated! I was excited! The basic launch command of the notebook looks like this:

    $ jupyter notebook --sock /tmp/test.sock --no-browser

    And then with a different looking tunnel, we could forward this socket to the host, and map it to a port! My excitement was then brought down by what led to two days of struggling. I first tried my entire tunel workflow, meaning launching a job on a node, then running that command, and providing the instruction to the user to create the tunnel as follows:

    $ ssh -L 8888:/tmp/test.sock -N user@this_host

    That didn’t work (and remember, this socket was created on the isolated node - that’s important for later). So I started looking at the socket with “nc” - “arbitrary TCP and UDP connections and listens” - from the login node. The “-U” below is for UNIX sockets:

    $ nc -U /tmp/test.sock

    And from the head node I saw:

    Ncat: Connection refused.

    So then I knew I needed a simpler, dummier example. I got rid of tunel and just ran the notebook command on the head node. Dear reader, it still did not work. I opened an issue and asked Twitter for help. Someone else on Twitter reported that it worked for them, and that (in my opinion) is the challenge and story of HPC - given the huge differences in setups, it’s hard to reproduce what another person does unless you scope to a very specific environment or technology and hugely go out of your way to do it. I’m always grateful when someone tries to help, but when the ultimate answer is just “But it works on my machine!” I (and I think all of us) are like:

    (╯°□°)╯︵ ┻━┻

    🤣️

    Please know that is intended to be funny, and I really am grateful for the attempt to help! Anyway, the first night I was devastated because I was so excited about the possibility of this working! But of course (as it usually does) my quasi-sadness turned again into relentless stubbornness, and for my Saturday I embarked on trying everything. I call this the stubborn brute force approach, and it actually leads to some pretty good outcomes?

    Socket from Login Node

    First from the login node, I started reading about flags in detail, again from the man pages. It occurred to me that the suggested command included “-L” (discussed earlier) but there were a ton of other flags to try, and maybe I needed them for my setup? The command that wound up working (after much trial and error) was just:

    # Running on login node
    $ ssh -NT -L 8888:/tmp/test.sock user@server

    And here again was the suggested command:

    $ ssh -L 8888:/tmp/test.sock -N user@this_host

    So they are very similar - the main difference is the -T, which is to “Disable pseudo-terminal allocation.” So I suspect (also based on the version of ssh I’m using) that without the flag, you might be making a request for a pty to the server (more details here) and then it could abort. Adding the flag just skips this, because we don’t need that - we just need the simple forward. And yes, this indeed feels very specific to your ssh setup, version of ssh, and server configuration. Of course, this was only the beginning of figuring things out, because I had no idea how to get this working from one level deeper - an isolated compute node.

    Socket with Isolated Nodes

    Remember that we created the socket on the isolated node, and then we tried this out from the login node:

    $ nc -U /tmp/test.sock

    And the result was this:

    Ncat: Connection refused.

    My spidey senses were telling me that this should work. Indeed, when I ssh into the isolated node from the login node, that same command allowed me to connect (meaning it hung / there was no error output). So my first task, I decided, was to try and “forward” this socket to the login node. Again, back to the man pages! I wound up with something like this (run from the login node):

    $ ssh isolated-node -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock

    The above is again using -L, but instead of a port (since ports aren’t exposed) we are using a socket! It’s kind of neat that you can switch out those two. When I tried the same nc command from the login node, we had progress (no connection refused message!) 🎉️ And then I moved this up one level to see if I could make this same request from my local machine, sort of combining the first command that worked with the login node notebook with this one. That looked like this (and yes, this took more trial and error):

    $ ssh -NT user@server ssh isolated-node -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock

    And to confirm it was working, I’d ssh into the server and again run that nc command to ensure that the newly forwarded socket was readable from the login node. After this, again with more trial and error, I tried running a second command to forward that (now working) socket to my host. That eventually looked like this:

    # And another for the local socket
    $ ssh -NT -L 8899:/home/dinosaur/login-node.sock user@server

    And then (all together now!) I tried putting them together.

    $ ssh -NT -L 8899:/home/dinosaur/login-node.sock user@server ssh isolated-node \\
        -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock
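    Pulling the pieces together, the full flow looks something like this - the hostnames and socket paths are illustrative:

    # 1. On the isolated compute node: serve the notebook over a Unix socket
    $ jupyter notebook --sock /home/dinosaur/jupyter.sock --no-browser

    # 2. From your laptop: map local port 8899 to a socket on the login node, and
    #    chain a second ssh that maps that socket to the notebook's socket
    $ ssh -NT -L 8899:/home/dinosaur/login-node.sock user@server ssh isolated-node \\
        -NT -L /home/dinosaur/login-node.sock:/home/dinosaur/jupyter.sock

    # 3. Open http://localhost:8899 locally (you may also need the notebook's token)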

    And then I spent some time integrating it into tunel, and surprise! The first implementation didn’t work. The first bug was that I needed to clean up old sockets each time the “same” app was run (determined by the job name and organizational namespace, so the user can only run one of a particular interactive app at once, and not forget about previous runs). The second issue was about opening the tunnel - it didn’t seem to work if the process exited and/or it was run in a subshell (which also probably exits). I realized that (for the time being) running this connection step on behalf of the user probably wasn’t the right way to go, since it’s something the user should have more control over. If the user hasn’t added something like an RSA key to ~/.ssh/authorized_keys on their clusters, it would also ask for a password interactively, making it harder for me to manage. So for simplicity’s sake, and assuming that we really should put the user in control of deciding when to start/stop the tunnel, I simply print the full ssh command in the terminal and let them copy and paste it. A successful connection might then prompt them for their password for that second ssh, which (by default) I don’t think carries forward auth from the first.

    So that was my adventure! Mind you, this entire adventure was only about two days, and that included time to write this post, so I still have lots in front of me to work on. However, with these updated commands (and some nice tweaks from Python’s rich library) I quickly had a nice set of commands to run and stop an app with an interactive jupyter notebook, and using sockets on isolated nodes!

    $ tunel run-app server slurm/socket/jupyter
    $ tunel stop-app server slurm/socket/jupyter

    As a sidenote, one thing I like about rich is that it treats the aesthetic as a first-class citizen. So many tools just don’t consider this, and I love that with rich I can think about colors, presentation, and even animations like spinners!

    Getting a socket working means I’ll be able to continue working on this library (hooray!), so if you have ideas or requests for apps you’d like to run on HPC, assuming just this basic technology, please give me a ping and I’d love to chat and support them. I’m also going to be requesting an allocation on the Open Science Grid, which hopefully will give me other kinds of clusters to test on. I hope this was interesting to read, thanks for doing that!

    ", + "url": "https://hpc.social/personal-blog/2022/ssh-tunnels/", + + + + + + "date_published": "2022-06-26T13:30:00-06:00", + "date_modified": "2022-06-26T13:30:00-06:00", + + "author": "Vanessasaurus" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/research-software-registries/", + "title": "Research Software Registries", + "summary": null, + "content_text": "This post spurred from some original thinking about research software registries, and my recent discovery of the SciCodes Consortium, which I’m excited to find (and a bit surprised I didn’t earlier given my experience with research software and registries)! Since I’ve developed registries and been involved extensively in communities that develop standards and tooling for them, I’ve naturally been ruminating over ideas for several months, and hoping to find others that are motivated to think about similar things. This is the motivation of this post - to ruminate, share my thinking, and think together about ideas. You can read the content, or listen to the ideas below.vsoch · Research Software RegistriesWhy do we want research software registries?Research software registries have value when they are deployed for a specific context. However,I’m not convinced that a research software registry, at the most basic form providing archives with DOIS and metadata, is a useful thing in and of itself. It’s adding complexity and redundancy to an already cluttered ecosystem. The reason is because the source of truth of software is usually the source code in version control, e.g., the GitHub repository, which often already has support for features we need to enable easy citation (CITATION.cff), tagged releases, and programmatically accessible metadata. In this context, any kind of registry that provides another identifier and points to the first is providing redundant information. The only potential benefit is grouping and curation, which I would then argue should still point to the version control and/or a specific release as a source of truth.I’m also not convinced that we have established an actual use case of “searching a registry for software.” What happens in labs and communities is that you establish communities around the software, and then there are established workflows or slack communities or GitHub organizations to join around that. Most labs already have chosen languages, and even software pipelines that new members extend or work on. I would even go as far to say that for some (myself included) I don’t find research software, but it finds me. It appears as a link in some social media or chat channel, and I click the link and then there are about 15 seconds during which I make a determination if the software can help me to solve a problem that I have, or if it looks easy, professional, and/or fun and I simply want to try it out. If the answer is “yes” then I add it to a list in a Google Document with other things to try out when I have time. If not, I close the lab and life moves on. But I want to point out that nowhere in this workflow do I explicitly go looking for software. 
The software often finds me, and then I keep a mental cache of “tools that I’ve seen” and go back to it when the use case arises.So being able to answer this question about wanting research software registries is especially challenging because I’m not sure I’ve ever wanted one.Unless there is a specific kind of context around a registry (e.g., search for a specific name in a package manager to use, or look for an already assembled workflow) I haven’t been able to convince myself (yet) that I would find a use for one. I could be wrong about this, however, because as we know, people (myself included) are fairly bad at predicting the future, and perhaps there could be some future where “checking a research software registry” is a part of a daily workflow. I am skeptical because I think that a context is needed. Even if some central source of software ability truth was established, would it not be the case that a graduate student or researcher needs to go there with a use case or context in mind? I can’t imagine just mindlessly browsing for the sake of browsing. It’s akin to search engines - we are usually looking for something very specific. We don’t search without a purpose. The question here then is, what is the purpose?Research Software Registries with a PurposeA very good example of purpose comes down to workflows. This is the “I need to perform this specific function and I want to use what many others have done before me and not re-invent the wheel.” The minimum example of a workflow registry would be a search interface that indexes pipelines that are perhaps stored in version control. And extended version of that includes being able to provide structured inputs, outputs, and arguments, so the registry can programmatically provide this information to tools. You can then also quickly see how changing this to be general inputs/outputs of software (and functions within) and entrypoints of containers can quickly become a more generalized registry for software that could be used by any workflow manager that knows how to consume its information. However, there is a fine line here, because when we talk about I/O we are goingsquarely into workflow management territoty, and again in my opinion, we have to be careful about that scope. The closest thing that comes to mind for providing workflows as a service is something like openneuro that has a beautiful idea of “Get your data into this standard format and we will serve it and provide other easy ways to analyze it.” This kind of success story tells me that perhaps there is something to say for developing anything related to processing or pipelines in the context of a community. You can’t create the perfect registry for every scientific discipline, or perhaps you can do a mediocre job at trying, but perhaps if you scope to a specific one you can do a very good job. I’ve found the same to be true with software - it’s often better to do one or few things very well than more things kind of mediocre.A Provider of Identifiers?I’m skeptical when I hear that people want to apply our traditional model of publication (e.g., having a DOI) to software. The reason isn’t because I don’t value means to support reproducibility (and knowing the exact version of something that was used) but rather that we already have means to tag specific versions of software, and means that fit into a well-established ecosystem: package managers, versions, and releases. To think that a single frozen version of software is “the correct unit to provide” I also disagree with. 
Software is a living, and changing entity, and when it truly does “freeze” and stops being worked on, unlike a DOI in the academic space, this is sort of its death. The correct entrypoint for a piece of software, in my opinion, is the current version on version control, from where you could decide to pin a particular release or install a particular version from a package manager. But to provide a single frozen DOI that is wrapping some other version / release of the software? It doesn’t make sense to me. It’s adding additional complexity that’s not needed. So my opinion (as I’ve shared before) is that we should be thinking more about preserving specific timepoints in package managers, and not adding on an artificially created layer of “DOI” that seems (in my opinion) more of a reflection of our need to shove things into an academic framework we are comfortable with than anything else.So (I hope) that the purpose of a research software registry would not just be to provide DOIs. That doesn’t help me get my work done at the end of the day. All that said, I don’t think there can be a singular answer for purpose. I think the purpose ultimately comes down to the institution (or community) and the specific goals of the registry. For this reason there is no one answer for what a registry should look like or provide, and it is (or will be) challenging to define attributes that “any registry should have.”What is my purpose?You cut butter!Just kidding :_) I’ve been ruminating on this idea for quite some time, and namely because I’m motivated to build a new kind of research software registry, but first I need to convince myself of a meaningful purpose. While I don’t have my convincing answer yet (but I do have a sense of direction) the way I’ve been thinking about this is to provide a set of questions or use cases that seem plausible. It seems like most people are asking “What kind of information should we have in a registry” but I think this isn’t exactly the question I’m interested in - I want to know: What do you want to do next with the software you find?This is important because it’s going to drive the context and purpose of the registry. Here are a few examples: I want to quickly try this out → a registry that can deploy a developer environment I want to find if this is in a package manager → a reproducible install I want to use this with a workflow manager → this is some kind of workflow hub I want to see inputs / outputs / entrypoints → support to workflow tools I want to install this on HPC → I want a module deployment or similar I want to cite this → use case akin to CITATION.cff I want to understand dependencies of an ecosystem → a registry deploying something akin to citelang I want to see all my options to do X → a domain or categorical registry I want to see new and noteworthy libraries → a registry with advanced filtering and ranking I want to see change over time → a registry with a layer of analysis tools Indeed many of the above contexts require additional information. For example, if we want to be able to ask what software is specifically used to perform X, we need a set of functions that are common to a domain, and then to annotate specific software (or even functions) that do it. If we want to then ask “Which of these is the best?” we need to then generate benchmarks to measure this functionality. E.g., how long does it take to run? What are the inputs and outputs and are they correct? What are resource needs? 
It would be an incredibly cool thing to be able to ask these questions, but an enormous amount of work for any particular scientific domain. As an example of thinking about functional needs, we might look to brain imaging, which is arguably a subfield of neuroinformatics. We might define custom processing functions like thresholding, registration, normalization, or creating regions of interest, tag specific functions that can do each, and then collect and share metrics about the degree to which easy is successful to do each. Arguably, if I wanted to do this I would create wrappers to workflow managers (akin to Snakemake Wrappers) that not only measure metrics, but make it easy for people to quickly use it in their work.It needs to be easyWhether I’m thinking about being a user of a research software registry or a developer, it just needs to be easy. Here are some ideas around that.Re-inventing the wheel?I come with the experience of deploying a custom container registry (Singularity Hub) years ago, and then being involved in standards committees (the Open Container Initiative) that develop generalized specifications that now drive the most common software (container) registries. I’ve also developed registry proxies that do interesting things, along with a Python OCI registry, and I’m the main developer for oras-py (ORAS == OCI Registry as Storage). So believe me when I say that in terms of storing blobs and metadata about them, I don’t think we should re-invent the wheel. Any new registry I create is going to start with these standards. You might disagree, and that’s OK. But I think people have thought long and hard about these things, and we are stronger for working together on them over always making our own new thing.As a supplement to that, I want to point out one of the biggest challenges in our community. The majority of research software, I would argue, doesn’t get used beyond the lab it’s created for. Said lab might submit or include it in a paper, and then they get their publication and move on. This is reflective of many things, and I’ll review them here. The first is our funding model - we maybe can fund working on a piece of software only up until the funding dries out, and then it becomes an abandoned repository, if it’s made publicly available. The second is our incentive model - the academic community is focused on writing papers, so once you get there, you don’t have reason to consider the long term impact of the software. The third is communication. It is actually much easier to throw together your own library than to have to search and then try contributing to someone else’s.I say this because I don’t think the way that things are are necessarily the fault of anyone - we are all agents responding to incentives and resources available.But then on the flip side - these observations beg to ask what leads to software that is successful, on a community level? I think a few things can happen. Either someone puts time and energy into establishing community, period, meaning bringing together people that are working on common goals and explicitly asking “How can we do this together,” or what I’ve seen with more commercial open source - having enough money or power that you can create strong branding and community just by way of having the funds for it. I’ve talked about this a few times before and it’s not necessarily bad, but it’s unfair at best. 
Software that maybe would not be successful by its own merit rises to the top, and really great software that doesn’t have those resources does not. That said, I’ve also seen sort of mediocre software get much better and earn its reputation, so I can’t say it’s a completely wrong dynamic.Is the answer Mooooar Metadata?As we design the “perfect set of information” we want provided for any piece of software, we need to put people first.We have to ask ourselves what are people willing to do, and generally people aren’t wanting to spend inordinate amounts of extra time defining metadata or inputs/outputs for their custom scripts. This was a point also brought up by Paula in the SciCodes meeting and I am 100% behind it. If we require extensive metadata about software, it needs to be done in an automated fashion. In practice when I think of archives for software, I’m just not that motivated to provide more than the absolute minimum to click the “submit” button.Do people know what they want?One of the hardest things about this kind of problem is that people don’t often know what they want. And actually - I’d extend that to software in general. Think of common tools like git (version control) or containers.Could most people have told you in advance about the designs for these tools? I suspect likely not.This is often the game that software developers play - we imagine new ways of doing things that scratch an itchor have a problem that we have, and then hand over our duct taped laden prototype to others and we’re likehey, is this useful to you? And often the response in radio silence, but then sometimes it’s a resounding, “WoW, yes!”So I’m going to throw out this idea that people generally don’t know what they want until they see it, touch it and try it.This is also why I want to inspire you to take some time to think about your specific needs and motivation for wanting(on a high level) to browse and interact with research software. What are the compelling reasons for this registry,for you?This is actually really fun to think about, because what even is a research software registry? Is it a place to find software to plug into workflows? Does it provide ABI or more general function signatures to help you plug into workflows? Does it provide a citation? A container? An interactive environment? Dependency graph? Something else? This is inded why this problem is so hard - there are so many ways to thinkabout this basic concept. And that’s kind of what makes it fun too? But also what makes it hard. Personally speaking sinceI’m more interested in building things I find myself ruminating about details for a specific use case. And since I’m a developer and craving better support for developer environments, this tends to be where my brain goes. And have you noticed I haven’t givena direct answer for what is a research software registry? It’s 1. because I don’t know, and 2. because we are trying to define a registry for a kind of output that we don’t even have an agreed upon definition for yet! So perhaps the definition will happen on the level of the deployment or institution? Anyway, I hope you take the opportunity to discuss with your peers, pets, and even yourself, to try and answer this question.SummaryTo summarize, I’m spending a lot of time thinking about this, and less in an “I’m an academic that wants DOIs and metadata” and more in a “I am a software engineer that wants to build something that I actually find useful.” Might I scratch itches along the way? Sure. 
And I do have some early ideas that I plan to hack on before sharing publicly. In the meantime, I do hope you are interested in some of these ideas and take time to write or introspect yourself.And on a higher level, I really like this format of writing and speaking, where the speaking isn’t formal enough to be a talk that you put together and practice for weeks (I put this all together in an afternoon) but it still is a media format that literally gives a voice.", + "content_html": "

    This post was spurred by some original thinking about research software registries, and my recent discovery of the SciCodes Consortium, which I’m excited to have found (and a bit surprised I didn’t find earlier, given my experience with research software and registries)! Since I’ve developed registries and been involved extensively in communities that develop standards and tooling for them, I’ve naturally been ruminating over ideas for several months, and hoping to find others that are motivated to think about similar things. This is the motivation of this post - to ruminate, share my thinking, and think together about ideas. You can read the content, or listen to the ideas below.

    Why do we want research software registries?

    Research software registries have value when they are deployed for a specific context. However, I’m not convinced that a research software registry, in its most basic form providing archives with DOIs and metadata, is a useful thing in and of itself. It adds complexity and redundancy to an already cluttered ecosystem. The reason is that the source of truth for software is usually the source code in version control, e.g., the GitHub repository, which often already has support for features we need to enable easy citation (CITATION.cff), tagged releases, and programmatically accessible metadata. In this context, any kind of registry that provides another identifier and points to the first is providing redundant information. The only potential benefit is grouping and curation, which I would then argue should still point to the version control and/or a specific release as a source of truth.
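    As a point of reference, the version-control-native citation metadata I have in mind is tiny - a minimal CITATION.cff might look like the sketch below (the project name, author, dates, and URL are made up):

    # CITATION.cff (minimal, illustrative example)
    cff-version: 1.2.0
    message: "If you use this software, please cite it as below."
    title: "my-research-tool"
    version: "1.0.0"
    date-released: "2022-06-01"
    repository-code: "https://github.com/example/my-research-tool"
    authors:
      - family-names: "Dinosaur"
        given-names: "Vanessa"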

    I’m also not convinced that we have established an actual use case of “searching a registry for software.” What happens in labs and communities is that you establish communities around the software, and then there are established workflows or slack communities or GitHub organizations to join around that. Most labs already have chosen languages, and even software pipelines that new members extend or work on. I would even go as far as to say that for some (myself included) I don’t find research software, but it finds me. It appears as a link in some social media or chat channel, and I click the link and then there are about 15 seconds during which I make a determination: can the software help me to solve a problem that I have, or does it look easy, professional, and/or fun enough that I simply want to try it out? If the answer is “yes” then I add it to a list in a Google Document with other things to try out when I have time. If not, I close the tab and life moves on. But I want to point out that nowhere in this workflow do I explicitly go looking for software. The software often finds me, and then I keep a mental cache of “tools that I’ve seen” and go back to it when the use case arises.

    So being able to answer this question about wanting research software registries is especially challenging because I’m not sure I’ve ever wanted one. Unless there is a specific kind of context around a registry (e.g., search for a specific name in a package manager to use, or look for an already assembled workflow) I haven’t been able to convince myself (yet) that I would find a use for one. I could be wrong about this, however, because as we know, people (myself included) are fairly bad at predicting the future, and perhaps there could be some future where “checking a research software registry” is a part of a daily workflow. I am skeptical because I think that a context is needed. Even if some central source of truth for software were established, would it not be the case that a graduate student or researcher needs to go there with a use case or context in mind? I can’t imagine just mindlessly browsing for the sake of browsing. It’s akin to search engines - we are usually looking for something very specific. We don’t search without a purpose. The question here then is, what is the purpose?

    Research Software Registries with a Purpose

    A very good example of purpose comes down to workflows. This is the “I need to perform this specific function and I want to use what many others have done before me and not re-invent the wheel.” The minimum example of a workflow registry would be a search interface that indexes pipelines that are perhaps stored in version control. An extended version of that includes being able to provide structured inputs, outputs, and arguments, so the registry can programmatically provide this information to tools (see the sketch below). You can then also quickly see how changing this to be general inputs/outputs of software (and functions within) and entrypoints of containers can quickly become a more generalized registry for software that could be used by any workflow manager that knows how to consume its information. However, there is a fine line here, because when we talk about I/O we are going squarely into workflow management territory, and again in my opinion, we have to be careful about that scope. The closest thing that comes to mind for providing workflows as a service is something like openneuro, which has the beautiful idea of “Get your data into this standard format and we will serve it and provide other easy ways to analyze it.” This kind of success story tells me that perhaps there is something to say for developing anything related to processing or pipelines in the context of a community. You can’t create the perfect registry for every scientific discipline, or perhaps you can do a mediocre job at trying, but if you scope to a specific one you can do a very good job. I’ve found the same to be true with software - it’s often better to do one or a few things very well than more things kind of mediocre.
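    As a sketch of what “structured inputs, outputs, and arguments” could mean in practice, here is an entirely hypothetical registry entry that a workflow manager might consume - the schema, names, and container reference are invented for illustration, not any real standard:

    # Hypothetical registry entry (illustrative schema)
    name: align-reads
    version: 0.2.0
    container: docker://example/align-reads:0.2.0
    inputs:
      - name: reads
        type: file
        format: fastq
    outputs:
      - name: alignments
        type: file
        format: bam
    entrypoint: "align-reads --reads {inputs.reads} --out {outputs.alignments}"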

    A Provider of Identifiers?

    I’m skeptical when I hear that people want to apply our traditional model of publication (e.g., having a DOI) to software. The reason isn’t that I don’t value means to support reproducibility (and knowing the exact version of something that was used), but rather that we already have means to tag specific versions of software, and means that fit into a well-established ecosystem: package managers, versions, and releases. I also disagree with the idea that a single frozen version of software is “the correct unit to provide.” Software is a living and changing entity, and when it truly does “freeze” and stops being worked on, unlike a DOI in the academic space, this is sort of its death. The correct entrypoint for a piece of software, in my opinion, is the current version in version control, from where you could decide to pin a particular release or install a particular version from a package manager. But to provide a single frozen DOI that is wrapping some other version / release of the software? It doesn’t make sense to me. It’s adding additional complexity that’s not needed. So my opinion (as I’ve shared before) is that we should be thinking more about preserving specific timepoints in package managers, and not adding on an artificially created layer of “DOI” that seems (in my opinion) more a reflection of our need to shove things into an academic framework we are comfortable with than anything else.

    So (I hope) that the purpose of a research software registry would not just be to provide DOIs. That doesn’t help me get my work done at the end of the day. All that said, I don’t think there can be a singular answer for purpose. I think the purpose ultimately comes down to the institution (or community) and the specific goals of the registry. For this reason there is no one answer for what a registry should look like or provide, and it is (or will be) challenging to define attributes that “any registry should have.”

    What is my purpose?

    You cut butter!

    Just kidding :_) I’ve been ruminating on this idea for quite some time, namely because I’m motivated to build a new kind of research software registry, but first I need to convince myself of a meaningful purpose. I don’t have a convincing answer yet (though I do have a sense of direction), and the way I’ve been thinking about this is to provide a set of questions or use cases that seem plausible. It seems like most people are asking “What kind of information should we have in a registry?” but I think this isn’t exactly the question I’m interested in - I want to know:

    What do you want to do next with the software you find?

    This is important because it’s going to drive the context and purpose of the registry. Here are a few examples:

    1. I want to quickly try this out → a registry that can deploy a developer environment
    2. I want to find if this is in a package manager → a reproducible install
    3. I want to use this with a workflow manager → this is some kind of workflow hub
    4. I want to see inputs / outputs / entrypoints → support to workflow tools
    5. I want to install this on HPC → I want a module deployment or similar
    6. I want to cite this → use case akin to CITATION.cff
    7. I want to understand dependencies of an ecosystem → a registry deploying something akin to citelang
    8. I want to see all my options to do X → a domain or categorical registry
    9. I want to see new and noteworthy libraries → a registry with advanced filtering and ranking
    10. I want to see change over time → a registry with a layer of analysis tools

    Indeed many of the above contexts require additional information. For example, if we want to be able to ask what software is specifically used to perform X, we need a set of functions that are common to a domain, and then to annotate specific software (or even functions) that do it. If we want to then ask “Which of these is the best?” we need to then generate benchmarks to measure this functionality. E.g., how long does it take to run? What are the inputs and outputs, and are they correct? What are the resource needs? It would be an incredibly cool thing to be able to ask these questions, but an enormous amount of work for any particular scientific domain. As an example of thinking about functional needs, we might look to brain imaging, which is arguably a subfield of neuroinformatics. We might define custom processing functions like thresholding, registration, normalization, or creating regions of interest, tag specific functions that can do each, and then collect and share metrics about how successfully each does its job. Arguably, if I wanted to do this I would create wrappers for workflow managers (akin to Snakemake Wrappers) that not only measure metrics, but make it easy for people to quickly use them in their work.

    It needs to be easy

    Whether I’m thinking about being a user of a research software registry or a developer, it just needs to be easy. Here are some ideas around that.

    Re-inventing the wheel?

    I come with the experience of deploying a custom container registry (Singularity Hub) years ago, and then being involved in standards committees (the Open Container Initiative) that develop generalized specifications that now drive the most common software (container) registries. I’ve also developed registry proxies that do interesting things, along with a Python OCI registry, and I’m the main developer for oras-py (ORAS == OCI Registry as Storage). So believe me when I say that in terms of storing blobs and metadata about them, I don’t think we should re-invent the wheel. Any new registry I create is going to start with these standards. You might disagree, and that’s OK. But I think people have thought long and hard about these things, and we are stronger for working together on them than for always making our own new thing.
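    As a concrete example of leaning on those standards, pushing an arbitrary research artifact to an OCI registry with the oras CLI is a one-liner - the registry address and filename here are hypothetical:

    # Push an artifact (e.g., a metadata file) to an OCI registry as storage
    $ oras push localhost:5000/my-software/metadata:1.0.0 codemeta.json

    # And retrieve it later
    $ oras pull localhost:5000/my-software/metadata:1.0.0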

    As a supplement to that, I want to point out one of the biggest challenges in our community. The majority of research software, I would argue, doesn’t get used beyond the lab it’s created for. Said lab might submit or include it in a paper, and then they get their publication and move on. This is reflective of many things, and I’ll review them here. The first is our funding model - we can maybe fund work on a piece of software only until the funding dries out, and then it becomes an abandoned repository, if it’s made publicly available at all. The second is our incentive model - the academic community is focused on writing papers, so once you get there, you don’t have reason to consider the long term impact of the software. The third is communication. It is actually much easier to throw together your own library than to have to search for and then try contributing to someone else’s. I say this because I don’t think the way things are is necessarily the fault of anyone - we are all agents responding to the incentives and resources available.

    But then on the flip side - these observations beg the question: what leads to software that is successful on a community level? I think a few things can happen. Either someone puts time and energy into establishing community, period, meaning bringing together people that are working on common goals and explicitly asking “How can we do this together,” or, as I’ve seen with more commercial open source, having enough money or power that you can create strong branding and community just by way of having the funds for it. I’ve talked about this a few times before and it’s not necessarily bad, but it’s unfair at best. Software that maybe would not be successful by its own merit rises to the top, and really great software that doesn’t have those resources does not. That said, I’ve also seen sort of mediocre software get much better and earn its reputation, so I can’t say it’s a completely wrong dynamic.

    Is the answer Mooooar Metadata?

    As we design the “perfect set of information” we want provided for any piece of software, we need to put people first. We have to ask ourselves what people are willing to do, and generally people don’t want to spend inordinate amounts of extra time defining metadata or inputs/outputs for their custom scripts. This was a point also brought up by Paula in the SciCodes meeting, and I am 100% behind it. If we require extensive metadata about software, it needs to be collected in an automated fashion. In practice, when I think of archives for software, I’m just not that motivated to provide more than the absolute minimum to click the “submit” button.

    Do people know what they want?

    One of the hardest things about this kind of problem is that people don’t often know what they want. And actually - I’d extend that to software in general. Think of common tools like git (version control) or containers. Could most people have told you in advance about the designs for these tools? I suspect likely not. This is often the game that software developers play - we imagine new ways of doing things that scratch an itch or solve a problem that we have, and then hand over our duct-tape-laden prototype to others and say “hey, is this useful to you?” Often the response is radio silence, but then sometimes it’s a resounding “Wow, yes!” So I’m going to throw out this idea that people generally don’t know what they want until they see it, touch it, and try it. This is also why I want to inspire you to take some time to think about your specific needs and motivation for wanting (on a high level) to browse and interact with research software. What are the compelling reasons for this registry, for you?

    This is actually really fun to think about, because what even is a research software registry? Is it a place to find software to plug into workflows? Does it provide ABI or more general function signatures to help you plug into workflows? Does it provide a citation? A container? An interactive environment? A dependency graph? Something else? This is indeed why this problem is so hard - there are so many ways to think about this basic concept. And that’s kind of what makes it fun too? But also what makes it hard. Personally speaking, since I’m more interested in building things, I find myself ruminating about details for a specific use case. And since I’m a developer craving better support for developer environments, this tends to be where my brain goes. And have you noticed I haven’t given a direct answer for what a research software registry is? That’s 1. because I don’t know, and 2. because we are trying to define a registry for a kind of output that we don’t even have an agreed-upon definition for yet! So perhaps the definition will happen at the level of the deployment or institution? Anyway, I hope you take the opportunity to discuss with your peers, pets, and even yourself, to try and answer this question.

    Summary

    To summarize, I’m spending a lot of time thinking about this, and less in an “I’m an academic that wants DOIs and metadata” way and more in an “I am a software engineer that wants to build something I actually find useful” way. Might I scratch itches along the way? Sure. And I do have some early ideas that I plan to hack on before sharing publicly. In the meantime, I do hope you are interested in some of these ideas and take time to write about or introspect on them yourself.

    And on a higher level, I really like this format of writing and speaking, where the speaking isn’t formal enough to be a talk that you put together and practice for weeks (I put this all together in an afternoon), but it is still a medium that literally gives a voice.

    ", + "url": "https://hpc.social/personal-blog/2022/research-software-registries/", + + + + + + "date_published": "2022-06-19T13:15:00-06:00", + "date_modified": "2022-06-19T13:15:00-06:00", + + "author": "Vanessasaurus" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/mnt-reform-2-part-deux/", + "title": "MNT Reform 2 - part deux", + "summary": null, + "content_text": "A few days back I posted some of my initial thoughts of the MNT Reform 2 laptop which justrecently arrived. I ran the usual battery of tests on the laptop including the High PerformanceLinpack (HPL) of course just for kicks.At that time, I made no attempt to optmize HPL. I simply went with the OS supplied gcc andmath libraries. My next step was to look at how I could improve my HPL result usingthe Arm compiler for Linux and the Arm performance libraries. Here I’ll walk through thosesteps from installing the Arm tools, to compiling and running HPL - and all of the smalldetails in between.(1) To start, I downloaded the latest verion of the Arm compiler for Linux package from here.This was the package with the filename: arm-compiler-for-linux_22.0.2_Ubuntu-20.04_aarch64.tar.(2) After uncompressing arm-compiler-for-linux_22.0.2_Ubuntu-20.04_aarch64.tar, I ran theinstallation command ./arm-compiler-for-linux_22.0.2_Ubuntu-20.04.sh -a which installed thesoftware to /opt/arm on the system. Note that the Arm compilers for Linux ship withmodule files to make setting up the envionment for compiling easy. To support thisI had to install the OS environment-modules package with apt-get install environment-modules(3) In order to load the module for the Arm compiler for Linux, the following steps arenecessary. This assumes that the Arm compiler for Linux is installed in /opt/arm. root@reform:/# module avail----------------------------------- /usr/share/modules/modulefiles ------------------------------------dot module-git module-info modules null use.own Key:modulepath root@reform:/# export MODULEPATH=/opt/arm/modulefiles:$MODULEPATHroot@reform:/# module avail---------------------------------------- /opt/arm/modulefiles -----------------------------------------acfl/22.0.2 binutils/11.2.0 gnu/11.2.0 ----------------------------------- /usr/share/modules/modulefiles ------------------------------------dot module-git module-info modules null use.own Key:modulepath root@reform:/# module load acfl/22.0.2Loading acfl/22.0.2 Loading requirement: binutils/11.2.0root@reform:/# echo $PATH/opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/bin:/opt/arm/gcc-11.2.0_Generic-AArch64_Ubuntu-20.04_aarch64-linux/binutils_bin:/root/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binroot@reform:/# armclang --versionArm C/C++/Fortran Compiler version 22.0.2 (build number 1776) (based on LLVM 13.0.0)Target: aarch64-unknown-linux-gnuThread model: posixInstalledDir: /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/bin(4) Now we shift our focus to Open MPI. Open MPI is an open source distribution of the messagepassing interface (MPI) library for writing parallel applications. We will compile HPL against thisOpen MPI version. For this, I downloaded the latest Open MPI version (4.1.4) from here.By default, Open MPI compiles with support for the SLURM workload manager. My Reform hasIBM Spectrum LSF installed as the workload scheduler. 
In order to enable LSF support in Open MPI, weneed to specify the appropriate configure flags (see below).root@reform:/opt/HPC/openmpi-4.1.4# ./configure --prefix=/opt/HPC/openmpi-4.1.4 --with-lsf=/opt/ibm/lsf/10.1 --with-lsf-libdir=/opt/ibm/lsf/10.1/linux3.12-glibc2.17-armv8/libroot@reform:/opt/HPC/openmpi-4.1.4# make -j 4......root@reform:/opt/HPC/openmpi-4.1.4# make install......(5) After completing the compilation of Open MPI, the ompi_info command is run to check ifsupport for LSF has been enabled. Note that you must ensure to source the LSF environment(i.e. . ./profile.lsf) before running ompi_info or the LSF libraries won’t be found.root@reform:/opt/HPC/openmpi-4.1.4# ./bin/ompi_info |grep -i lsf Configure command line: '--prefix=/opt/HPC/openmpi-4.1.4' '--with-lsf=/opt/ibm/lsf/10.1' '--with-lsf-libdir=/opt/ibm/lsf/10.1/linux3.12-glibc2.17-armv8/lib' MCA ess: lsf (MCA v2.1.0, API v3.0.0, Component v4.1.4) MCA plm: lsf (MCA v2.1.0, API v2.0.0, Component v4.1.4) MCA ras: lsf (MCA v2.1.0, API v2.0.0, Component v4.1.4)(6) Next, I downloaded the latest HPL package from here.I uncompressed the the package hpl-2.3.tar.gz in the /opt/HPC directory. Next, I had to createa new Makefile for HPL which would use the Arm compiler for Linux and optmized math libraries.A copy of Make.imx8qm follows below.# # -- High Performance Computing Linpack Benchmark (HPL) # HPL - 2.3 - December 2, 2018 # Antoine P. Petitet # University of Tennessee, Knoxville # Innovative Computing Laboratory # (C) Copyright 2000-2008 All Rights Reserved # # -- Copyright notice and Licensing terms: # # Redistribution and use in source and binary forms, with or without# modification, are permitted provided that the following conditions# are met: # # 1. Redistributions of source code must retain the above copyright# notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright# notice, this list of conditions, and the following disclaimer in the# documentation and/or other materials provided with the distribution. # # 3. All advertising materials mentioning features or use of this# software must display the following acknowledgement: # This product includes software developed at the University of# Tennessee, Knoxville, Innovative Computing Laboratory. # # 4. The name of the University, the name of the Laboratory, or the# names of its contributors may not be used to endorse or promote# products derived from this software without specific written# permission. # # -- Disclaimer: # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,# DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ####################################################################### # ----------------------------------------------------------------------# - shell --------------------------------------------------------------# ----------------------------------------------------------------------#SHELL = /bin/sh#CD = cdCP = cpLN_S = ln -sMKDIR = mkdirRM = /bin/rm -fTOUCH = touch## ----------------------------------------------------------------------# - Platform identifier ------------------------------------------------# ----------------------------------------------------------------------#ARCH = imx8qm## ----------------------------------------------------------------------# - HPL Directory Structure / HPL library ------------------------------# ----------------------------------------------------------------------#TOPdir = /opt/HPC/hpl-2.3INCdir = /opt/HPC/hpl-2.3/includeBINdir = /opt/HPC/hpl-2.3/bin/$(ARCH)LIBdir = /opt/HPC/hpl-2.3/lib/$(ARCH)#HPLlib = /opt/HPC/hpl-2.3/lib/libhpl.a ## ----------------------------------------------------------------------# - Message Passing library (MPI) --------------------------------------# ----------------------------------------------------------------------# MPinc tells the C compiler where to find the Message Passing library# header files, MPlib is defined to be the name of the library to be# used. The variable MPdir is only used for defining MPinc and MPlib.#MPdir = /opt/HPC/openmpi-4.1.4MPinc = /opt/HPC/openmpi-4.1.4/includeMPlib = /opt/HPC/openmpi-4.1.4/lib/libmpi.so## ----------------------------------------------------------------------# - Linear Algebra library (BLAS or VSIPL) -----------------------------# ----------------------------------------------------------------------# LAinc tells the C compiler where to find the Linear Algebra library# header files, LAlib is defined to be the name of the library to be# used. The variable LAdir is only used for defining LAinc and LAlib.#LAdir =LAinc =# LAlib = -lamath -lm -mcpu=nativeLAlib = ## ----------------------------------------------------------------------# - F77 / C interface --------------------------------------------------# ----------------------------------------------------------------------# You can skip this section if and only if you are not planning to use# a BLAS library featuring a Fortran 77 interface. Otherwise, it is# necessary to fill out the F2CDEFS variable with the appropriate# options. 
**One and only one** option should be chosen in **each** of# the 3 following categories:## 1) name space (How C calls a Fortran 77 routine)## -DAdd_ : all lower case and a suffixed underscore (Suns,# Intel, ...), [default]# -DNoChange : all lower case (IBM RS6000),# -DUpCase : all upper case (Cray),# -DAdd__ : the FORTRAN compiler in use is f2c.## 2) C and Fortran 77 integer mapping## -DF77_INTEGER=int : Fortran 77 INTEGER is a C int, [default]# -DF77_INTEGER=long : Fortran 77 INTEGER is a C long,# -DF77_INTEGER=short : Fortran 77 INTEGER is a C short.## 3) Fortran 77 string handling## -DStringSunStyle : The string address is passed at the string loca-# tion on the stack, and the string length is then# passed as an F77_INTEGER after all explicit# stack arguments, [default]# -DStringStructPtr : The address of a structure is passed by a# Fortran 77 string, and the structure is of the# form: struct {char *cp; F77_INTEGER len;},# -DStringStructVal : A structure is passed by value for each Fortran# 77 string, and the structure is of the form:# struct {char *cp; F77_INTEGER len;},# -DStringCrayStyle : Special option for Cray machines, which uses# Cray fcd (fortran character descriptor) for# interoperation.#F2CDEFS = ## ----------------------------------------------------------------------# - HPL includes / libraries / specifics -------------------------------# ----------------------------------------------------------------------#HPL_INCLUDES = -I$(INCdir) -I$(INCdir)/$(ARCH) $(LAinc) -I$(MPinc) -I/opt/arm/armpl-22.0.2_AArch64_Ubuntu-20.04_gcc_aarch64-linux/include/HPL_LIBS = $(HPLlib) $(LAlib) $(MPlib)## - Compile time options -----------------------------------------------## -DHPL_COPY_L force the copy of the panel L before bcast;# -DHPL_CALL_CBLAS call the cblas interface;# -DHPL_CALL_VSIPL call the vsip library;# -DHPL_DETAILED_TIMING enable detailed timers;## By default HPL will:# *) not copy L before broadcast,# *) call the BLAS Fortran 77 interface,# *) not display detailed timing information.#HPL_OPTS =## ----------------------------------------------------------------------#HPL_DEFS = $(F2CDEFS) $(HPL_OPTS) $(HPL_INCLUDES)## ----------------------------------------------------------------------# - Compilers / linkers - Optimization flags ---------------------------# ----------------------------------------------------------------------#CC = armclang CCNOOPT = $(HPL_DEFS)CCFLAGS = $(HPL_DEFS) -O3 -larmpl_lp64 -lamath -lm #LINKER = armclang -O3 -armpl -lamath -lm LINKFLAGS = $(CCFLAGS)#ARCHIVER = arARFLAGS = rRANLIB = echo## ----------------------------------------------------------------------(7) To compile HPL with the above Makefile is as simple as running the appropriate make command andspecify the architecture imx8qm.root@reform:/opt/HPC/hpl-2.3# make arch=imx8qm......(8) Barring any errors, we should now have an xhpl binary in under the /opt/HPC/hpl-2.3/bin/imx8qmdirectory.root@reform:/opt/HPC/hpl-2.3/bin/imx8qm# pwd/opt/HPC/hpl-2.3/bin/imx8qmroot@reform:/opt/HPC/hpl-2.3/bin/imx8qm# ls -latotal 156drwxr-xr-x 2 root root 4096 Jun 8 13:30 .drwxr-xr-x 3 root root 4096 Jun 8 13:20 ..-rw-r--r-- 1 root root 1454 Jun 8 13:30 HPL.dat-rwxr-xr-x 1 root root 146960 Jun 8 13:24 xhplroot@reform:/opt/HPC/hpl-2.3/bin/imx8qm# ldd ./xhpl\tlinux-vdso.so.1 (0x0000007faa7b1000)\tlibamath_aarch64.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libamath_aarch64.so (0x0000007faa5ef000)\tlibm.so.6 => /lib/aarch64-linux-gnu/libm.so.6 
(0x0000007faa520000)\tlibarmpl_lp64.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/lib/clang/13.0.0/armpl_links/lib/libarmpl_lp64.so (0x0000007fa3cd5000)\tlibmpi.so.40 => /usr/lib/aarch64-linux-gnu/libmpi.so.40 (0x0000007fa3b8f000)\tlibarmflang.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libarmflang.so (0x0000007fa3728000)\tlibomp.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libomp.so (0x0000007fa3649000)\tlibrt.so.1 => /lib/aarch64-linux-gnu/librt.so.1 (0x0000007fa3631000)\tlibdl.so.2 => /lib/aarch64-linux-gnu/libdl.so.2 (0x0000007fa361d000)\tlibpthread.so.0 => /lib/aarch64-linux-gnu/libpthread.so.0 (0x0000007fa35ed000)\tlibastring_aarch64.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libastring_aarch64.so (0x0000007fa35da000)\tlibc.so.6 => /lib/aarch64-linux-gnu/libc.so.6 (0x0000007fa345f000)\t/lib/ld-linux-aarch64.so.1 (0x0000007faa77e000)\tlibgcc_s.so.1 => /opt/arm/gcc-11.2.0_Generic-AArch64_Ubuntu-20.04_aarch64-linux/lib64/libgcc_s.so.1 (0x0000007fa343a000)\tlibopen-rte.so.40 => /usr/lib/aarch64-linux-gnu/libopen-rte.so.40 (0x0000007fa336c000)\tlibopen-pal.so.40 => /usr/lib/aarch64-linux-gnu/libopen-pal.so.40 (0x0000007fa32aa000)\tlibhwloc.so.15 => /usr/lib/aarch64-linux-gnu/libhwloc.so.15 (0x0000007fa3245000)\tlibstdc++.so.6 => /opt/arm/gcc-11.2.0_Generic-AArch64_Ubuntu-20.04_aarch64-linux/lib64/libstdc++.so.6 (0x0000007fa3030000)\tlibz.so.1 => /lib/aarch64-linux-gnu/libz.so.1 (0x0000007fa3006000)\tlibevent_core-2.1.so.7 => /usr/lib/aarch64-linux-gnu/libevent_core-2.1.so.7 (0x0000007fa2fbf000)\tlibutil.so.1 => /lib/aarch64-linux-gnu/libutil.so.1 (0x0000007fa2fab000)\tlibevent_pthreads-2.1.so.7 => /usr/lib/aarch64-linux-gnu/libevent_pthreads-2.1.so.7 (0x0000007fa2f98000)\tlibudev.so.1 => /usr/lib/aarch64-linux-gnu/libudev.so.1 (0x0000007fa2f5e000)(9) A default HPL.dat file should ber present in the directory /opt/HPC/hpl-2.3/bin/imx8qm. The fileHPL.dat is used to tune the benchmark problem size according to the system. A copy of theHPL.dat file I created follows below. This is suitable for the 4 GB memory configuration ofReform with 4 processor cores.HPLinpack benchmark input fileInnovative Computing Laboratory, University of TennesseeHPL.out output file name (if any) 6 device out (6=stdout,7=stderr,file)1 # of problems sizes (N)19000 Ns1 # of NBs192 NBs0 PMAP process mapping (0=Row-,1=Column-major)1 # of process grids (P x Q)2 Ps2 Qs16.0 threshold1 # of panel fact2 PFACTs (0=left, 1=Crout, 2=Right)1 # of recursive stopping criterium4 NBMINs (>= 1)1 # of panels in recursion2 NDIVs1 # of recursive panel fact.1 RFACTs (0=left, 1=Crout, 2=Right)1 # of broadcast1 BCASTs (0=1rg,1=1rM,2=2rg,3=2rM,4=Lng,5=LnM)1 # of lookahead depth1 DEPTHs (>=0)2 SWAP (0=bin-exch,1=long,2=mix)64 swapping threshold0 L1 in (0=transposed,1=no-transposed) form0 U in (0=transposed,1=no-transposed) form1 Equilibration (0=no,1=yes)8 memory alignment in double (> 0)##### This line (no. 32) is ignored (it serves as a separator). ######0 Number of additional problem sizes for PTRANS1200 10000 30000 values of N0 number of additional blocking sizes for PTRANS40 9 8 13 13 20 16 32 64 values of NB(10) Now we’re ready to execute the appropriate mpirun command to run the xhpl executable.We specify -np 4 to run across the 4 cores of the processor. 
With this better optimized run we’reseeing ~8.9 GFLOPS performance compared with ~4 GFLOPS for my previous runs where HPL was compiledwith the OS supplied GCC and Math libraries (ATLAS). Note that as this is roughly double the GFLOPSfrom my previous runs, it appears that there is an issue with double precision or perhapsvectorization with the non-optimized runs.gsamu@reform:/opt/HPC/hpl-2.3/bin/imx8qm$ mpirun -np 4 ./xhpl ================================================================================HPLinpack 2.3 -- High-Performance Linpack benchmark -- December 2, 2018Written by A. Petitet and R. Clint Whaley, Innovative Computing Laboratory, UTKModified by Piotr Luszczek, Innovative Computing Laboratory, UTKModified by Julien Langou, University of Colorado Denver================================================================================An explanation of the input/output parameters follows:T/V : Wall time / encoded variant.N : The order of the coefficient matrix A.NB : The partitioning blocking factor.P : The number of process rows.Q : The number of process columns.Time : Time in seconds to solve the linear system.Gflops : Rate of execution for solving the linear system.The following parameter values will be used:N : 19000 NB : 192 PMAP : Row-major process mappingP : 2 Q : 2 PFACT : Right NBMIN : 4 NDIV : 2 RFACT : Crout BCAST : 1ringM DEPTH : 1 SWAP : Mix (threshold = 64)L1 : transposed formU : transposed formEQUIL : yesALIGN : 8 double precision words--------------------------------------------------------------------------------- The matrix A is randomly generated for each test.- The following scaled residual check will be computed: ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N )- The relative machine precision (eps) is taken to be 1.110223e-16- Computational tests pass if scaled residuals are less than 16.0================================================================================T/V N NB P Q Time Gflops--------------------------------------------------------------------------------WR11C2R4 19000 192 2 2 513.92 8.8987e+00HPL_pdgesv() start time Wed Jun 8 21:28:07 2022HPL_pdgesv() end time Wed Jun 8 21:36:41 2022--------------------------------------------------------------------------------||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)= 4.89711678e-03 ...... PASSED================================================================================Finished 1 tests with the following results: 1 tests completed and passed residual checks, 0 tests completed and failed residual checks, 0 tests skipped because of illegal input values.--------------------------------------------------------------------------------End of Tests.================================================================================(11) Finally, we submit the same run of Linpack but through Spectrum LSF. The LSF bsub commandinvocation is shown below and the resulting output.gsamu@reform:~$ bsub -n 4 -I -m reform \"cd /opt/HPC/hpl-2.3/bin/imx8qm ; mpirun ./xhpl\" Job <35301> is submitted to default queue <interactive>.<<Waiting for dispatch ...>><<Starting on reform>>================================================================================HPLinpack 2.3 -- High-Performance Linpack benchmark -- December 2, 2018Written by A. Petitet and R. 
Clint Whaley, Innovative Computing Laboratory, UTKModified by Piotr Luszczek, Innovative Computing Laboratory, UTKModified by Julien Langou, University of Colorado Denver================================================================================An explanation of the input/output parameters follows:T/V : Wall time / encoded variant.N : The order of the coefficient matrix A.NB : The partitioning blocking factor.P : The number of process rows.Q : The number of process columns.Time : Time in seconds to solve the linear system.Gflops : Rate of execution for solving the linear system.The following parameter values will be used:N : 19000 NB : 192 PMAP : Row-major process mappingP : 2 Q : 2 PFACT : Right NBMIN : 4 NDIV : 2 RFACT : Crout BCAST : 1ringM DEPTH : 1 SWAP : Mix (threshold = 64)L1 : transposed formU : transposed formEQUIL : yesALIGN : 8 double precision words--------------------------------------------------------------------------------- The matrix A is randomly generated for each test.- The following scaled residual check will be computed: ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N )- The relative machine precision (eps) is taken to be 1.110223e-16- Computational tests pass if scaled residuals are less than 16.0================================================================================T/V N NB P Q Time Gflops--------------------------------------------------------------------------------WR11C2R4 19000 192 2 2 518.02 8.8283e+00HPL_pdgesv() start time Thu Jun 9 09:33:35 2022HPL_pdgesv() end time Thu Jun 9 09:42:13 2022--------------------------------------------------------------------------------||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)= 4.89711678e-03 ...... PASSED================================================================================Finished 1 tests with the following results: 1 tests completed and passed residual checks, 0 tests completed and failed residual checks, 0 tests skipped because of illegal input values.--------------------------------------------------------------------------------End of Tests.================================================================================", + "content_html": "

    A few days back I posted some of my initial thoughts on the MNT Reform 2 laptop, which just recently arrived. I ran the usual battery of tests on the laptop, including of course the High Performance Linpack (HPL), just for kicks.

    At that time, I made no attempt to optimize HPL. I simply went with the OS-supplied gcc and math libraries. My next step was to look at how I could improve my HPL result using the Arm compiler for Linux and the Arm performance libraries. Here I’ll walk through those steps, from installing the Arm tools to compiling and running HPL - and all of the small details in between.

    (1) To start, I downloaded the latest version of the Arm compiler for Linux package from here. This was the package with the filename: arm-compiler-for-linux_22.0.2_Ubuntu-20.04_aarch64.tar.

    (2) After uncompressing arm-compiler-for-linux_22.0.2_Ubuntu-20.04_aarch64.tar, I ran the installation command ./arm-compiler-for-linux_22.0.2_Ubuntu-20.04.sh -a, which installed the software to /opt/arm on the system. Note that the Arm compilers for Linux ship with module files to make setting up the environment for compiling easy. To support this, I had to install the OS environment-modules package with apt-get install environment-modules.
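
    For reference, the whole step as a sequence of commands might look like the following (a sketch; the name of the extracted directory is my assumption based on the tarball name):

        $ tar xf arm-compiler-for-linux_22.0.2_Ubuntu-20.04_aarch64.tar
        $ cd arm-compiler-for-linux_22.0.2_Ubuntu-20.04
        $ ./arm-compiler-for-linux_22.0.2_Ubuntu-20.04.sh -a    # installs to /opt/arm
        $ apt-get install environment-modules                   # provides the module command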

    (3) In order to load the module for the Arm compiler for Linux, the following steps are necessary. This assumes that the Arm compiler for Linux is installed in /opt/arm.

        root@reform:/# module avail
        ----------------------------------- /usr/share/modules/modulefiles ------------------------------------
        dot  module-git  module-info  modules  null  use.own

        Key:
        modulepath

        root@reform:/# export MODULEPATH=/opt/arm/modulefiles:$MODULEPATH
        root@reform:/# module avail
        ---------------------------------------- /opt/arm/modulefiles -----------------------------------------
        acfl/22.0.2  binutils/11.2.0  gnu/11.2.0

        ----------------------------------- /usr/share/modules/modulefiles ------------------------------------
        dot  module-git  module-info  modules  null  use.own

        Key:
        modulepath

        root@reform:/# module load acfl/22.0.2
        Loading acfl/22.0.2
          Loading requirement: binutils/11.2.0
        root@reform:/# echo $PATH
        /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/bin:/opt/arm/gcc-11.2.0_Generic-AArch64_Ubuntu-20.04_aarch64-linux/binutils_bin:/root/.local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
        root@reform:/# armclang --version
        Arm C/C++/Fortran Compiler version 22.0.2 (build number 1776) (based on LLVM 13.0.0)
        Target: aarch64-unknown-linux-gnu
        Thread model: posix
        InstalledDir: /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/bin
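
    Since the export MODULEPATH above only lasts for the current shell, it may be worth persisting it (a minimal sketch, assuming a bash shell; the single quotes keep $MODULEPATH from being expanded at write time):

        $ echo 'export MODULEPATH=/opt/arm/modulefiles:$MODULEPATH' >> ~/.bashrc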

    (4) Now we shift our focus to Open MPI. Open MPI is an open source distribution of the Message Passing Interface (MPI) library for writing parallel applications. We will compile HPL against this Open MPI version. For this, I downloaded the latest Open MPI version (4.1.4) from here.

    By default, Open MPI compiles with support for the SLURM workload manager. My Reform has IBM Spectrum LSF installed as the workload scheduler. In order to enable LSF support in Open MPI, we need to specify the appropriate configure flags (see below).

        root@reform:/opt/HPC/openmpi-4.1.4# ./configure --prefix=/opt/HPC/openmpi-4.1.4 --with-lsf=/opt/ibm/lsf/10.1 --with-lsf-libdir=/opt/ibm/lsf/10.1/linux3.12-glibc2.17-armv8/lib
        root@reform:/opt/HPC/openmpi-4.1.4# make -j 4
        ......
        root@reform:/opt/HPC/openmpi-4.1.4# make install
        ......

    (5) After completing the compilation of Open MPI, the ompi_info command is run to check if support for LSF has been enabled. Note that you must source the LSF environment (i.e. . ./profile.lsf) before running ompi_info, or the LSF libraries won’t be found.

        root@reform:/opt/HPC/openmpi-4.1.4# ./bin/ompi_info |grep -i lsf
          Configure command line: '--prefix=/opt/HPC/openmpi-4.1.4' '--with-lsf=/opt/ibm/lsf/10.1' '--with-lsf-libdir=/opt/ibm/lsf/10.1/linux3.12-glibc2.17-armv8/lib'
                         MCA ess: lsf (MCA v2.1.0, API v3.0.0, Component v4.1.4)
                         MCA plm: lsf (MCA v2.1.0, API v2.0.0, Component v4.1.4)
                         MCA ras: lsf (MCA v2.1.0, API v2.0.0, Component v4.1.4)
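
    For completeness, sourcing the LSF environment before the check might look like this (the location of profile.lsf is my assumption; adjust it to your LSF installation):

        $ . /opt/ibm/lsf/conf/profile.lsf
        $ ./bin/ompi_info | grep -i lsf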

    (6) Next, I downloaded the latest HPL package from here. I uncompressed the package hpl-2.3.tar.gz in the /opt/HPC directory. Next, I had to create a new Makefile for HPL which would use the Arm compiler for Linux and the optimized math libraries. A copy of Make.imx8qm follows below.

        #
        #  -- High Performance Computing Linpack Benchmark (HPL)
        #     HPL - 2.3 - December 2, 2018
        #     Antoine P. Petitet
        #     University of Tennessee, Knoxville
        #     Innovative Computing Laboratory
        #     (C) Copyright 2000-2008 All Rights Reserved
        #
        #  -- Copyright notice and Licensing terms:
        #
        #  Redistribution and use in source and binary forms, with or without
        #  modification, are permitted provided that the following conditions
        #  are met:
        #
        #  1. Redistributions of source code must retain the above copyright
        #  notice, this list of conditions and the following disclaimer.
        #
        #  2. Redistributions in binary form must reproduce the above copyright
        #  notice, this list of conditions, and the following disclaimer in the
        #  documentation and/or other materials provided with the distribution.
        #
        #  3. All advertising materials mentioning features or use of this
        #  software must display the following acknowledgement:
        #  This product includes software developed at the University of
        #  Tennessee, Knoxville, Innovative Computing Laboratory.
        #
        #  4. The name of the University, the name of the Laboratory, or the
        #  names of its contributors may not be used to endorse or promote
        #  products derived from this software without specific written
        #  permission.
        #
        #  -- Disclaimer:
        #
        #  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
        #  ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
        #  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
        #  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY
        #  OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
        #  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
        #  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
        #  DATA OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
        #  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
        #  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
        #  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
        # ######################################################################
        #
        # ----------------------------------------------------------------------
        # - shell --------------------------------------------------------------
        # ----------------------------------------------------------------------
        #
        SHELL        = /bin/sh
        #
        CD           = cd
        CP           = cp
        LN_S         = ln -s
        MKDIR        = mkdir
        RM           = /bin/rm -f
        TOUCH        = touch
        #
        # ----------------------------------------------------------------------
        # - Platform identifier ------------------------------------------------
        # ----------------------------------------------------------------------
        #
        ARCH         = imx8qm
        #
        # ----------------------------------------------------------------------
        # - HPL Directory Structure / HPL library ------------------------------
        # ----------------------------------------------------------------------
        #
        TOPdir       = /opt/HPC/hpl-2.3
        INCdir       = /opt/HPC/hpl-2.3/include
        BINdir       = /opt/HPC/hpl-2.3/bin/$(ARCH)
        LIBdir       = /opt/HPC/hpl-2.3/lib/$(ARCH)
        #
        HPLlib       = /opt/HPC/hpl-2.3/lib/libhpl.a
        #
        # ----------------------------------------------------------------------
        # - Message Passing library (MPI) --------------------------------------
        # ----------------------------------------------------------------------
        # MPinc tells the C compiler where to find the Message Passing library
        # header files, MPlib is defined to be the name of the library to be
        # used. The variable MPdir is only used for defining MPinc and MPlib.
        #
        MPdir        = /opt/HPC/openmpi-4.1.4
        MPinc        = /opt/HPC/openmpi-4.1.4/include
        MPlib        = /opt/HPC/openmpi-4.1.4/lib/libmpi.so
        #
        # ----------------------------------------------------------------------
        # - Linear Algebra library (BLAS or VSIPL) -----------------------------
        # ----------------------------------------------------------------------
        # LAinc tells the C compiler where to find the Linear Algebra library
        # header files, LAlib is defined to be the name of the library to be
        # used. The variable LAdir is only used for defining LAinc and LAlib.
        #
        LAdir        =
        LAinc        =
        # LAlib        = -lamath -lm -mcpu=native
        LAlib        =
        #
        # ----------------------------------------------------------------------
        # - F77 / C interface --------------------------------------------------
        # ----------------------------------------------------------------------
        # You can skip this section if and only if you are not planning to use
        # a BLAS library featuring a Fortran 77 interface. Otherwise, it is
        # necessary to fill out the F2CDEFS variable with the appropriate
        # options. **One and only one** option should be chosen in **each** of
        # the 3 following categories:
        #
        # 1) name space (How C calls a Fortran 77 routine)
        #
        # -DAdd_              : all lower case and a suffixed underscore (Suns,
        #                       Intel, ...), [default]
        # -DNoChange          : all lower case (IBM RS6000),
        # -DUpCase            : all upper case (Cray),
        # -DAdd__             : the FORTRAN compiler in use is f2c.
        #
        # 2) C and Fortran 77 integer mapping
        #
        # -DF77_INTEGER=int   : Fortran 77 INTEGER is a C int, [default]
        # -DF77_INTEGER=long  : Fortran 77 INTEGER is a C long,
        # -DF77_INTEGER=short : Fortran 77 INTEGER is a C short.
        #
        # 3) Fortran 77 string handling
        #
        # -DStringSunStyle    : The string address is passed at the string loca-
        #                       tion on the stack, and the string length is then
        #                       passed as an F77_INTEGER after all explicit
        #                       stack arguments, [default]
        # -DStringStructPtr   : The address of a structure is passed by a
        #                       Fortran 77 string, and the structure is of the
        #                       form: struct {char *cp; F77_INTEGER len;},
        # -DStringStructVal   : A structure is passed by value for each Fortran
        #                       77 string, and the structure is of the form:
        #                       struct {char *cp; F77_INTEGER len;},
        # -DStringCrayStyle   : Special option for Cray machines, which uses
        #                       Cray fcd (fortran character descriptor) for
        #                       interoperation.
        #
        F2CDEFS      =
        #
        # ----------------------------------------------------------------------
        # - HPL includes / libraries / specifics -------------------------------
        # ----------------------------------------------------------------------
        #
        HPL_INCLUDES = -I$(INCdir) -I$(INCdir)/$(ARCH) $(LAinc) -I$(MPinc) -I/opt/arm/armpl-22.0.2_AArch64_Ubuntu-20.04_gcc_aarch64-linux/include/
        HPL_LIBS     = $(HPLlib) $(LAlib) $(MPlib)
        #
        # - Compile time options -----------------------------------------------
        #
        # -DHPL_COPY_L           force the copy of the panel L before bcast;
        # -DHPL_CALL_CBLAS       call the cblas interface;
        # -DHPL_CALL_VSIPL       call the vsip library;
        # -DHPL_DETAILED_TIMING  enable detailed timers;
        #
        # By default HPL will:
        #    *) not copy L before broadcast,
        #    *) call the BLAS Fortran 77 interface,
        #    *) not display detailed timing information.
        #
        HPL_OPTS     =
        #
        # ----------------------------------------------------------------------
        #
        HPL_DEFS     = $(F2CDEFS) $(HPL_OPTS) $(HPL_INCLUDES)
        #
        # ----------------------------------------------------------------------
        # - Compilers / linkers - Optimization flags ---------------------------
        # ----------------------------------------------------------------------
        #
        CC           = armclang
        CCNOOPT      = $(HPL_DEFS)
        CCFLAGS      = $(HPL_DEFS) -O3 -larmpl_lp64 -lamath -lm
        #
        LINKER       = armclang -O3 -armpl -lamath -lm
        LINKFLAGS    = $(CCFLAGS)
        #
        ARCHIVER     = ar
        ARFLAGS      = r
        RANLIB       = echo
        #
        # ----------------------------------------------------------------------
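
    Most of that file is stock HPL boilerplate; the substantive changes for this build are the Arm Performance Libraries include path plus the compiler and linker settings, i.e. these lines:

        CC           = armclang
        CCFLAGS      = $(HPL_DEFS) -O3 -larmpl_lp64 -lamath -lm
        LINKER       = armclang -O3 -armpl -lamath -lm
        HPL_INCLUDES = -I$(INCdir) -I$(INCdir)/$(ARCH) $(LAinc) -I$(MPinc) -I/opt/arm/armpl-22.0.2_AArch64_Ubuntu-20.04_gcc_aarch64-linux/include/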

    (7) Compiling HPL with the above Makefile is as simple as running the appropriate make command, specifying the architecture imx8qm.

        root@reform:/opt/HPC/hpl-2.3# make arch=imx8qm
        ......

    (8) Barring any errors, we should now have an xhpl binary under the /opt/HPC/hpl-2.3/bin/imx8qm directory.

        root@reform:/opt/HPC/hpl-2.3/bin/imx8qm# pwd
        /opt/HPC/hpl-2.3/bin/imx8qm
        root@reform:/opt/HPC/hpl-2.3/bin/imx8qm# ls -la
        total 156
        drwxr-xr-x 2 root root   4096 Jun  8 13:30 .
        drwxr-xr-x 3 root root   4096 Jun  8 13:20 ..
        -rw-r--r-- 1 root root   1454 Jun  8 13:30 HPL.dat
        -rwxr-xr-x 1 root root 146960 Jun  8 13:24 xhpl
        root@reform:/opt/HPC/hpl-2.3/bin/imx8qm# ldd ./xhpl
            linux-vdso.so.1 (0x0000007faa7b1000)
            libamath_aarch64.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libamath_aarch64.so (0x0000007faa5ef000)
            libm.so.6 => /lib/aarch64-linux-gnu/libm.so.6 (0x0000007faa520000)
            libarmpl_lp64.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/lib/clang/13.0.0/armpl_links/lib/libarmpl_lp64.so (0x0000007fa3cd5000)
            libmpi.so.40 => /usr/lib/aarch64-linux-gnu/libmpi.so.40 (0x0000007fa3b8f000)
            libarmflang.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libarmflang.so (0x0000007fa3728000)
            libomp.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libomp.so (0x0000007fa3649000)
            librt.so.1 => /lib/aarch64-linux-gnu/librt.so.1 (0x0000007fa3631000)
            libdl.so.2 => /lib/aarch64-linux-gnu/libdl.so.2 (0x0000007fa361d000)
            libpthread.so.0 => /lib/aarch64-linux-gnu/libpthread.so.0 (0x0000007fa35ed000)
            libastring_aarch64.so => /opt/arm/arm-linux-compiler-22.0.2_Generic-AArch64_Ubuntu-20.04_aarch64-linux/llvm-bin/../lib/libastring_aarch64.so (0x0000007fa35da000)
            libc.so.6 => /lib/aarch64-linux-gnu/libc.so.6 (0x0000007fa345f000)
            /lib/ld-linux-aarch64.so.1 (0x0000007faa77e000)
            libgcc_s.so.1 => /opt/arm/gcc-11.2.0_Generic-AArch64_Ubuntu-20.04_aarch64-linux/lib64/libgcc_s.so.1 (0x0000007fa343a000)
            libopen-rte.so.40 => /usr/lib/aarch64-linux-gnu/libopen-rte.so.40 (0x0000007fa336c000)
            libopen-pal.so.40 => /usr/lib/aarch64-linux-gnu/libopen-pal.so.40 (0x0000007fa32aa000)
            libhwloc.so.15 => /usr/lib/aarch64-linux-gnu/libhwloc.so.15 (0x0000007fa3245000)
            libstdc++.so.6 => /opt/arm/gcc-11.2.0_Generic-AArch64_Ubuntu-20.04_aarch64-linux/lib64/libstdc++.so.6 (0x0000007fa3030000)
            libz.so.1 => /lib/aarch64-linux-gnu/libz.so.1 (0x0000007fa3006000)
            libevent_core-2.1.so.7 => /usr/lib/aarch64-linux-gnu/libevent_core-2.1.so.7 (0x0000007fa2fbf000)
            libutil.so.1 => /lib/aarch64-linux-gnu/libutil.so.1 (0x0000007fa2fab000)
            libevent_pthreads-2.1.so.7 => /usr/lib/aarch64-linux-gnu/libevent_pthreads-2.1.so.7 (0x0000007fa2f98000)
            libudev.so.1 => /usr/lib/aarch64-linux-gnu/libudev.so.1 (0x0000007fa2f5e000)

    (9) A default HPL.dat file should be present in the directory /opt/HPC/hpl-2.3/bin/imx8qm. The file HPL.dat is used to tune the benchmark problem size according to the system. A copy of the HPL.dat file I created follows below. It is suitable for the 4 GB memory configuration of the Reform with 4 processor cores.

        HPLinpack benchmark input file
        Innovative Computing Laboratory, University of Tennessee
        HPL.out      output file name (if any)
        6            device out (6=stdout,7=stderr,file)
        1            # of problems sizes (N)
        19000        Ns
        1            # of NBs
        192          NBs
        0            PMAP process mapping (0=Row-,1=Column-major)
        1            # of process grids (P x Q)
        2            Ps
        2            Qs
        16.0         threshold
        1            # of panel fact
        2            PFACTs (0=left, 1=Crout, 2=Right)
        1            # of recursive stopping criterium
        4            NBMINs (>= 1)
        1            # of panels in recursion
        2            NDIVs
        1            # of recursive panel fact.
        1            RFACTs (0=left, 1=Crout, 2=Right)
        1            # of broadcast
        1            BCASTs (0=1rg,1=1rM,2=2rg,3=2rM,4=Lng,5=LnM)
        1            # of lookahead depth
        1            DEPTHs (>=0)
        2            SWAP (0=bin-exch,1=long,2=mix)
        64           swapping threshold
        0            L1 in (0=transposed,1=no-transposed) form
        0            U  in (0=transposed,1=no-transposed) form
        1            Equilibration (0=no,1=yes)
        8            memory alignment in double (> 0)
        ##### This line (no. 32) is ignored (it serves as a separator). ######
        0                               Number of additional problem sizes for PTRANS
        1200 10000 30000                values of N
        0                               number of additional blocking sizes for PTRANS
        40 9 8 13 13 20 16 32 64        values of NB
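
    As a sanity check on the Ns value above: a common rule of thumb (my addition, not from the file itself) is to choose N so that the N x N double-precision matrix fills roughly 80% of RAM, i.e. N ≈ sqrt(0.80 * mem_bytes / 8):

        $ python3 -c "print(int((0.80 * 4 * 1024**3 / 8) ** 0.5))"
        20724

    so Ns = 19000 leaves a comfortable margin for the OS and other processes on a 4 GB machine.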

    (10) Now we’re ready to execute the appropriate mpirun command to run the xhpl executable. We specify -np 4 to run across the 4 cores of the processor. With this better-optimized run we’re seeing ~8.9 GFLOPS, compared with ~4 GFLOPS for my previous runs where HPL was compiled with the OS-supplied GCC and math libraries (ATLAS). Note that as this is roughly double the GFLOPS from my previous runs, it appears that there is an issue with double precision or perhaps vectorization in the non-optimized runs.

        gsamu@reform:/opt/HPC/hpl-2.3/bin/imx8qm$ mpirun -np 4 ./xhpl
        ================================================================================
        HPLinpack 2.3  --  High-Performance Linpack benchmark  --   December 2, 2018
        Written by A. Petitet and R. Clint Whaley,  Innovative Computing Laboratory, UTK
        Modified by Piotr Luszczek, Innovative Computing Laboratory, UTK
        Modified by Julien Langou, University of Colorado Denver
        ================================================================================

        An explanation of the input/output parameters follows:
        T/V    : Wall time / encoded variant.
        N      : The order of the coefficient matrix A.
        NB     : The partitioning blocking factor.
        P      : The number of process rows.
        Q      : The number of process columns.
        Time   : Time in seconds to solve the linear system.
        Gflops : Rate of execution for solving the linear system.

        The following parameter values will be used:

        N      :   19000
        NB     :     192
        PMAP   : Row-major process mapping
        P      :       2
        Q      :       2
        PFACT  :   Right
        NBMIN  :       4
        NDIV   :       2
        RFACT  :   Crout
        BCAST  :  1ringM
        DEPTH  :       1
        SWAP   : Mix (threshold = 64)
        L1     : transposed form
        U      : transposed form
        EQUIL  : yes
        ALIGN  : 8 double precision words

        --------------------------------------------------------------------------------
        - The matrix A is randomly generated for each test.
        - The following scaled residual check will be computed:
              ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N )
        - The relative machine precision (eps) is taken to be               1.110223e-16
        - Computational tests pass if scaled residuals are less than                16.0

        ================================================================================
        T/V                N    NB     P     Q               Time                 Gflops
        --------------------------------------------------------------------------------
        WR11C2R4       19000   192     2     2             513.92             8.8987e+00
        HPL_pdgesv() start time Wed Jun  8 21:28:07 2022
        HPL_pdgesv() end time   Wed Jun  8 21:36:41 2022
        --------------------------------------------------------------------------------
        ||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)=   4.89711678e-03 ...... PASSED
        ================================================================================

        Finished      1 tests with the following results:
                      1 tests completed and passed residual checks,
                      0 tests completed and failed residual checks,
                      0 tests skipped because of illegal input values.
        --------------------------------------------------------------------------------

        End of Tests.
        ================================================================================
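
    As a rough back-of-the-envelope check on that number (assuming each Cortex-A53 core can retire one double-precision FMA, i.e. 2 FLOPs, per cycle - an assumption I haven't verified for the i.MX8MQ):

        $ python3 -c "print(4 * 1.5 * 2)"    # cores * GHz * FLOPs/cycle = peak GFLOPS
        12.0

    which would put the 8.9 GFLOPS result at roughly 74% of theoretical peak - a plausible HPL efficiency.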

    (11) Finally, we submit the same run of Linpack, but through Spectrum LSF. The LSF bsub command invocation is shown below, along with the resulting output.

        gsamu@reform:~$ bsub -n 4 -I -m reform "cd /opt/HPC/hpl-2.3/bin/imx8qm ; mpirun ./xhpl"
        Job <35301> is submitted to default queue <interactive>.
        <<Waiting for dispatch ...>>
        <<Starting on reform>>
        ================================================================================
        HPLinpack 2.3  --  High-Performance Linpack benchmark  --   December 2, 2018
        Written by A. Petitet and R. Clint Whaley,  Innovative Computing Laboratory, UTK
        Modified by Piotr Luszczek, Innovative Computing Laboratory, UTK
        Modified by Julien Langou, University of Colorado Denver
        ================================================================================

        An explanation of the input/output parameters follows:
        T/V    : Wall time / encoded variant.
        N      : The order of the coefficient matrix A.
        NB     : The partitioning blocking factor.
        P      : The number of process rows.
        Q      : The number of process columns.
        Time   : Time in seconds to solve the linear system.
        Gflops : Rate of execution for solving the linear system.

        The following parameter values will be used:

        N      :   19000
        NB     :     192
        PMAP   : Row-major process mapping
        P      :       2
        Q      :       2
        PFACT  :   Right
        NBMIN  :       4
        NDIV   :       2
        RFACT  :   Crout
        BCAST  :  1ringM
        DEPTH  :       1
        SWAP   : Mix (threshold = 64)
        L1     : transposed form
        U      : transposed form
        EQUIL  : yes
        ALIGN  : 8 double precision words

        --------------------------------------------------------------------------------
        - The matrix A is randomly generated for each test.
        - The following scaled residual check will be computed:
              ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N )
        - The relative machine precision (eps) is taken to be               1.110223e-16
        - Computational tests pass if scaled residuals are less than                16.0

        ================================================================================
        T/V                N    NB     P     Q               Time                 Gflops
        --------------------------------------------------------------------------------
        WR11C2R4       19000   192     2     2             518.02             8.8283e+00
        HPL_pdgesv() start time Thu Jun  9 09:33:35 2022
        HPL_pdgesv() end time   Thu Jun  9 09:42:13 2022
        --------------------------------------------------------------------------------
        ||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)=   4.89711678e-03 ...... PASSED
        ================================================================================

        Finished      1 tests with the following results:
                      1 tests completed and passed residual checks,
                      0 tests completed and failed residual checks,
                      0 tests skipped because of illegal input values.
        --------------------------------------------------------------------------------

        End of Tests.
        ================================================================================
    ", + "url": "https://hpc.social/personal-blog/2022/mnt-reform-2-part-deux/", + + + + + + "date_published": "2022-06-09T01:06:51-06:00", + "date_modified": "2022-06-09T01:06:51-06:00", + + "author": "Ramblings of a supercomputing enthusiast." + + }, + + { + "id": "https://hpc.social/personal-blog/2022/neunundneunzig-mnt-reform-s/", + "title": "Neunundneunzig MNT Reform(s)", + "summary": null, + "content_text": "I’ll admit it. I sat on the fence for a long time before placing an orderfor the MNT Reform 2 laptop. At the time, I was in the market for a laptopas my 2 Macbook Pro retina laptops were repurposed for online schoolingfor my children during the pandemic (and as it turns out were neverreturned to me).I have fairly extensive experience with Arm-based systems and was aware of potentialangst with custom distros when specific system support is not in the Linux mainline.Yes this has been pretty much addressed - for servers with the Arm SBSAspecifications. However the MNT Reform 2 was never marketed as SBSA.With eyes wide open, I ultimately decided to go ahead an order an MNT Reform 2.My laptop needs were really for light coding/scripting, occasional browsing,writing (blogs, etc), tinkering and as a terminal to my other systems. Sure,these requirements coud have bee met by some less expensive x86 laptops oreven Chromebooks. But those are distinctly lacking a cool factor. What really helpedto reach this decision was the following:Put together by a small, enthusiastic teamA proper keyboard and cool trackball in a laptopIntel outsideSwappable CPUs (there are some drop in replacements in the works)User replaceable, 18650 batteriesAntithesis of paper thin laptopsOf course, knowing that the Reform is based on the NXP/Freescale i.MX8MQ with4 x Arm Cortex-A53 cores (1.5 GHz), I knew it was not going to be a barn burnerin terms of performance.Late to the partyBecause of my procrastination, I only recieved my Reform this past week.Given that they’ve been shipping for some time, I definitely had thatlate to the party feeling. In a way this was good though as all of thewrite-ups and videos that have been posted over time gave me a goodidea of what to expect. So in this blog I don’t expect to coveranything ground breaking - just my experience so far.Much has been written about the packaging of the system. And in this senseit didn’t disappoint. You could tell that it was packaged with great careby the team at MNT and it was frankly a very enjoyable experience tounwrap the components. I’ve included a collage of photos at the end of this blogof the Reform.And that wasn’t the only fun. Right away I had to remove the transparent bottomcover of the Reform in order to connect up the batteries and install the Wifi andNVMe SSD. At this time I also replaced the plastic port covers with the optional metalversions that I ordered earlier this year. Once this was done, the system sprang tolife and I was able to very quickly get it booting from the encrypted NVMe thanksto the detailed handbook that was also included in the bundle I purchased andtips on the MNT Community site.As for the keyboard, I really enjoy the tactile feel of it. It’s quite a refreshingexperience from the mushy keyboard on the MacBook Air M1 that I use for work. Andalthough I ordered both the trackball and trackpad for the Reform, I’ll likelystick with the trackball for now as it’s just a pleasure to use. 
Note thatmy Reform appears to have the updated trackball which has ball bearings for asmoother action.Fanless bitteOf course one of the first things an #HPC minded person like myself will do witha system is run High-performance Linpack (HPL) on it. This is a force of habit for meand thought it may also prove to be a good way to burn in the system.So I started with Open MPI. I downloaded and compiled Open MPI v4.1.4. This completedwithout a hitch. Note that I didn’t specify any specific flags configuring Open MPIother than a prefix for the installation location (under $HOME).HPL was easy to compile as well. Note that I simply used the OS ATLAS and BLASlibraries and the OS supplied compiler(s). So we can say that this is not anoptimized build of HPL.And below we see the results of the run of xhpl below, which achieved a result of4.2 GFLOPS.$ mpirun -np 4 ./xhpl ================================================================================HPLinpack 2.3 -- High-Performance Linpack benchmark -- December 2, 2018Written by A. Petitet and R. Clint Whaley, Innovative Computing Laboratory, UTKModified by Piotr Luszczek, Innovative Computing Laboratory, UTKModified by Julien Langou, University of Colorado Denver================================================================================An explanation of the input/output parameters follows:T/V : Wall time / encoded variant.N : The order of the coefficient matrix A.NB : The partitioning blocking factor.P : The number of process rows.Q : The number of process columns.Time : Time in seconds to solve the linear system.Gflops : Rate of execution for solving the linear system.The following parameter values will be used:N : 19000 NB : 192 PMAP : Row-major process mappingP : 2 Q : 2 PFACT : Right NBMIN : 4 NDIV : 2 RFACT : Crout BCAST : 1ringM DEPTH : 1 SWAP : Mix (threshold = 64)L1 : transposed formU : transposed formEQUIL : yesALIGN : 8 double precision words--------------------------------------------------------------------------------- The matrix A is randomly generated for each test.- The following scaled residual check will be computed: ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N )- The relative machine precision (eps) is taken to be 1.110223e-16- Computational tests pass if scaled residuals are less than 16.0================================================================================T/V N NB P Q Time Gflops--------------------------------------------------------------------------------WR11C2R4 19000 192 2 2 1073.27 4.2610e+00HPL_pdgesv() start time Mon Jun 6 12:08:36 2022HPL_pdgesv() end time Mon Jun 6 12:26:30 2022--------------------------------------------------------------------------------||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)= 1.18409443e-03 ...... PASSED================================================================================Finished 1 tests with the following results: 1 tests completed and passed residual checks, 0 tests completed and failed residual checks, 0 tests skipped because of illegal input values.--------------------------------------------------------------------------------End of Tests.================================================================================Just for kicks, I’ve also included a screenshot of lstopo, which is part of thePortal Hardware Locality (hwloc) project. 
I am a bit confused as to why theL1 and L2 cache sizes are zero though in the output.I’ve included the output from some system commands below including lscpu, lspci and lsusb.$ lscpuArchitecture: aarch64CPU op-mode(s): 32-bit, 64-bitByte Order: Little EndianCPU(s): 4On-line CPU(s) list: 0-3Thread(s) per core: 1Core(s) per socket: 4Socket(s): 1NUMA node(s): 1Vendor ID: ARMModel: 4Model name: Cortex-A53Stepping: r0p4CPU max MHz: 1500.0000CPU min MHz: 1000.0000BogoMIPS: 16.66NUMA node0 CPU(s): 0-3Vulnerability Itlb multihit: Not affectedVulnerability L1tf: Not affectedVulnerability Mds: Not affectedVulnerability Meltdown: Not affectedVulnerability Spec store bypass: Not affectedVulnerability Spectre v1: Mitigation; \\__user pointer sanitizationVulnerability Spectre v2: Not affectedVulnerability Srbds: Not affectedVulnerability Tsx async abort: Not affectedFlags: fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid$ lspci0000:00:00.0 PCI bridge: Synopsys, Inc. DWC_usb3 / PCIe bridge (rev 01)0000:01:00.0 Network controller: Qualcomm Atheros AR928X Wireless Network Adapter (PCI-Express) (rev 01)0001:00:00.0 PCI bridge: Synopsys, Inc. DWC_usb3 / PCIe bridge (rev 01)0001:01:00.0 Non-Volatile memory controller: Silicon Motion, Inc. SM2262/SM2262EN SSD Controller (rev 03)$ lsusbBus 004 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hubBus 003 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hubBus 002 Device 002: ID 0451:8140 Texas Instruments, Inc. TUSB8041 4-Port HubBus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hubBus 001 Device 004: ID 03eb:2041 Atmel Corp. LUFA Mouse Demo ApplicationBus 001 Device 003: ID 03eb:2042 Atmel Corp. LUFA Keyboard Demo ApplicationBus 001 Device 002: ID 0451:8142 Texas Instruments, Inc. TUSB8041 4-Port HubBus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hubSo that’s a very brief look at my initial experiences with the Reform laptop. I’veonly scratched the surface here, but so far I’m liking what I’m seeing. As for theneunundneunzig title reference, well I suppose that’s part of the vibe I got withthe laptop.A few photos for your viewing pleasure!", + "content_html": "

    I’ll admit it. I sat on the fence for a long time before placing an order for the MNT Reform 2 laptop. At the time, I was in the market for a laptop, as my two MacBook Pro Retina laptops had been repurposed for online schooling for my children during the pandemic (and, as it turns out, were never returned to me).

    I have fairly extensive experience with Arm-based systems and was aware of the potential angst with custom distros when specific system support is not in the Linux mainline. Yes, this has been pretty much addressed - for servers that follow the Arm SBSA specifications. However, the MNT Reform 2 was never marketed as SBSA.

    With eyes wide open, I ultimately decided to go ahead and order an MNT Reform 2. My laptop needs were really for light coding/scripting, occasional browsing, writing (blogs, etc.), tinkering and as a terminal to my other systems. Sure, these requirements could have been met by some less expensive x86 laptops or even Chromebooks. But those are distinctly lacking a cool factor. What really helped me reach this decision was the following:

    • Put together by a small, enthusiastic team
    • A proper keyboard and cool trackball in a laptop
    • Intel outside
    • Swappable CPUs (there are some drop-in replacements in the works)
    • User-replaceable 18650 batteries
    • Antithesis of paper thin laptops

    Of course, knowing that the Reform is based on the NXP/Freescale i.MX8MQ with 4 x Arm Cortex-A53 cores (1.5 GHz), I knew it was not going to be a barn burner in terms of performance.

    Late to the party

    Because of my procrastination, I only received my Reform this past week. Given that they’ve been shipping for some time, I definitely had that late to the party feeling. In a way this was good though, as all of the write-ups and videos that have been posted over time gave me a good idea of what to expect. So in this blog I don’t expect to cover anything groundbreaking - just my experience so far.

    Much has been written about the packaging of the system. And in this sense it didn’t disappoint. You could tell that it was packaged with great care by the team at MNT, and it was frankly a very enjoyable experience to unwrap the components. I’ve included a collage of photos of the Reform at the end of this blog.

    And that wasn’t the only fun. Right away I had to remove the transparent bottom cover of the Reform in order to connect up the batteries and install the Wifi and NVMe SSD. At this time I also replaced the plastic port covers with the optional metal versions that I ordered earlier this year. Once this was done, the system sprang to life and I was able to very quickly get it booting from the encrypted NVMe thanks to the detailed handbook that was also included in the bundle I purchased and tips on the MNT Community site.

    As for the keyboard, I really enjoy the tactile feel of it. It’s quite a refreshing experience from the mushy keyboard on the MacBook Air M1 that I use for work. And although I ordered both the trackball and trackpad for the Reform, I’ll likely stick with the trackball for now as it’s just a pleasure to use. Note that my Reform appears to have the updated trackball, which has ball bearings for a smoother action.

    Fanless bitte

    Of course, one of the first things an #HPC minded person like myself will do with a system is run High-Performance Linpack (HPL) on it. This is a force of habit for me, and I thought it might also prove to be a good way to burn in the system.

    So I started with Open MPI. I downloaded and compiled Open MPI v4.1.4. This completed without a hitch. Note that I didn’t specify any flags when configuring Open MPI other than a prefix for the installation location (under $HOME).
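
    For the curious, the build amounted to the usual configure and make steps - the following is just a sketch of that process, with an illustrative installation prefix under $HOME:

    $ tar xjf openmpi-4.1.4.tar.bz2 && cd openmpi-4.1.4
    $ ./configure --prefix=$HOME/openmpi-4.1.4
    $ make -j 4 all && make install
    $ export PATH=$HOME/openmpi-4.1.4/bin:$PATH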

    HPL was easy to compile as well. Note that I simply used the OS-supplied ATLAS and BLAS libraries and the OS-supplied compiler(s). So we can say that this is not an optimized build of HPL.
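
    For reference, HPL builds from a per-architecture makefile. The sketch below assumes the hpl-2.3 tarball from Netlib; the Make.aarch64 name is arbitrary, and the copied template needs its MPdir variable pointed at the Open MPI install and LAdir/LAlib at the distro ATLAS/BLAS libraries before running make:

    $ wget https://www.netlib.org/benchmark/hpl/hpl-2.3.tar.gz
    $ tar xzf hpl-2.3.tar.gz && cd hpl-2.3
    $ cp setup/Make.Linux_PII_CBLAS Make.aarch64
    $ make arch=aarch64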

    Below are the results of the xhpl run, which achieved 4.2 GFLOPS.

    $ mpirun -np 4 ./xhpl
    ================================================================================
    HPLinpack 2.3  --  High-Performance Linpack benchmark  --   December 2, 2018
    Written by A. Petitet and R. Clint Whaley,  Innovative Computing Laboratory, UTK
    Modified by Piotr Luszczek, Innovative Computing Laboratory, UTK
    Modified by Julien Langou, University of Colorado Denver
    ================================================================================

    An explanation of the input/output parameters follows:
    T/V    : Wall time / encoded variant.
    N      : The order of the coefficient matrix A.
    NB     : The partitioning blocking factor.
    P      : The number of process rows.
    Q      : The number of process columns.
    Time   : Time in seconds to solve the linear system.
    Gflops : Rate of execution for solving the linear system.

    The following parameter values will be used:

    N      :   19000
    NB     :     192
    PMAP   : Row-major process mapping
    P      :       2
    Q      :       2
    PFACT  :   Right
    NBMIN  :       4
    NDIV   :       2
    RFACT  :   Crout
    BCAST  :  1ringM
    DEPTH  :       1
    SWAP   : Mix (threshold = 64)
    L1     : transposed form
    U      : transposed form
    EQUIL  : yes
    ALIGN  : 8 double precision words

    --------------------------------------------------------------------------------

    - The matrix A is randomly generated for each test.
    - The following scaled residual check will be computed:
          ||Ax-b||_oo / ( eps * ( || x ||_oo * || A ||_oo + || b ||_oo ) * N )
    - The relative machine precision (eps) is taken to be               1.110223e-16
    - Computational tests pass if scaled residuals are less than                16.0

    ================================================================================
    T/V                N    NB     P     Q               Time                 Gflops
    --------------------------------------------------------------------------------
    WR11C2R4       19000   192     2     2            1073.27             4.2610e+00
    HPL_pdgesv() start time Mon Jun  6 12:08:36 2022
    HPL_pdgesv() end time   Mon Jun  6 12:26:30 2022
    --------------------------------------------------------------------------------
    ||Ax-b||_oo/(eps*(||A||_oo*||x||_oo+||b||_oo)*N)=   1.18409443e-03 ...... PASSED
    ================================================================================

    Finished      1 tests with the following results:
                  1 tests completed and passed residual checks,
                  0 tests completed and failed residual checks,
                  0 tests skipped because of illegal input values.
    --------------------------------------------------------------------------------
    End of Tests.
    ================================================================================
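
    As a quick sanity check, the reported rate can be reproduced from N and the wall time using HPL’s operation count of 2n^3/3 + 3n^2/2. Note also that an N=19000 double-precision matrix occupies 19000 x 19000 x 8 bytes, or roughly 2.9 GB, so the run makes good use of the Reform’s 4 GB of RAM:

    $ awk 'BEGIN { n = 19000; t = 1073.27; print ((2/3)*n^3 + (3/2)*n^2)/t/1e9 }'
    4.261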

    Just for kicks, I’ve also included a screenshot of lstopo, which is part of the Portable Hardware Locality (hwloc) project. I am a bit confused as to why the L1 and L2 cache sizes are zero in the output though.
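
    If I had to guess, hwloc reads cache geometry from sysfs, and on many arm64 boards the devicetree simply doesn’t describe the cache sizes, leaving nothing for lstopo to report. One way to check what the kernel itself exposes (these are the standard sysfs cacheinfo paths; if the index*/size files are missing, hwloc has nothing to go on):

    $ ls /sys/devices/system/cpu/cpu0/cache/
    $ cat /sys/devices/system/cpu/cpu0/cache/index*/size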

    I’ve included the output from some system commands below, including lscpu, lspci and lsusb.

    $ lscpu
    Architecture:                    aarch64
    CPU op-mode(s):                  32-bit, 64-bit
    Byte Order:                      Little Endian
    CPU(s):                          4
    On-line CPU(s) list:             0-3
    Thread(s) per core:              1
    Core(s) per socket:              4
    Socket(s):                       1
    NUMA node(s):                    1
    Vendor ID:                       ARM
    Model:                           4
    Model name:                      Cortex-A53
    Stepping:                        r0p4
    CPU max MHz:                     1500.0000
    CPU min MHz:                     1000.0000
    BogoMIPS:                        16.66
    NUMA node0 CPU(s):               0-3
    Vulnerability Itlb multihit:     Not affected
    Vulnerability L1tf:              Not affected
    Vulnerability Mds:               Not affected
    Vulnerability Meltdown:          Not affected
    Vulnerability Spec store bypass: Not affected
    Vulnerability Spectre v1:        Mitigation; __user pointer sanitization
    Vulnerability Spectre v2:        Not affected
    Vulnerability Srbds:             Not affected
    Vulnerability Tsx async abort:   Not affected
    Flags:                           fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
    $ lspci
    0000:00:00.0 PCI bridge: Synopsys, Inc. DWC_usb3 / PCIe bridge (rev 01)
    0000:01:00.0 Network controller: Qualcomm Atheros AR928X Wireless Network Adapter (PCI-Express) (rev 01)
    0001:00:00.0 PCI bridge: Synopsys, Inc. DWC_usb3 / PCIe bridge (rev 01)
    0001:01:00.0 Non-Volatile memory controller: Silicon Motion, Inc. SM2262/SM2262EN SSD Controller (rev 03)
    $ lsusb
    Bus 004 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
    Bus 003 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub
    Bus 002 Device 002: ID 0451:8140 Texas Instruments, Inc. TUSB8041 4-Port Hub
    Bus 002 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub
    Bus 001 Device 004: ID 03eb:2041 Atmel Corp. LUFA Mouse Demo Application
    Bus 001 Device 003: ID 03eb:2042 Atmel Corp. LUFA Keyboard Demo Application
    Bus 001 Device 002: ID 0451:8142 Texas Instruments, Inc. TUSB8041 4-Port Hub
    Bus 001 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub

    So that’s a very brief look at my initial experiences with the Reform laptop. I’ve only scratched the surface here, but so far I’m liking what I’m seeing. As for the neunundneunzig title reference, well I suppose that’s part of the vibe I got with the laptop.

    A few photos for your viewing pleasure!

    ", + "url": "https://hpc.social/personal-blog/2022/neunundneunzig-mnt-reform-s/", + + + + + + "date_published": "2022-06-06T18:54:07-06:00", + "date_modified": "2022-06-06T18:54:07-06:00", + + "author": "Ramblings of a supercomputing enthusiast." + + }, + + { + "id": "https://hpc.social/personal-blog/2022/life-and-leaving-nersc/", + "title": "Life and leaving NERSC", + "summary": null, + "content_text": "When word started to spread that I was leaving my job at NERSC for Microsoft, a lot of people either directly or indirectly attributed my decision to being one motivated by money.  Rationalizing my decision to leave is certainly a lot easier with this \"Glenn was lured away with bags of cash\" narrative, but that wasn't really a factor when I chose to move on.  Rather, my decision is a reflection of where I see the world of HPC going in the coming decade and where I personally wanted to position myself.  For my own therapeutic reasons (and perhaps the benefit of anyone interested in what it's like to work within, and subsequently leave, the DOE HPC complex), I'll try to write it all out here.Working at NERSCFirst things first: NERSC has been a wonderful place to work.<div style=\"text-align: center;\">A typical view from outside NERSC’s facility in Berkeley after work during the winter months.  Yes, it really does look like this.</div><p>When I started in mid-2015, I came in with about three years of prior work experience (two at SDSC doing user support and one at a biotech startup) and knew a little bit about a lot of things in HPC.  But I didn’t really know the basics of I/O or storage–I couldn’t tell you what “POSIX I/O” really meant or how GPFS worked.  The fact that I got to help author NERSC’s ten-year strategy around storage in just two years, was invited to present my view on how to bridge the gap between HPC and enterprise storage at Samsung’s North American headquarters a year later, and was trusted to oversee the design and execution of the world’s first 35 petabyte all-flash Lustre file system through my first four years is a testament to how much opportunity is available to learn and grow at NERSC.</p>There are a couple of reasons for this.Stable fundingPerhaps foremost, NERSC (and DOE's Leadership Computing Facilities, ALCF and OLCF) enjoy healthy budgets and financial stability since worldwide leadership in scientific advancement is generally a national priority by both major political parties in the US.  This means that, regardless of who is president and which party holds majorities in Congress, the DOE HPC facilities can pay their employees and deploy new supercomputers.  This solid funding makes it much easier to invest in staff development and long-term planning; I was able to become a resident I/O expert at NERSC because I was never forced to chase after the funding du jour to make ends meet.  Congress trusts NERSC to allocate its funding responsibly, and NERSC prioritized letting me learn as much as I could without distraction.Instant credibility and accessSecond, having a NERSC affiliation gives you instant credibility and access in many cases.  It's not necessarily fair, but it's definitely true.  Within my first year at NERSC, I was invited to give a presentation about I/O performance monitoring in Paris because the organizer wanted a lineup of speakers from all the big players in HPC.  I had never been to Europe at that point in my life, but being the I/O guy from NERSC (and being able to present well!) was enough to get me there.  
And it was during that trip to Paris that I got to meet--and literally have conversation over dinner with--more industry bigshots that I can remember.  And that trip to Paris was not an outlier; pandemic aside, NERSC let me go to Europe at least once or twice every year I've worked there.<div style=\"text-align: center;\">The first photo I ever took of Notre Dame on the first day I’d ever set foot in Europe.  NERSC sent me there less than a year after I started.</div><p>Of course, this is not to say that every employee at a DOE HPC facility is wining and dining in Paris every summer.  Many of these opportunities are earned by showing the value of the work you’re doing, just like at any job.  But owing to healthy budgets, travel expenses are rarely the limiting factor in chasing after these opportunities.  In addition, going out into the world and talking about what you do is part of the job at a DOE facility; being a leader in the field of HPC is part of the mission of NERSC, ALCF, and OLCF, so doing high-risk, first-of-a-kind work and telling the world about it is uniquely valued within DOE in a way that it is not in industry.</p>Smart peopleA product of these two factors (stable budget and instant credibility) results in coworkers and colleagues who are generally very experienced and capable.  There's an interesting mix of laissez-faire management and rigorous process-driven management as a result.Staff are generally given the freedom to choose their own destiny and focus on work that they enjoy much like in any academic environment; it's not hard to pick up passion projects or even move between groups if things get stale on a day-to-day basis.  Since everyone is working on their own slices of HPC, there's also easy access to world experts in different areas of technology if you need one.  For example, I recall once reviewing a storage system that appeared to rely on multiplexing two 12G SAS links over a single 24G SAS.  After one email and a few hours, a coworker confirmed, complete with a citation to the SCSI standards, that this was totally possible.  Even if someone in-house didn't know the answer, I had direct access to an engineering manager at a leading storage vendor who owed me a favor and definitely would've known the answer.  It's really, really hard to find as many smart people in arm's reach in most other HPC centers. At the same time, there is rigorous federal oversight on major projects and procurements to ensure that taxpayer dollars are responsibly spent.  This is a double-edged sword because all of the reporting and reviews that go into massive capital projects make forward progress very slow at times.  All DOE HPC facilities review and re-review everything about these giant supercomputers before making a decision, so by the time the public sees a press release about a new supercomputer, lab staff have spent literal years going over every detail and risk.  It sometimes may not seem that way (how many problems has Aurora had?), but rest assured that every schedule slip or technology change the public hears was preceded by countless hours of meetings about risk and cost minimization.  On the flip-side though, you have the opportunity to learn every gory detail about the system directly from the people who designed it.PayIn true millennial fashion, I think it's important to have an open discussion about the pay.  
DOE labs pay more than any other HPC facility in the world as far as I am aware, and even in the San Francisco Bay Area, salary at NERSC is comparable to the base salaries offered by all the big tech companies.  You can get an idea of what entry-level salaries (think: first job after postdoc or a few years out of undergrad) by searching H1B Visa postings, and anecdotally, I'd wager that a typical HPC job at NERSC pays about 2x that of the same job at a typical US university and 3x-4x that of the same job at a British or European university.  All the labs pay about the same to boot, so an HPC job at somewhere like Oak Ridge can afford you a relatively luxurious lifestyle.Don't get me wrong though; affording to buy a Bay Area house on a single NERSC salary alone would be tough in the same way that buying a Bay Area house on any single salary would be.  And while NERSC's compensation is comparable to the base salary of the big tech companies, that base is about all you can get since DOE labs cannot offer equity or substantial bonuses.  This is less of a gap if you're just starting out, but anyone who's looked at compensation structures in tech knows that stock-based compensation, not base salary, dominates total compensation as you move up.So, if money wasn't an issue for me and NERSC is such a great place to work, why would I ever leave?The road ahead for HPCOn one hand, HPC's future has never been brighter thanks to how much life (and money!) the AI industry is bringing to the development of HPC technologies.  We have new all-flash file systems, gigantic GPUs, awesome CPU memory technologies, and mixed-precision techniques in the HPC space that were all directly driven by developments primarily intended for AI workloads.  On the other hand, leadership HPC appears to be engaging in unsustainable brinkmanship while midrange HPC is having its value completely undercut by cloud vendors.  I've not been shy about my overall anxiety about where HPC is going because of this, but I'll elaborate now that the exascale race has been won.The future of leadership HPCWithout some monumental breakthrough in transistor technology, there is only one path forward in continuing to build faster and faster supercomputers in the next decade: pour more and more energy (and dissipate more and more heat) into larger and larger (and more and more) GPUs.The goal post for exascale power keeps moving because that's been the easiest way to hit the mythical exaflop milestone; while the original goal was 20 MW, Frontier is coming in at 29 MW and Aurora at \"under 60 MW.\"  Not only is this just a lot of power to feed into a single room, but the cost and effort of actually building this infrastructure is newsworthy in and of itself these days.  At the current trajectory, the cost of building a new data center and extensive power and cooling infrastructure for every new leadership supercomputer is going to become prohibitive very soon.HPC data centers situated in places where the cost of electricity and real estate (stacked atop the risk of earthquake or wildfire) further skew the economics of just adding more power are going to run up against this first.  
It used to be easy to dismiss these practicality concerns by arguing that colocating scientists with supercomputers created immeasurable synergy and exchange of ideas, but the fact that science never stopped during the work-from-home days of the pandemic have taken a lot of air out of that argument.My guess is that all the 50-60 MW data centers being built for the exascale supercomputers will be the last of their kind, and that there will be no public appetite to keep doubling down.Given this, DOE's leadership computing facilities are facing an existential threat: how do you define leadership computing after exascale if you can't just add another 50% more power into your facility?  How do you justify spending another $600 million for a supercomputer that uses the same power but only delivers 15% more performance?  You can pour similarly huge amounts of money into application modernization to accelerate science, but at the end of the day, you'd still be buying a lot of hardware that's not a lot faster.The future of places like NERSCNERSC is probably a little better off since its lack of an exascale machine today gives it at least one more turn of the crank before it hits a hard power limit in its data center.  That gives it the ability to deploy at least one more system after Perlmutter that is significantly (at least 2x) more capable but draws significantly more power.  However, compared to Frontier and Aurora, such a system may still look rather silly when it lands in the same way that Perlmutter looks a bit silly compared Summit, which was funded by the same agency but deployed years earlier.And therein lies the dilemma of centers like NERSC--how do you position yourself now so that by the time you deploy an HPC system that is close to maxing out on power, it is sufficiently different from a pure-FLOPS leadership system that it can solve problems that the leadership systems cannot?The easy go-to solution is to craft a story around \"data-centric\" supercomputing.  We did this when I was at the San Diego Supercomputer Center when we were budget-limited and had to differentiate our $12 million Comet supercomputer from TACC's $30 million Stampede.  You invest more in the file system than you would for a pure-FLOPS play, you provide low-cost but high-value onramps like Jupyter and science gateways to enable new science communities that have modest computing needs, and you fiddle with policies like allocations and queue priority to better suit interactive and urgent computing workloads.  From a productivity standpoint, this is can be a great story since users will always respond well to lower queue wait times and less frustrations with the file system.  From a system architect's standpoint, though, this is really boring.  The innovation happens in policies and software, not clever hardware or design, so there's very little that's new for a system designer to think about in this case.A more innovative approach is to start thinking about how to build a system that does more than just run batch jobs.  Perhaps it gives you a private, fast file system where you can store all your data in a way indistinguishable from your personal laptop.  Perhaps it gives you a convenient place to run a Jupyter notebook that has immediate access to a powerful GPU.  Or perhaps it gives you all the tools to set up an automated process where all you have to do is upload a file to trigger an automatic data analysis and reduction pipeline that returns its output to a shiny HTTP interface.  
Such a system may not be able to crank out an exaflop using HPL, but does that matter if it's the only system in the country that supports such automation?There are interesting system architecture questions in the latter case, so as a system designer, I much prefer it over the \"data-centric\" angle to non-exaflop supercomputing strategies.  But there remains a problem.The problem: cloudSuch a \"more than just batch jobs\" supercomputer actually already exists.  It's called the cloud, and it's far, far ahead of where state-of-the-art large-scale HPC is today--it pioneered the idea of providing an integrated platform where you can twist the infrastructure and its services to exactly fit what you want to get done.  Triggering data analysis based on the arrival of new data has been around for the better part of a decade in the form of serverless computing frameworks like Azure Functions.  If you need to run a Jupyter notebook on a server that has a beefy GPU on it, just pop a few quarters into your favorite cloud provider.  And if you don't even want to worry about what infrastructure you need to make your Jupyter-based machine learning workload go fast, the cloud providers all have integrated machine learning development environments that hide all of the underlying infrastructure.And therein lies the problem: the definition of \"innovation\" as non-exaflop HPC runs up against this power wall might actually mean \"catching up to the cloud.\"This is not to say that NERSC-like HPC centers are entirely behind the cloud; all the DOE HPC facilities have bigger, faster, and more convenient parallel file systems that are generally always on and where data is always somewhere \"fast.\"  They also provide familiar, managed software environments and more egalitarian support to small- to mid-scale science projects.  DOE HPC also takes the most risk in deploying unproven technologies to shake them out before they become available to the wide market.However, those gaps are beginning to close.  You can stick a full Cray EX system, identical to what you might find at NERSC or OLCF, inside Azure nowadays and avoid that whole burdensome mess of building out a 50 MW data center.  You can also integrate such a system with all the rich infrastructure features the cloud has to offer like triggered functions.  And when it comes to being first to market for risky HPC hardware, the cloud has already caught up in many ways--Microsoft deployed AMD Milan-X CPUs in their data centers before any HPC shop did, and more recently, Microsoft invested in AMD MI-200 GPUs before Frontier had a chance to shake them out.Given this steep trajectory, I see only two scenarios for large-scale, non-exaflop HPC facilities in the 10+ year horizon:They develop, adopt, steal, or squish cloud technologies into their supercomputers to make them functionally equivalent to cloud HPC deployments.  
They may be a little friendlier to scientific users since cloud functionality wasn't designed for scientific computing alone, but they also may not be as stable, mature, or feature-rich as their cloud cousins.They find better overall economics in eventually moving to massive, long-term, billion-dollar deals where flagship HPC systems and their \"more than just batch jobs\" features are colocated inside cloud datacenters sited at economically advantageous (that is, cheap power, cooling, and labor) locations in the country.There's also grey area in between where national HPC facilities consolidate their physical infrastructure in cheap areas to manage costs but still self-manage their infrastructure rather than fully outsource to a commercial cloud.  CSCS has hinted at this model as their future plan since they cannot build 100 MW datacenters in Switzerland, and this is proof that leading HPC facilities around the world see the writing on the wall and need to maneuver now to ensure they remain relevant beyond the next decade.  Unfortunately, the politics of consolidating the physical infrastructure across the DOE HPC sites would likely be mired in Congressional politics and take at least a decade to work out.  Since serious work towards this hasn't started yet, I don't envision such a grey-area solution emerging before all the DOE facilities hit their power limit.Hopefully I've painted a picture of how I perceive the road ahead for large-scale HPC facilities and you can guess which one I think will win out.Final thoughtsI have every confidence that there will still be DOE HPC facilities in ten years and that they will still be staffed by some of the brightest minds in HPC.  And even if a cloud-based HPC facility ultimately consumes centers like NERSC, I don't think many people would be out of work.  The vast majority of what DOE's HPC people do is think carefully about technology trends, maintain a deep understanding of user requirements, provide excellent support to its thousands of users, and keep complex supercomputers running well.  Those jobs don't go away if the supercomputer is in the cloud; it's just the physical location, the hands doing physical hardware swaps, and the breadth of vendor interactions that may change.For me as a system architect though, it's become too hard for me to catch up to all the new technologies and techniques HPC needs for the future while also building up other staff to be masters of today's I/O challenges.  I found myself at a fork in the road.  One path would mean catching up on a technical level and then getting in front of where the future of HPC lies before it gets there.  The other path would mean trying to steer the entire DOE HPC ship in the right direction, as long as that may take, and have faith that the people I bring along can race far enough ahead to tell me if we're still going where we need to go.  Perhaps a bit selfishly, I chose the former.  I'm just not ready to give up on racing ahead myself yet, and the only way I could hope to catch up was to make it a full-time job.I don't claim to know the future, and a lot of what I've laid out is all speculative at best.  NERSC, ALCF, or OLCF very well may build another round of data centers to keep the DOE HPC party going for another decade.  
However, there's no denying that the stakes keep getting higher with every passing year.That all said, DOE has pulled off stranger things in the past, and it still has a bunch of talented people to make the best of whatever the future holds.", + "content_html": "

    When word started to spread that I was leaving my job at NERSC for Microsoft, a lot of people either directly or indirectly assumed my decision was motivated by money.  Rationalizing my decision to leave is certainly a lot easier with this \"Glenn was lured away with bags of cash\" narrative, but that wasn't really a factor when I chose to move on.  Rather, my decision is a reflection of where I see the world of HPC going in the coming decade and where I personally wanted to position myself.  For my own therapeutic reasons (and perhaps the benefit of anyone interested in what it's like to work within, and subsequently leave, the DOE HPC complex), I'll try to write it all out here.

    Working at NERSC

    First things first: NERSC has been a wonderful place to work.

    <div style=\"text-align: center;\">A typical view from outside NERSC’s facility in Berkeley after work during the winter months.  Yes, it really does look like this.</div><p>When I started in mid-2015, I came in with about three years of prior work experience (two at SDSC doing user support and one at a biotech startup) and knew a little bit about a lot of things in HPC.  But I didn’t really know the basics of I/O or storage–I couldn’t tell you what “POSIX I/O” really meant or how GPFS worked.  The fact that I got to help author NERSC’s ten-year strategy around storage in just two years, was invited to present my view on how to bridge the gap between HPC and enterprise storage at Samsung’s North American headquarters a year later, and was trusted to oversee the design and execution of the world’s first 35 petabyte all-flash Lustre file system through my first four years is a testament to how much opportunity is available to learn and grow at NERSC.</p>

    There are a couple of reasons for this.

    Stable funding

    Perhaps foremost, NERSC (and DOE's Leadership Computing Facilities, ALCF and OLCF) enjoy healthy budgets and financial stability since worldwide leadership in scientific advancement is generally a national priority by both major political parties in the US.  This means that, regardless of who is president and which party holds majorities in Congress, the DOE HPC facilities can pay their employees and deploy new supercomputers.  This solid funding makes it much easier to invest in staff development and long-term planning; I was able to become a resident I/O expert at NERSC because I was never forced to chase after the funding du jour to make ends meet.  Congress trusts NERSC to allocate its funding responsibly, and NERSC prioritized letting me learn as much as I could without distraction.

    Instant credibility and access

    Second, having a NERSC affiliation gives you instant credibility and access in many cases.  It's not necessarily fair, but it's definitely true.  Within my first year at NERSC, I was invited to give a presentation about I/O performance monitoring in Paris because the organizer wanted a lineup of speakers from all the big players in HPC.  I had never been to Europe at that point in my life, but being the I/O guy from NERSC (and being able to present well!) was enough to get me there.  And it was during that trip to Paris that I got to meet--and literally have conversations over dinner with--more industry bigshots than I can remember.  And that trip to Paris was not an outlier; pandemic aside, NERSC let me go to Europe at least once or twice every year I've worked there.

    <div style=\"text-align: center;\">The first photo I ever took of Notre Dame on the first day I’d ever set foot in Europe.  NERSC sent me there less than a year after I started.</div><p>Of course, this is not to say that every employee at a DOE HPC facility is wining and dining in Paris every summer.  Many of these opportunities are earned by showing the value of the work you’re doing, just like at any job.  But owing to healthy budgets, travel expenses are rarely the limiting factor in chasing after these opportunities.  In addition, going out into the world and talking about what you do is part of the job at a DOE facility; being a leader in the field of HPC is part of the mission of NERSC, ALCF, and OLCF, so doing high-risk, first-of-a-kind work and telling the world about it is uniquely valued within DOE in a way that it is not in industry.</p>

    Smart people

    The product of these two factors (stable budget and instant credibility) is coworkers and colleagues who are generally very experienced and capable.  There's an interesting mix of laissez-faire management and rigorous process-driven management as a result.

    Staff are generally given the freedom to choose their own destiny and focus on work that they enjoy much like in any academic environment; it's not hard to pick up passion projects or even move between groups if things get stale on a day-to-day basis.  Since everyone is working on their own slices of HPC, there's also easy access to world experts in different areas of technology if you need one.  For example, I recall once reviewing a storage system that appeared to rely on multiplexing two 12G SAS links over a single 24G SAS.  After one email and a few hours, a coworker confirmed, complete with a citation to the SCSI standards, that this was totally possible.  Even if someone in-house didn't know the answer, I had direct access to an engineering manager at a leading storage vendor who owed me a favor and definitely would've known the answer.  It's really, really hard to find as many smart people within arm's reach at most other HPC centers.

    At the same time, there is rigorous federal oversight on major projects and procurements to ensure that taxpayer dollars are responsibly spent.  This is a double-edged sword because all of the reporting and reviews that go into massive capital projects make forward progress very slow at times.  All DOE HPC facilities review and re-review everything about these giant supercomputers before making a decision, so by the time the public sees a press release about a new supercomputer, lab staff have spent literal years going over every detail and risk.  It sometimes may not seem that way (how many problems has Aurora had?), but rest assured that every schedule slip or technology change the public hears was preceded by countless hours of meetings about risk and cost minimization.  On the flip-side though, you have the opportunity to learn every gory detail about the system directly from the people who designed it.

    Pay

    In true millennial fashion, I think it's important to have an open discussion about the pay.  DOE labs pay more than any other HPC facility in the world as far as I am aware, and even in the San Francisco Bay Area, salary at NERSC is comparable to the base salaries offered by all the big tech companies.  You can get an idea of what entry-level salaries (think: first job after postdoc or a few years out of undergrad) look like by searching H1B Visa postings, and anecdotally, I'd wager that a typical HPC job at NERSC pays about 2x that of the same job at a typical US university and 3x-4x that of the same job at a British or European university.  All the labs pay about the same to boot, so an HPC job at somewhere like Oak Ridge can afford you a relatively luxurious lifestyle.

    Don't get me wrong though; affording to buy a Bay Area house on a single NERSC salary alone would be tough in the same way that buying a Bay Area house on any single salary would be.  And while NERSC's compensation is comparable to the base salary of the big tech companies, that base is about all you can get since DOE labs cannot offer equity or substantial bonuses.  This is less of a gap if you're just starting out, but anyone who's looked at compensation structures in tech knows that stock-based compensation, not base salary, dominates total compensation as you move up.

    So, if money wasn't an issue for me and NERSC is such a great place to work, why would I ever leave?

    The road ahead for HPC

    On one hand, HPC's future has never been brighter thanks to how much life (and money!) the AI industry is bringing to the development of HPC technologies.  We have new all-flash file systems, gigantic GPUs, awesome CPU memory technologies, and mixed-precision techniques in the HPC space that were all directly driven by developments primarily intended for AI workloads.  On the other hand, leadership HPC appears to be engaging in unsustainable brinkmanship while midrange HPC is having its value completely undercut by cloud vendors.  I've not been shy about my overall anxiety about where HPC is going because of this, but I'll elaborate now that the exascale race has been won.

    The future of leadership HPC

    Without some monumental breakthrough in transistor technology, there is only one path forward in continuing to build faster and faster supercomputers in the next decade: pour more and more energy (and dissipate more and more heat) into larger and larger (and more and more) GPUs.

    The goal post for exascale power keeps moving because that's been the easiest way to hit the mythical exaflop milestone; while the original goal was 20 MW, Frontier is coming in at 29 MW and Aurora at \"under 60 MW.\"  Not only is this just a lot of power to feed into a single room, but the cost and effort of actually building this infrastructure is newsworthy in and of itself these days.  At the current trajectory, the cost of building a new data center and extensive power and cooling infrastructure for every new leadership supercomputer is going to become prohibitive very soon.

    HPC data centers situated in places where the cost of electricity and real estate (stacked atop the risk of earthquake or wildfire) further skew the economics of just adding more power are going to run up against this first.  It used to be easy to dismiss these practicality concerns by arguing that colocating scientists with supercomputers created immeasurable synergy and exchange of ideas, but the fact that science never stopped during the work-from-home days of the pandemic has taken a lot of air out of that argument.

    My guess is that all the 50-60 MW data centers being built for the exascale supercomputers will be the last of their kind, and that there will be no public appetite to keep doubling down.

    Given this, DOE's leadership computing facilities are facing an existential threat: how do you define leadership computing after exascale if you can't just add another 50% more power into your facility?  How do you justify spending another $600 million for a supercomputer that uses the same power but only delivers 15% more performance?  You can pour similarly huge amounts of money into application modernization to accelerate science, but at the end of the day, you'd still be buying a lot of hardware that's not a lot faster.

    The future of places like NERSC

    NERSC is probably a little better off since its lack of an exascale machine today gives it at least one more turn of the crank before it hits a hard power limit in its data center.  That gives it the ability to deploy at least one more system after Perlmutter that is significantly (at least 2x) more capable but draws significantly more power.  However, compared to Frontier and Aurora, such a system may still look rather silly when it lands in the same way that Perlmutter looks a bit silly compared to Summit, which was funded by the same agency but deployed years earlier.

    And therein lies the dilemma of centers like NERSC--how do you position yourself now so that by the time you deploy an HPC system that is close to maxing out on power, it is sufficiently different from a pure-FLOPS leadership system that it can solve problems that the leadership systems cannot?

    The easy go-to solution is to craft a story around \"data-centric\" supercomputing.  We did this when I was at the San Diego Supercomputer Center when we were budget-limited and had to differentiate our $12 million Comet supercomputer from TACC's $30 million Stampede.  You invest more in the file system than you would for a pure-FLOPS play, you provide low-cost but high-value onramps like Jupyter and science gateways to enable new science communities that have modest computing needs, and you fiddle with policies like allocations and queue priority to better suit interactive and urgent computing workloads.  From a productivity standpoint, this can be a great story since users will always respond well to lower queue wait times and fewer frustrations with the file system.  From a system architect's standpoint, though, this is really boring.  The innovation happens in policies and software, not clever hardware or design, so there's very little that's new for a system designer to think about in this case.

    A more innovative approach is to start thinking about how to build a system that does more than just run batch jobs.  Perhaps it gives you a private, fast file system where you can store all your data in a way indistinguishable from your personal laptop.  Perhaps it gives you a convenient place to run a Jupyter notebook that has immediate access to a powerful GPU.  Or perhaps it gives you all the tools to set up an automated process where all you have to do is upload a file to trigger an automatic data analysis and reduction pipeline that returns its output to a shiny HTTP interface.  Such a system may not be able to crank out an exaflop using HPL, but does that matter if it's the only system in the country that supports such automation?

    There are interesting system architecture questions in the latter case, so as a system designer, I much prefer it over the \"data-centric\" angle to non-exaflop supercomputing strategies.  But there remains a problem.

    The problem: cloud

    Such a \"more than just batch jobs\" supercomputer actually already exists.  It's called the cloud, and it's far, far ahead of where state-of-the-art large-scale HPC is today--it pioneered the idea of providing an integrated platform where you can twist the infrastructure and its services to exactly fit what you want to get done.  Triggering data analysis based on the arrival of new data has been around for the better part of a decade in the form of serverless computing frameworks like Azure Functions.  If you need to run a Jupyter notebook on a server that has a beefy GPU on it, just pop a few quarters into your favorite cloud provider.  And if you don't even want to worry about what infrastructure you need to make your Jupyter-based machine learning workload go fast, the cloud providers all have integrated machine learning development environments that hide all of the underlying infrastructure.

    And therein lies the problem: the definition of \"innovation\" as non-exaflop HPC runs up against this power wall might actually mean \"catching up to the cloud.\"

    This is not to say that NERSC-like HPC centers are entirely behind the cloud; all the DOE HPC facilities have bigger, faster, and more convenient parallel file systems that are generally always on and where data is always somewhere \"fast.\"  They also provide familiar, managed software environments and more egalitarian support to small- to mid-scale science projects.  DOE HPC also takes the most risk in deploying unproven technologies to shake them out before they become available to the wide market.

    However, those gaps are beginning to close.  You can stick a full Cray EX system, identical to what you might find at NERSC or OLCF, inside Azure nowadays and avoid that whole burdensome mess of building out a 50 MW data center.  You can also integrate such a system with all the rich infrastructure features the cloud has to offer like triggered functions.  And when it comes to being first to market for risky HPC hardware, the cloud has already caught up in many ways--Microsoft deployed AMD Milan-X CPUs in their data centers before any HPC shop did, and more recently, Microsoft invested in AMD MI-200 GPUs before Frontier had a chance to shake them out.

    Given this steep trajectory, I see only two scenarios for large-scale, non-exaflop HPC facilities in the 10+ year horizon:

    1. They develop, adopt, steal, or squish cloud technologies into their supercomputers to make them functionally equivalent to cloud HPC deployments.  They may be a little friendlier to scientific users since cloud functionality wasn't designed for scientific computing alone, but they also may not be as stable, mature, or feature-rich as their cloud cousins.
    2. They find better overall economics in eventually moving to massive, long-term, billion-dollar deals where flagship HPC systems and their \"more than just batch jobs\" features are colocated inside cloud datacenters sited at economically advantageous (that is, cheap power, cooling, and labor) locations in the country.

    There's also grey area in between where national HPC facilities consolidate their physical infrastructure in cheap areas to manage costs but still self-manage their infrastructure rather than fully outsource to a commercial cloud.  CSCS has hinted at this model as their future plan since they cannot build 100 MW datacenters in Switzerland, and this is proof that leading HPC facilities around the world see the writing on the wall and need to maneuver now to ensure they remain relevant beyond the next decade.  Unfortunately, the politics of consolidating the physical infrastructure across the DOE HPC sites would likely be mired in Congressional politics and take at least a decade to work out.  Since serious work towards this hasn't started yet, I don't envision such a grey-area solution emerging before all the DOE facilities hit their power limit.

    Hopefully I've painted a picture of how I perceive the road ahead for large-scale HPC facilities and you can guess which one I think will win out.

    Final thoughts

    I have every confidence that there will still be DOE HPC facilities in ten years and that they will still be staffed by some of the brightest minds in HPC.  And even if a cloud-based HPC facility ultimately consumes centers like NERSC, I don't think many people would be out of work.  The vast majority of what DOE's HPC people do is think carefully about technology trends, maintain a deep understanding of user requirements, provide excellent support to its thousands of users, and keep complex supercomputers running well.  Those jobs don't go away if the supercomputer is in the cloud; it's just the physical location, the hands doing physical hardware swaps, and the breadth of vendor interactions that may change.

    For me as a system architect though, it's become too hard for me to catch up to all the new technologies and techniques HPC needs for the future while also building up other staff to be masters of today's I/O challenges.  I found myself at a fork in the road.  One path would mean catching up on a technical level and then getting in front of where the future of HPC lies before it gets there.  The other path would mean trying to steer the entire DOE HPC ship in the right direction, however long that may take, and have faith that the people I bring along can race far enough ahead to tell me if we're still going where we need to go.  Perhaps a bit selfishly, I chose the former.  I'm just not ready to give up on racing ahead myself yet, and the only way I could hope to catch up was to make it a full-time job.

    I don't claim to know the future, and a lot of what I've laid out is all speculative at best.  NERSC, ALCF, or OLCF very well may build another round of data centers to keep the DOE HPC party going for another decade.  However, there's no denying that the stakes keep getting higher with every passing year.

    That all said, DOE has pulled off stranger things in the past, and it still has a bunch of talented people to make the best of whatever the future holds.

    ", + "url": "https://hpc.social/personal-blog/2022/life-and-leaving-nersc/", + + + + + + "date_published": "2022-05-27T06:42:00-06:00", + "date_modified": "2022-05-27T06:42:00-06:00", + + "author": "Glenn K. Lockwood's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/experimenting-with-igor-s-bluestore-wal/", + "title": "Experimenting with Igor’s Bluestore WAL", + "summary": null, + "content_text": "Igor Fedetov is one of the most knowledgable developers working on Ceph. He’s started working on replacing our use of RocksDB’s write ahead log with a bluestore native implementation. After tuning we can achieve up to 122K random write IOPS on a single OSD! That’s nearly a 50% improvment over the current main branch and over twice as fast as Pacific!", + "content_html": "

    Igor Fedotov is one of the most knowledgeable developers working on Ceph. He’s started working on replacing our use of RocksDB’s write-ahead log with a BlueStore-native implementation. After tuning, we can achieve up to 122K random write IOPS on a single OSD! That’s nearly a 50% improvement over the current main branch and over twice as fast as Pacific!

    ", + "url": "https://hpc.social/personal-blog/2022/experimenting-with-igor-s-bluestore-wal/", + + + + + + "date_published": "2022-05-26T01:00:00-06:00", + "date_modified": "2022-05-26T01:00:00-06:00", + + "author": "Mark Nelson's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/interesting-links-i-clicked-this-week/", + "title": "Interesting links I clicked this week", + "summary": null, + "content_text": "I watched several really interesting talks from SRECon22 Americas this week, and in particular I’d like to highlight:Principled Performance Analytics, Narayan Desai and Brent Bryan from Google. Some interesting thoughts on quantitative analysis of live performance data for monitoring and observability purposes, moving past simple percentile analysis.The ‘Success’ in SRE is Silent, Casey Rosenthal from Verica.io. Interesting thoughts here on the visibility of reliability, qualitative analysis of systems, and why regulation and certification might not be the right thing for web systems.Building and Running a Diversity-focused Pre-internship program for SRE, from Andrew Ryan at Facebook Meta. Some good lessons-learned here from an early-career internship-like program, in its first year.Taking the 737 to the Max, Nickolas Means from Sym. A really interesting analysis of the Boeing 737 Max failures from both a technical and cultural perspective, complete with some graph tracing to understand failure modes.I also ran across some other articles that I’ve been actively recommending and sharing with friends and colleagues, including:Plato’s Dashboards, Fred Hebert at Honeycomb. This article has some great analysis of how easily-measurable metrics are often poor proxies for the information we’re actually interested in, and discussing qualitative research methods as a way to gain more insight.The End of Roe Will Bring About A Sea Change In The Encryption Debate, Rianna Pfefferkorn from the Stanford Internet Observatory. You should absolutely go read this article, but to sum up: Law enforcement in states than ban abortion is now absolutely part of the threat model that encrypted messaging defends against. No one claiming to be a progressive should be arguing in favor of “exceptional access” or other law enforcement access to encryption.", + "content_html": "

    I watched several really interesting talks from SRECon22 Americas this week, and in particular I’d like to highlight:

    • Principled Performance Analytics, Narayan Desai and Brent Bryan from Google. Some interesting thoughts on quantitative analysis of live performance data for monitoring and observability purposes, moving past simple percentile analysis.
    • The ‘Success’ in SRE is Silent, Casey Rosenthal from Verica.io. Interesting thoughts here on the visibility of reliability, qualitative analysis of systems, and why regulation and certification might not be the right thing for web systems.
    • Building and Running a Diversity-focused Pre-internship program for SRE, from Andrew Ryan at Facebook Meta. Some good lessons-learned here from an early-career internship-like program, in its first year.
    • Taking the 737 to the Max, Nickolas Means from Sym. A really interesting analysis of the Boeing 737 Max failures from both a technical and cultural perspective, complete with some graph tracing to understand failure modes.

    I also ran across some other articles that I’ve been actively recommending and sharing with friends and colleagues, including:

    • Plato’s Dashboards, Fred Hebert at Honeycomb. This article has some great analysis of how easily-measurable metrics are often poor proxies for the information we’re actually interested in, and discussing qualitative research methods as a way to gain more insight.
    • The End of Roe Will Bring About A Sea Change In The Encryption Debate, Rianna Pfefferkorn from the Stanford Internet Observatory. You should absolutely go read this article, but to sum up: Law enforcement in states that ban abortion is now absolutely part of the threat model that encrypted messaging defends against. No one claiming to be a progressive should be arguing in favor of “exceptional access” or other law enforcement access to encryption.

    ", + "url": "https://hpc.social/personal-blog/2022/interesting-links-i-clicked-this-week/", + + + + + + "date_published": "2022-05-14T19:35:32-06:00", + "date_modified": "2022-05-14T19:35:32-06:00", + + "author": "Thinking Out Loud" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/customizing-command-output-in-ibm-spectrum-lsf/", + "title": "Customizing command output in IBM Spectrum LSF", + "summary": null, + "content_text": "IBM Spectrum LSF provides many ways to query the LSF cluster for information about workloads. As a user, once you’ve submitted a job to LSF, it’s logical to want to understand what has happened to your job. Has the job started yet? Is the job pending? If so, why is it pending? And the all important, “Is my job done yet?”. Of course, LSF provides a very rich CLI which has been developed and refined - over the past three decades. It’s also possible to get JSON-formatted output from various LSF query commands. This is useful for users and administrators alike as JSON-formatted output is easy to parse, and scripting can be used to extract values from the JSON output.This is not meant to be a definitive guide on how to query information in LSF, but rather provides some examples of the various ways that users can query job related information using the LSF CLI. This will include a look at the -json and -o options which have been introduced during the lifecycle of LSF v10.1.0 family. The -json option can be used to provide JSON-formatted output from various LSF query commands and the -o can be used to customize the fields in the output to only those desired.We’ll start with a simple job submission. Here we submit a test workload as a non-root user in the LSF cluster.$ bsub -o $HOME/output.%J -e $HOME/error.%J ./testjob.shJob <24520> is submitted to default queue <normal>.With the unique jobID number 24520, we can now query LSF for information about the job:$ bjobs 24520JOBID USER STAT QUEUE FROM_HOST EXEC_HOST JOB_NAME SUBMIT_TIME24520 gsamu RUN normal kilenc kilenc *estjob.sh May 10 21:09Adding the -l option to bjobs provides the long output (more details).$ bjobs -l 24520Job <24520>, User <gsamu>, Project <default>, Status <RUN>, Queue <normal>, Com mand <./testjob.sh>, Share group charged </gsamu>Tue May 10 21:09:22: Submitted from host <kilenc>, CWD <$HOME>, Output File </h ome/gsamu/output.24520>, Error File </home/gsamu/error.245 20>;Tue May 10 21:09:23: Started 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot(s) on Host(s) <kilenc>, Execution Home </home/gsamu>, Execut ion CWD </home/gsamu>;Tue May 10 21:10:01: Resource usage collected. MEM: 12 Mbytes; SWAP: 0 Mbytes; NTHREAD: 5 PGID: 313588; PIDs: 313588 313589 313591 313592 MEMORY USAGE: MAX MEM: 12 Mbytes; AVG MEM: 10 Mbytes SCHEDULING PARAMETERS: r15s r1m r15m ut pg io ls it tmp swp mem loadSched - - - - - - - - - - - loadStop - - - - - - - - - - - RESOURCE REQUIREMENT DETAILS: Combined: select[type == local] order[r15s:pg] Effective: select[type == local] order[r15s:pg] It is possible to customize the output format of the bjobs command using the -o option. In this case, we want to show only some specific details about the job in the output of bjobs. We’ve selected to view: jobID, job status, project name, memory consumed, output and error files. 
A full list of the available fields for the custom format can be found here.$ bjobs -o \"jobid stat: queue:- project:10 mem:12:G output_file error_file\" 24520JOBID STAT QUEUE PROJ_NAME MEM OUTPUT_FILE ERROR_FILE24520 RUN normal default 0.01 G /home/gsamu/output.24520 /home/gsamu/error.24520Adding the -json option, it’s possible to get this customized job output in JSON format.$ bjobs -o \"jobid stat: queue:- project:10 mem:12:G output_file error_file\" -json 24520{ \"COMMAND\":\"bjobs\", \"JOBS\":1, \"RECORDS\":[ { \"JOBID\":\"24520\", \"STAT\":\"RUN\", \"QUEUE\":\"normal\", \"PROJ_NAME\":\"default\", \"MEM\":\"0.01 G\", \"OUTPUT_FILE\":\"\\/home\\/gsamu\\/output.24520\", \"ERROR_FILE\":\"\\/home\\/gsamu\\/error.24520\" } ]}Next, let’s look at the bhist command. This can be used to view historical data about jobs.$ bhist 24520Summary of time in seconds spent in various states:JOBID USER JOB_NAME PEND PSUSP RUN USUSP SSUSP UNKWN TOTAL24520 gsamu *tjob.sh 1 0 457 0 0 0 458 We see that the job command has been truncated. Let’s now run bhist again with the -w option to produce a wide output.$ bhist -w 24520Summary of time in seconds spent in various states:JOBID USER JOB_NAME PEND PSUSP RUN USUSP SSUSP UNKWN TOTAL24520 gsamu ./testjob.sh 1 0 462 0 0 0 463 And finally, with the -l option to produce a long, detailed output.$ bhist -l 24520Job <24520>, User <gsamu>, Project <default>, Command <./testjob.sh>Tue May 10 21:09:22: Submitted from host <kilenc>, to Queue <normal>, CWD <$HOM E>, Output File </home/gsamu/output.%J>, Error File </home /gsamu/error.%J>;Tue May 10 21:09:23: Dispatched 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot (s) on Host(s) <kilenc>, Effective RES_REQ <select[type == local] order[r15s:pg] >;Tue May 10 21:09:25: Starting (Pid 313588);Tue May 10 21:09:25: Running with execution home </home/gsamu>, Execution CWD < /home/gsamu>, Execution Pid <313588>;Summary of time in seconds spent in various states by Tue May 10 21:17:26 PEND PSUSP RUN USUSP SSUSP UNKWN TOTAL 1 0 483 0 0 0 484 When the job is done, the bacct command can be used to get detailed accounting information for jobs.$ bacct 24520Accounting information about jobs that are: - submitted by all users. - accounted on all projects. - completed normally or exited - executed on all hosts. - submitted to all queues. - accounted on all service classes. 
- accounted to all RC accounts.------------------------------------------------------------------------------SUMMARY: ( time unit: second ) Total number of done jobs: 1 Total number of exited jobs: 0 Total CPU time consumed: 3.4 Average CPU time consumed: 3.4 Maximum CPU time of a job: 3.4 Minimum CPU time of a job: 3.4 Total wait time in queues: 1.0 Average wait time in queue: 1.0 Maximum wait time in queue: 1.0 Minimum wait time in queue: 1.0 Average turnaround time: 669 (seconds/job) Maximum turnaround time: 669 Minimum turnaround time: 669 Average hog factor of a job: 0.01 ( cpu time / turnaround time ) Maximum hog factor of a job: 0.01 Minimum hog factor of a job: 0.01 Average expansion factor of a job: 1.00 ( turnaround time / run time ) Maximum expansion factor of a job: 1.00 Minimum expansion factor of a job: 1.00 Total Run time consumed: 668 Average Run time consumed: 668 Maximum Run time of a job: 668 Minimum Run time of a job: 668 Scheduler Efficiency for 1 jobs Slot Utilization: 100.00% Memory Utilization: 100.00% And now the long, detailed output from bacct using the -l parameter.$ bacct -l 24520Accounting information about jobs that are: - submitted by all users. - accounted on all projects. - completed normally or exited - executed on all hosts. - submitted to all queues. - accounted on all service classes. - accounted to all RC accounts.------------------------------------------------------------------------------Job <24520>, User <gsamu>, Project <default>, Status <DONE>, Queue <normal>, Co mmand <./testjob.sh>, Share group charged </gsamu>Tue May 10 21:09:22: Submitted from host <kilenc>, CWD <$HOME>, Output File </h ome/gsamu/output.%J>, Error File </home/gsamu/error.%J>;Tue May 10 21:09:23: Dispatched 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot (s) on Host(s) <kilenc>, Effective RES_REQ <select[type == local] order[r15s:pg] >;Tue May 10 21:20:31: Completed <done>.Accounting information about this job: Share group charged </gsamu> CPU_T WAIT TURNAROUND STATUS HOG_FACTOR MEM SWAP 3.37 1 669 done 0.0050 12M 0M------------------------------------------------------------------------------SUMMARY: ( time unit: second ) Total number of done jobs: 1 Total number of exited jobs: 0 Total CPU time consumed: 3.4 Average CPU time consumed: 3.4 Maximum CPU time of a job: 3.4 Minimum CPU time of a job: 3.4 Total wait time in queues: 1.0 Average wait time in queue: 1.0 Maximum wait time in queue: 1.0 Minimum wait time in queue: 1.0 Average turnaround time: 669 (seconds/job) Maximum turnaround time: 669 Minimum turnaround time: 669 Average hog factor of a job: 0.01 ( cpu time / turnaround time ) Maximum hog factor of a job: 0.01 Minimum hog factor of a job: 0.01 Average expansion factor of a job: 1.00 ( turnaround time / run time ) Maximum expansion factor of a job: 1.00 Minimum expansion factor of a job: 1.00 Total Run time consumed: 668 Average Run time consumed: 668 Maximum Run time of a job: 668 Minimum Run time of a job: 668 Scheduler Efficiency for 1 jobs Slot Utilization: 100.00% Memory Utilization: 100.00% From jobs to queuesWe’ve looked briefly at querying LSF for job related information. Let’s now take a closer look at querying LSF for information regarding the queue configuration. Batch queues are where users submit jobs to. Queues can have a very wide array of attributes and settings. Below we see a listing of the default queues configured in LSF Suite for HPC. 
The bqueues command is used to query LSF for the queue configuration.$ bqueuesQUEUE_NAME PRIO STATUS MAX JL/U JL/P JL/H NJOBS PEND RUN SUSP admin 50 Open:Active - - - - 0 0 0 0owners 43 Open:Active - - - - 0 0 0 0priority 43 Open:Active - - - - 0 0 0 0night 40 Open:Active - - - - 0 0 0 0short 35 Open:Active - - - - 0 0 0 0dataq 33 Open:Active - - - - 0 0 0 0normal 30 Open:Active - - - - 0 0 0 0interactive 30 Open:Active - - - - 0 0 0 0idle 20 Open:Active - - - - 0 0 0 0The -l option of bqueues can be used to get a more details view about the queues. Here, we look at the long output for the queue normal.$ bqueues -l normalQUEUE: normal -- For normal low priority jobs, running only if hosts are lightly loaded. This is the default queue.PARAMETERS/STATISTICSPRIO NICE STATUS MAX JL/U JL/P JL/H NJOBS PEND RUN SSUSP USUSP RSV PJOBS 30 0 Open:Active - - - - 0 0 0 0 0 0 0Interval for a host to accept two jobs is 0 secondsSCHEDULING PARAMETERS r15s r1m r15m ut pg io ls it tmp swp mem loadSched - - - - - - - - - - - loadStop - - - - - - - - - - - SCHEDULING POLICIES: FAIRSHARE NO_INTERACTIVEUSER_SHARES: [default, 1] SHARE_INFO_FOR: normal/ USER/GROUP SHARES PRIORITY STARTED RESERVED CPU_TIME RUN_TIME ADJUST GPU_RUN_TIMEgsamu 1 0.333 0 0 0.0 0 0.000 0elasticsearch 1 0.333 0 0 0.0 0 0.000 0USERS: all HOSTS: all Custom output formatting can also be used for the bqueues command. Below is an example of the use of custom output formatting using the -o parameter. For this example, we display queue name, status and the number of jobs (all states). More details about the bqueues -o parameter can be found here.$ bqueues -o \"queue_name:12 status:12 njobs\"QUEUE_NAME STATUS NJOBSadmin Open:Active 0owners Open:Active 0priority Open:Active 0night Open:Active 0short Open:Active 0dataq Open:Active 0normal Open:Active 0interactive Open:Active 0idle Open:Active 0And for JSON-formatted output, we add the -json parameter.$ bqueues -json -o \"queue_name:12 status:12 njobs\"{ \"COMMAND\":\"bqueues\", \"QUEUES\":9, \"RECORDS\":[ { \"QUEUE_NAME\":\"admin\", \"STATUS\":\"Open:Active\", \"NJOBS\":\"0\" }, { \"QUEUE_NAME\":\"owners\", \"STATUS\":\"Open:Active\", \"NJOBS\":\"0\" }, { \"QUEUE_NAME\":\"priority\", \"STATUS\":\"Open:Active\", \"NJOBS\":\"0\" }, { \"QUEUE_NAME\":\"night\", \"STATUS\":\"Open:Active\", \"NJOBS\":\"0\" }, { \"QUEUE_NAME\":\"short\", \"STATUS\":\"Open:Active\", \"NJOBS\":\"0\" }, { \"QUEUE_NAME\":\"dataq\", \"STATUS\":\"Open:Active\", \"NJOBS\":\"0\" }, { \"QUEUE_NAME\":\"normal\", \"STATUS\":\"Open:Active\", \"NJOBS\":\"0\" }, { \"QUEUE_NAME\":\"interactive\", \"STATUS\":\"Open:Active\", \"NJOBS\":\"0\" }, { \"QUEUE_NAME\":\"idle\", \"STATUS\":\"Open:Active\", \"NJOBS\":\"0\" } ]}From queues to serversFinally, we’ll look at the LSF bhosts command, which is used to display information about the batch hosts in the LSF cluster.$ bhostsHOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV archie ok - 2 0 0 0 0 0kilenc ok - 32 0 0 0 0 0To view detailed information about a batch host, the -l parameter can be specified for bhosts. 
Here we query for information on host archie.$ bhosts -l archieHOST archieSTATUS CPUF JL/U MAX NJOBS RUN SSUSP USUSP RSV DISPATCH_WINDOWok 6.00 - 2 0 0 0 0 0 - CURRENT LOAD USED FOR SCHEDULING: r15s r1m r15m ut pg io ls it tmp swp mem slots ngpus Total 0.0 0.0 0.0 0% 0.0 1 1 437 3456M 0M 1.7G 2 0.0 Reserved 0.0 0.0 0.0 0% 0.0 0 0 0 0M 0M 0M - - ngpus_physical gpu_shared_avg_ut gpu_shared_avg_mut gpu_mode0 Total 0.0 0.0 0.0 0.0 Reserved - - - - gpu_mode1 gpu_mode2 gpu_mode3 gpu_mode4 gpu_mode5 gpu_mode6 Total 0.0 0.0 0.0 0.0 0.0 0.0 Reserved - - - - - - gpu_mode7 gpu_temp0 gpu_temp1 gpu_temp2 gpu_temp3 gpu_temp4 Total 0.0 0.0 0.0 0.0 0.0 0.0 Reserved - - - - - - gpu_temp5 gpu_temp6 gpu_temp7 gpu_ecc0 gpu_ecc1 gpu_ecc2 gpu_ecc3 Total 0.0 0.0 0.0 0.0 0.0 0.0 0.0 Reserved - - - - - - - gpu_ecc4 gpu_ecc5 gpu_ecc6 gpu_ecc7 gpu_ut0 gpu_ut1 gpu_ut2 gpu_ut3 Total 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 Reserved - - - - - - - - gpu_ut4 gpu_ut5 gpu_ut6 gpu_ut7 gpu_mut0 gpu_mut1 gpu_mut2 gpu_mut3 Total 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 Reserved - - - - - - - - gpu_mut4 gpu_mut5 gpu_mut6 gpu_mut7 gpu_mtotal0 gpu_mtotal1 Total 0.0 0.0 0.0 0.0 0.0 0.0 Reserved - - - - - - gpu_mtotal2 gpu_mtotal3 gpu_mtotal4 gpu_mtotal5 gpu_mtotal6 Total 0.0 0.0 0.0 0.0 0.0 Reserved - - - - - gpu_mtotal7 gpu_mused0 gpu_mused1 gpu_mused2 gpu_mused3 gpu_mused4 Total 0.0 0.0 0.0 0.0 0.0 0.0 Reserved - - - - - - gpu_mused5 gpu_mused6 gpu_mused7 gpu_maxfactor Total 0.0 0.0 0.0 0.0 Reserved - - - - LOAD THRESHOLD USED FOR SCHEDULING: r15s r1m r15m ut pg io ls it tmp swp mem loadSched - - - - - - - - - - - loadStop - - - - - - - - - - - CONFIGURED AFFINITY CPU LIST: allSimilar to bjobs and bqueues, the -o parameter can be used for custom formatting of output of bhosts. Below is an example of the use of custom output formatting using the -o parameter. For this example, we display host name, status and the number of jobs (all states). More details about the bqueues -o parameter can be found here.$ bhosts -o \"host_name:12 status:12 njobs\" HOST_NAME STATUS NJOBSarchie ok 0kilenc ok 0And adding the -json parameter for JSON-formatted output.$ bhosts -json -o \"host_name:12 status:12 njobs\" { \"COMMAND\":\"bhosts\", \"HOSTS\":2, \"RECORDS\":[ { \"HOST_NAME\":\"archie\", \"STATUS\":\"ok\", \"NJOBS\":\"0\" }, { \"HOST_NAME\":\"kilenc\", \"STATUS\":\"ok\", \"NJOBS\":\"0\" } ]}That concludes our brief look at LSF query commands. We’ve only scratched the surface here in terms of capabilities and query commands for LSF. The LSF command line interface is powerful and flexible including ways to customize the command outputs and to output in JSON-format. For more details, the complete set of IBM Spectrum LSF documentation can be found online at IBM Documentation here.", + "content_html": "

    IBM Spectrum LSF provides many ways to query the LSF cluster for information about workloads. As a user, once you’ve submitted a job to LSF, it’s logical to want to understand what has happened to your job. Has the job started yet? Is the job pending? If so, why is it pending? And the all-important, “Is my job done yet?” Of course, LSF provides a very rich CLI which has been developed and refined over the past three decades. It’s also possible to get JSON-formatted output from various LSF query commands. This is useful for users and administrators alike, as JSON-formatted output is easy to parse, and scripting can be used to extract values from the JSON output.

    This is not meant to be a definitive guide to querying information in LSF, but rather provides some examples of the various ways that users can query job-related information using the LSF CLI. This includes a look at the -json and -o options, which were introduced during the lifecycle of the LSF v10.1.0 family. The -json option can be used to get JSON-formatted output from various LSF query commands, and the -o option can be used to limit the output to only the desired fields.

    We’ll start with a simple job submission. Here we submit a test workload as a non-root user in the LSF cluster.

    $ bsub -o $HOME/output.%J -e $HOME/error.%J ./testjob.sh
    Job <24520> is submitted to default queue <normal>.

    With the unique jobID number 24520, we can now query LSF for information about the job:

    $ bjobs 24520
    JOBID   USER    STAT  QUEUE      FROM_HOST   EXEC_HOST   JOB_NAME   SUBMIT_TIME
    24520   gsamu   RUN   normal     kilenc      kilenc      *estjob.sh May 10 21:09

    Adding the -l option to bjobs provides the long output (more details).

    $ bjobs -l 24520

    Job <24520>, User <gsamu>, Project <default>, Status <RUN>, Queue <normal>,
    Command <./testjob.sh>, Share group charged </gsamu>
    Tue May 10 21:09:22: Submitted from host <kilenc>, CWD <$HOME>, Output File
                         </home/gsamu/output.24520>, Error File </home/gsamu/error.24520>;
    Tue May 10 21:09:23: Started 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot(s)
                         on Host(s) <kilenc>, Execution Home </home/gsamu>,
                         Execution CWD </home/gsamu>;
    Tue May 10 21:10:01: Resource usage collected.
                         MEM: 12 Mbytes;  SWAP: 0 Mbytes;  NTHREAD: 5
                         PGID: 313588;  PIDs: 313588 313589 313591 313592

     MEMORY USAGE:
     MAX MEM: 12 Mbytes;  AVG MEM: 10 Mbytes

     SCHEDULING PARAMETERS:
               r15s   r1m  r15m   ut      pg    io   ls    it    tmp    swp    mem
     loadSched   -     -     -     -       -     -    -     -     -      -      -
     loadStop    -     -     -     -       -     -    -     -     -      -      -

     RESOURCE REQUIREMENT DETAILS:
     Combined: select[type == local] order[r15s:pg]
     Effective: select[type == local] order[r15s:pg]

    It is possible to customize the output format of the bjobs command using the -o option. In this case, we want to show only some specific details about the job in the output of bjobs. We’ve selected to view: jobID, job status, project name, memory consumed, and the output and error files. A full list of the available fields for the custom format can be found here.

    $ bjobs -o \"jobid stat: queue:- project:10  mem:12:G output_file error_file\" 24520JOBID STAT       QUEUE PROJ_NAME  MEM          OUTPUT_FILE ERROR_FILE24520 RUN       normal default    0.01 G       /home/gsamu/output.24520 /home/gsamu/error.24520

    Adding the -json option, it’s possible to get this customized job output in JSON format.

    $ bjobs -o \"jobid stat: queue:- project:10  mem:12:G output_file error_file\" -json 24520{  \"COMMAND\":\"bjobs\",  \"JOBS\":1,  \"RECORDS\":[    {      \"JOBID\":\"24520\",      \"STAT\":\"RUN\",      \"QUEUE\":\"normal\",      \"PROJ_NAME\":\"default\",      \"MEM\":\"0.01 G\",      \"OUTPUT_FILE\":\"\\/home\\/gsamu\\/output.24520\",      \"ERROR_FILE\":\"\\/home\\/gsamu\\/error.24520\"    }  ]}

    Next, let’s look at the bhist command. This can be used to view historical data about jobs.

    $ bhist 24520
    Summary of time in seconds spent in various states:
    JOBID   USER    JOB_NAME  PEND    PSUSP   RUN     USUSP   SSUSP   UNKWN   TOTAL
    24520   gsamu   *tjob.sh  1       0       457     0       0       0       458

    We see that the job command has been truncated. Let’s now run bhist again with the -w option to produce a wide output.

    $ bhist -w 24520
    Summary of time in seconds spent in various states:
    JOBID   USER    JOB_NAME     PEND    PSUSP   RUN     USUSP   SSUSP   UNKWN   TOTAL
    24520   gsamu   ./testjob.sh 1       0       462     0       0       0       463

    And finally, with the -l option to produce a long, detailed output.

    $ bhist -l 24520

    Job <24520>, User <gsamu>, Project <default>, Command <./testjob.sh>
    Tue May 10 21:09:22: Submitted from host <kilenc>, to Queue <normal>, CWD <$HOME>,
                         Output File </home/gsamu/output.%J>, Error File </home/gsamu/error.%J>;
    Tue May 10 21:09:23: Dispatched 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot(s)
                         on Host(s) <kilenc>, Effective RES_REQ <select[type == local] order[r15s:pg] >;
    Tue May 10 21:09:25: Starting (Pid 313588);
    Tue May 10 21:09:25: Running with execution home </home/gsamu>, Execution CWD </home/gsamu>,
                         Execution Pid <313588>;

    Summary of time in seconds spent in various states by Tue May 10 21:17:26
      PEND     PSUSP    RUN      USUSP    SSUSP    UNKWN    TOTAL
      1        0        483      0        0        0        484

    When the job is done, the bacct command can be used to get detailed accounting information for jobs.

    $ bacct 24520

    Accounting information about jobs that are:
      - submitted by all users.
      - accounted on all projects.
      - completed normally or exited
      - executed on all hosts.
      - submitted to all queues.
      - accounted on all service classes.
      - accounted to all RC accounts.
    ------------------------------------------------------------------------------

    SUMMARY:      ( time unit: second )
     Total number of done jobs:       1      Total number of exited jobs:     0
     Total CPU time consumed:       3.4      Average CPU time consumed:     3.4
     Maximum CPU time of a job:     3.4      Minimum CPU time of a job:     3.4
     Total wait time in queues:     1.0
     Average wait time in queue:    1.0
     Maximum wait time in queue:    1.0      Minimum wait time in queue:    1.0
     Average turnaround time:       669 (seconds/job)
     Maximum turnaround time:       669      Minimum turnaround time:       669
     Average hog factor of a job:  0.01 ( cpu time / turnaround time )
     Maximum hog factor of a job:  0.01      Minimum hog factor of a job:  0.01
     Average expansion factor of a job:  1.00 ( turnaround time / run time )
     Maximum expansion factor of a job:  1.00
     Minimum expansion factor of a job:  1.00
     Total Run time consumed:       668      Average Run time consumed:     668
     Maximum Run time of a job:     668      Minimum Run time of a job:     668

     Scheduler Efficiency for 1 jobs
     Slot Utilization:          100.00%  Memory Utilization:            100.00%

    And now the long, detailed output from bacct using the -l parameter.

    $ bacct -l 24520

    Accounting information about jobs that are:
      - submitted by all users.
      - accounted on all projects.
      - completed normally or exited
      - executed on all hosts.
      - submitted to all queues.
      - accounted on all service classes.
      - accounted to all RC accounts.
    ------------------------------------------------------------------------------

    Job <24520>, User <gsamu>, Project <default>, Status <DONE>, Queue <normal>,
    Command <./testjob.sh>, Share group charged </gsamu>
    Tue May 10 21:09:22: Submitted from host <kilenc>, CWD <$HOME>, Output File
                         </home/gsamu/output.%J>, Error File </home/gsamu/error.%J>;
    Tue May 10 21:09:23: Dispatched 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot(s)
                         on Host(s) <kilenc>, Effective RES_REQ <select[type == local] order[r15s:pg] >;
    Tue May 10 21:20:31: Completed <done>.

    Accounting information about this job:
         Share group charged </gsamu>
         CPU_T     WAIT     TURNAROUND   STATUS     HOG_FACTOR    MEM    SWAP
          3.37        1            669     done         0.0050    12M      0M
    ------------------------------------------------------------------------------

    SUMMARY:      ( time unit: second )
     Total number of done jobs:       1      Total number of exited jobs:     0
     Total CPU time consumed:       3.4      Average CPU time consumed:     3.4
     Maximum CPU time of a job:     3.4      Minimum CPU time of a job:     3.4
     Total wait time in queues:     1.0
     Average wait time in queue:    1.0
     Maximum wait time in queue:    1.0      Minimum wait time in queue:    1.0
     Average turnaround time:       669 (seconds/job)
     Maximum turnaround time:       669      Minimum turnaround time:       669
     Average hog factor of a job:  0.01 ( cpu time / turnaround time )
     Maximum hog factor of a job:  0.01      Minimum hog factor of a job:  0.01
     Average expansion factor of a job:  1.00 ( turnaround time / run time )
     Maximum expansion factor of a job:  1.00
     Minimum expansion factor of a job:  1.00
     Total Run time consumed:       668      Average Run time consumed:     668
     Maximum Run time of a job:     668      Minimum Run time of a job:     668

     Scheduler Efficiency for 1 jobs
     Slot Utilization:          100.00%  Memory Utilization:            100.00%

    From jobs to queues

    We’ve looked briefly at querying LSF for job-related information. Let’s now take a closer look at querying LSF for information about the queue configuration. Batch queues are where users submit their jobs. Queues can have a very wide array of attributes and settings. Below we see a listing of the default queues configured in LSF Suite for HPC. The bqueues command is used to query LSF for the queue configuration.

    $ bqueues
    QUEUE_NAME      PRIO STATUS          MAX JL/U JL/P JL/H NJOBS  PEND   RUN  SUSP
    admin            50  Open:Active       -    -    -    -     0     0     0     0
    owners           43  Open:Active       -    -    -    -     0     0     0     0
    priority         43  Open:Active       -    -    -    -     0     0     0     0
    night            40  Open:Active       -    -    -    -     0     0     0     0
    short            35  Open:Active       -    -    -    -     0     0     0     0
    dataq            33  Open:Active       -    -    -    -     0     0     0     0
    normal           30  Open:Active       -    -    -    -     0     0     0     0
    interactive      30  Open:Active       -    -    -    -     0     0     0     0
    idle             20  Open:Active       -    -    -    -     0     0     0     0

    The -l option of bqueues can be used to get a more detailed view of the queues. Here, we look at the long output for the queue normal.

    $ bqueues -l normal

    QUEUE: normal
      -- For normal low priority jobs, running only if hosts are lightly loaded.  This is the default queue.

    PARAMETERS/STATISTICS
    PRIO NICE STATUS          MAX JL/U JL/P JL/H NJOBS  PEND   RUN SSUSP USUSP  RSV PJOBS
     30    0  Open:Active       -    -    -    -     0     0     0     0     0    0     0
    Interval for a host to accept two jobs is 0 seconds

    SCHEDULING PARAMETERS
               r15s   r1m  r15m   ut      pg    io   ls    it    tmp    swp    mem
     loadSched   -     -     -     -       -     -    -     -     -      -      -
     loadStop    -     -     -     -       -     -    -     -     -      -      -

    SCHEDULING POLICIES:  FAIRSHARE  NO_INTERACTIVE
    USER_SHARES:  [default, 1]

    SHARE_INFO_FOR: normal/
     USER/GROUP   SHARES  PRIORITY  STARTED  RESERVED  CPU_TIME  RUN_TIME   ADJUST  GPU_RUN_TIME
    gsamu           1       0.333      0        0         0.0        0       0.000             0
    elasticsearch   1       0.333      0        0         0.0        0       0.000             0

    USERS: all
    HOSTS: all

    Custom output formatting can also be used with the bqueues command via the -o parameter. For this example, we display the queue name, status, and number of jobs (all states). More details about the bqueues -o parameter can be found here.

    $ bqueues -o \"queue_name:12 status:12 njobs\"QUEUE_NAME   STATUS       NJOBSadmin        Open:Active  0owners       Open:Active  0priority     Open:Active  0night        Open:Active  0short        Open:Active  0dataq        Open:Active  0normal       Open:Active  0interactive  Open:Active  0idle         Open:Active  0

    And for JSON-formatted output, we add the -json parameter.

    $ bqueues -json -o "queue_name:12 status:12 njobs"
    {
      "COMMAND":"bqueues",
      "QUEUES":9,
      "RECORDS":[
        {
          "QUEUE_NAME":"admin",
          "STATUS":"Open:Active",
          "NJOBS":"0"
        },
        {
          "QUEUE_NAME":"owners",
          "STATUS":"Open:Active",
          "NJOBS":"0"
        },
        {
          "QUEUE_NAME":"priority",
          "STATUS":"Open:Active",
          "NJOBS":"0"
        },
        {
          "QUEUE_NAME":"night",
          "STATUS":"Open:Active",
          "NJOBS":"0"
        },
        {
          "QUEUE_NAME":"short",
          "STATUS":"Open:Active",
          "NJOBS":"0"
        },
        {
          "QUEUE_NAME":"dataq",
          "STATUS":"Open:Active",
          "NJOBS":"0"
        },
        {
          "QUEUE_NAME":"normal",
          "STATUS":"Open:Active",
          "NJOBS":"0"
        },
        {
          "QUEUE_NAME":"interactive",
          "STATUS":"Open:Active",
          "NJOBS":"0"
        },
        {
          "QUEUE_NAME":"idle",
          "STATUS":"Open:Active",
          "NJOBS":"0"
        }
      ]
    }

    From queues to servers

    Finally, we’ll look at the LSF bhosts command, which is used to display information about the batch hosts in the LSF cluster.

    $ bhosts
    HOST_NAME          STATUS       JL/U    MAX  NJOBS    RUN  SSUSP  USUSP    RSV
    archie             ok              -      2      0      0      0      0      0
    kilenc             ok              -     32      0      0      0      0      0

    To view detailed information about a batch host, the -l parameter can be specified for bhosts. Here we query for information on host archie.

    $ bhosts -l archie
    HOST  archie
    STATUS           CPUF  JL/U    MAX  NJOBS    RUN  SSUSP  USUSP    RSV DISPATCH_WINDOW
    ok               6.00     -      2      0      0      0      0      0      -

     CURRENT LOAD USED FOR SCHEDULING:
                    r15s   r1m  r15m    ut    pg    io   ls    it   tmp   swp   mem  slots  ngpus
     Total           0.0   0.0   0.0    0%   0.0     1    1   437 3456M    0M  1.7G      2    0.0
     Reserved        0.0   0.0   0.0    0%   0.0     0    0     0    0M    0M    0M      -     -

                    ngpus_physical gpu_shared_avg_ut gpu_shared_avg_mut gpu_mode0
     Total                    0.0               0.0                0.0       0.0
     Reserved                  -                 -                  -         -

                    gpu_mode1 gpu_mode2 gpu_mode3 gpu_mode4 gpu_mode5 gpu_mode6
     Total               0.0       0.0       0.0       0.0       0.0       0.0
     Reserved             -         -         -         -         -         -

                    gpu_mode7 gpu_temp0 gpu_temp1 gpu_temp2 gpu_temp3 gpu_temp4
     Total               0.0       0.0       0.0       0.0       0.0       0.0
     Reserved             -         -         -         -         -         -

                    gpu_temp5 gpu_temp6 gpu_temp7 gpu_ecc0 gpu_ecc1 gpu_ecc2 gpu_ecc3
     Total               0.0       0.0       0.0      0.0      0.0      0.0      0.0
     Reserved             -         -         -        -        -        -        -

                    gpu_ecc4 gpu_ecc5 gpu_ecc6 gpu_ecc7 gpu_ut0 gpu_ut1 gpu_ut2 gpu_ut3
     Total              0.0      0.0      0.0      0.0     0.0     0.0     0.0     0.0
     Reserved            -        -        -        -       -       -       -       -

                    gpu_ut4 gpu_ut5 gpu_ut6 gpu_ut7 gpu_mut0 gpu_mut1 gpu_mut2 gpu_mut3
     Total             0.0     0.0     0.0     0.0      0.0      0.0      0.0      0.0
     Reserved           -       -       -       -        -        -        -        -

                    gpu_mut4 gpu_mut5 gpu_mut6 gpu_mut7 gpu_mtotal0 gpu_mtotal1
     Total              0.0      0.0      0.0      0.0         0.0         0.0
     Reserved            -        -        -        -           -           -

                    gpu_mtotal2 gpu_mtotal3 gpu_mtotal4 gpu_mtotal5 gpu_mtotal6
     Total                 0.0         0.0         0.0         0.0         0.0
     Reserved               -           -           -           -           -

                    gpu_mtotal7 gpu_mused0 gpu_mused1 gpu_mused2 gpu_mused3 gpu_mused4
     Total                 0.0        0.0        0.0        0.0        0.0        0.0
     Reserved               -          -          -          -          -          -

                    gpu_mused5 gpu_mused6 gpu_mused7 gpu_maxfactor
     Total                0.0        0.0        0.0           0.0
     Reserved              -          -          -             -

     LOAD THRESHOLD USED FOR SCHEDULING:
               r15s   r1m  r15m   ut      pg    io   ls    it    tmp    swp    mem
     loadSched   -     -     -     -       -     -    -     -     -      -      -
     loadStop    -     -     -     -       -     -    -     -     -      -      -

     CONFIGURED AFFINITY CPU LIST: all

    Similar to bjobs and bqueues, the -o parameter can be used to customize the output of bhosts. For this example, we display the host name, status, and number of jobs (all states). More details about the bhosts -o parameter can be found here.

    $ bhosts -o \"host_name:12 status:12 njobs\" HOST_NAME    STATUS       NJOBSarchie       ok           0kilenc       ok           0

    And adding the -json parameter for JSON-formatted output.

    $ bhosts -json -o "host_name:12 status:12 njobs"
    {
      "COMMAND":"bhosts",
      "HOSTS":2,
      "RECORDS":[
        {
          "HOST_NAME":"archie",
          "STATUS":"ok",
          "NJOBS":"0"
        },
        {
          "HOST_NAME":"kilenc",
          "STATUS":"ok",
          "NJOBS":"0"
        }
      ]
    }
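
    Because bjobs, bqueues, and bhosts all accept the same -o and -json combination, a small helper can wrap all three. Here is a sketch of that idea; the lsf_query function is hypothetical, but the commands and field lists are exactly the ones demonstrated above:

    import json
    import subprocess

    def lsf_query(command, fields):
        """Run an LSF query command (bjobs, bqueues, or bhosts) with a custom
        field list, and return the parsed records from its JSON output."""
        result = subprocess.run([command, "-o", fields, "-json"],
                                capture_output=True, text=True, check=True)
        return json.loads(result.stdout).get("RECORDS", [])

    # Reproduce the two custom queries shown above
    for host in lsf_query("bhosts", "host_name:12 status:12 njobs"):
        print(host["HOST_NAME"], host["STATUS"], host["NJOBS"])

    for queue in lsf_query("bqueues", "queue_name:12 status:12 njobs"):
        print(queue["QUEUE_NAME"], queue["STATUS"], queue["NJOBS"])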

    That concludes our brief look at LSF query commands. We’ve only scratched the surface of the capabilities of the LSF CLI. The LSF command line interface is powerful and flexible, including ways to customize command output and to produce output in JSON format. For more details, the complete set of IBM Spectrum LSF documentation can be found online at IBM Documentation here.

    ", + "url": "https://hpc.social/personal-blog/2022/customizing-command-output-in-ibm-spectrum-lsf/", + + + + + + "date_published": "2022-05-12T13:16:02-06:00", + "date_modified": "2022-05-12T13:16:02-06:00", + + "author": "Ramblings of a supercomputing enthusiast." + + }, + + { + "id": "https://hpc.social/personal-blog/2022/pipelib-simple-library-to-parse-filter-and-sort-things/", + "title": "Pipelib- Simple Library to Parse, Filter, and Sort Things", + "summary": null, + "content_text": "In early April I added an “update” command to Singularity Registry HPC (see the pull request here and needed to start with a list of docker tags andparse them into version strings to sort, and still return the original tag for later use.I wound up creating a custom class and set of functions that use distutils.LooseVersion to support that, but in creating this“hard coded thing” I stepped back and had a question. Can we more intelligentally compose custom parsing pipelines?Specifically I wanted to:Start with a list of container tags for an image from a registryFilter out anything that looks like a commit, but isn't a string (e.g., latest)Derive a major, minor, and patch version for each, and filter to newestSort!For step 3, as an example if there was a 1.2.3-commitA and 1.2.3-commitB I’d only want to keep one, and the newer one of the two,so I could ask for “unique by patch” and filter the older one out.Ultimately of course I dove right in,and this led to the creation of Pipelib, which was an itch I terribly wanted to scratch! In this quick post, I want to share the overall design, because it was really fun to make.DesignBefore we talk about the design, let me show it to you.import pipelib.steps as stepimport pipelib.pipeline as pipeline# A pipeline to process a list of stringssteps = ( # convert everything to lowercase step.transform.ToLowercase(), # don't include anything with \"two\" ~step.filters.HasPatterns(filters=[\"two\"]))# Strings to processitems = ['item-ONE', 'item-TWO', 'item-two-THREE']p = pipeline.Pipeline(steps)# The updated and transformed itemsupdated = p.run(items)# ['item-one']In the above, we take a pipeline object and add steps to it. That design is fairly simple,as the Pipeline class takes an optional iterable of things to process. I say “things” becausewe can give it steps, composed steps, or even entire other pipelines. Here is an exampleof adding an entire other Pipeline!import pipelib.steps as stepimport pipelib.pipeline as pipelinefruits = [\"Orange\", \"Melon\", \"Watermelon\", \"Fruit23\"]preprocess = pipeline.Pipeline( steps = ( # Example of chaining steps together step.filters.HasMaxLength(length=8) & step.filters.HasAllLetters(), ))# Add this preprocess step alongside other steps (make lowercase)steps = ( step.transform.ToLowercase(), preprocess,)# Create a new pipeline and runp = pipeline.Pipeline(steps)# We should expect orange and melon!updated = p.run(fruits)['orange', 'melon']Implementation-wise, this is also fairly simple. We can check the underlying class of the provided objectand either add a single step, or insert a set of steps given another pipeline. In fact, pipelib comes with asmall set of “pipelines” that are ready for you to use. 
For example, here is one tofilter out “things that look like complete or partial git commits”import pipelib.steps as stepimport pipelib.pipeline as pipeline# Pre-generated sets of steps we can useimport pipelib.pipelines as pipelinespipeline.Pipeline( pipelines.git.RemoveCommits).run([\"832b1c\", \"832b1c645e562d5cc6e376e5a3e058c02a40d92a\", \"123-abcd\"])[\"123-abcd\"]This is something I found useful because people sometimes use commits as Docker tags, and I don’t find this incredibly meaningful as a version to compare to (and want to remove them). Under the hood, it looks like this:RemoveCommits = pipeline.Pipeline( steps=( step.filters.HasMinLength(length=8) & ~step.filters.HasAllLowerLettersNumbers(), ))Do you also notice something interesting in the above? We are actually combining steps akin to logical operations.The above “pipeline” is actually just one step that combined other steps!pipelines.git.RemoveCommits.steps[HasMinLength_AND_NotHasAllLowerLettersNumbers]Let’s step back and talk about some concepts that allow this.ConceptsPipelineAs we’ve seen above, a pipeline is a collection of steps that take, as input, a listing of items and return a parser and filtered list.StepA step is some action in a pipeline. The way this works is that we have different kinds of steps, and this makes them easyto implement and even test. A boolean step is akin to a filter, and is expected to return True or False to indicate if the item passes, e.g., False means it’s filtered out. Boolean steps are neat because they afford different kinds of logic and combination.Logical OperationsLet’s say that we have a step that checks that an input is all letters:step.filters.HasAllLetters()For the above, anything that had a number (e.g., orange123) would be filtered out. But what if we wanted to inverse that, and allow passing of inputs that don’t have all letters (meaning we want numbers or special characters?) We can simply do that:~step.filters.HasAllLetters()Implementation wise, this was really fun to do! For Python to respect the logical operator ~ I simply define the “invert” function for the BooleanStep class.def __invert__(self): \"\"\" We can say \"~step\" and reverse the logic. \"\"\" self.reverse = True return selfIt sets an attribute “reverse” to True, and returns itself, that way we use the same step, but with this variable set to be true.What does that do? In the “run” function of the BooleanStep we basically retrieve an outcome from the underlying step (True or False) and simply reverse it given that boolean is True! Again, it’s very simple, and allows for doing things like this:from pipelib.pipeline import Pipelineimport pipelib.steps as stepsPipeline(~steps.filters.HasAllLetters()).run([\"I-have-special-characters\", \"Idonot\"])['I-have-special-characters']Pipeline(steps.filters.HasAllLetters()).run([\"I-have-special-characters\", \"Idonot\"])['Idonot']What if we wanted to combine steps? 
E.g., what if I want to say “has all letters” OR “has minimum length 10?” If we put the stepsside by side we would only be able to support an AND - allowing passing through of entries that have all letters and the minimum length of 10.Pipelib supports both those operators - AND and OR as follows:> step = steps.filters.HasAllLetters() & steps.filters.HasMinLength(length=10)> stepHasAllLetters_AND_HasMinLengthPipeline(step).run([\"thisonewillpass\", \"thisoneno\", \"notthisone2\"])['thisonewillpass']For both cases above, we are using the “and” and “or functions, respectively, and:Checking for class compatibility (both must be BooleanStep)Creating a list of composed steps to added to a class attribute \"composed\"Add the previous run functions too, naming based on the step class nameDefine a new run function that loops through the composed set, runs, updates and returns a shared resultName the class based on the combined names of the composed classesFor step 4 above, the operation (AND or OR) will vary depending on if the initial call was to “and” or “or”.The main difference between the two is that “OR” starts with a default of False (otherwise it would always return True)and AND starts with a default of True (otherwise it would always return False).And since we are always taking the first class “composed” attribute, this means that you can composesteps with other steps as many times as you like - a new check is simply added to the front or back ofthe list. The result (returned) is the new class that is ready to run. Here is what an OR looks like:> step = steps.filters.HasAllLetters() | steps.filters.HasMinLength(length=10)> stepHasAllLetters_OR_HasMinLengthPipeline(step).run([\"thisonewillpass\", \"veryshort\", \"12345\"])['thisonewillpass', 'veryshort']If you are interested in this function, you can see the entire thing here.Transformation OperationsA base step can be thought of as a transformation. Instead of expecting a boolean to be returned, we areinstead expecting a new value or None. In this respect the transform step can also act as a boolean as a returnof “None” will be removed from the list, however in most cases a transform is intended to perform an operation on the item passed. Here is an example of a transformation operation:Pipeline(steps.transform.ToLowercase()).run([\"AHHHH\"])['ahhhh']Sort OperationsA sort operation is a step that is one level up. Instead of operating on individual items, the stepre-defines a the higher level “run” function and does operations across the iterable.A good example from Pipelib is the use case that originally inspired me - to start with a messylist of Docker tags, do some parsing to derive versions, and return back a sorted list.pipeline.Pipeline(steps.container.ContainerTagSort(ascending=False)).run([\"1.2.3\", \"0.1.0\", \"8.3.2\"])['8.3.2', '1.2.3', '0.1.0']pipeline.Pipeline(steps.container.ContainerTagSort(ascending=True)).run([\"1.2.3\", \"0.1.0\", \"8.3.2\"])['0.1.0', '1.2.3', '8.3.2']In the above we also demonstrate that steps can take parameters, such as the order of a sort!This particular sorting step also allows you to say you want to return unique major, minor, or patchversions.pipeline.Pipeline(steps.container.ContainerTagSort(unique_major=True)).run([\"1.2.3\", \"1.1.0\", \"8.3.2\"])['8.3.2', '1.2.3']And if you wanted to do a more comprehensive clean up and sort, you could do something like this.WrapperPipelib needed a way to be able to pass around some parsed version of an item, but still maintainthe original. 
For example, let’s say I’m parsing Docker tags into something that resembles a loosesemantic version, I might have filtered 1.2.3-boop to be just 1.2.3, but at the end of theday I need the original tag to pull. Pipelib accomplishes this via wrappers.A wrapper is conceptually that - an internal wrapper class to an item that allows for storingan original value, and still doing operations to change a current state. Wrappers are used inside steps and allow for things like sorting and comparison. You probably don’t need to worry about wrappersunless you want to develop for pipelib. By default, wrappers and “extracted away” to return the basictypes. However, you can ask Pipelib to not do this unwrapping, and then you can get backthe derived and original values:tags = [\"1.2.3\", \"1.1.0\", \"8.3.2\"]updated = pipeline.Pipeline(steps.container.ContainerTagSort()).run(tags, unwrap=False)# Notice that this just looks like a set of strings...updated['8.3.2', '1.2.3']# But actually we have wrappers, that each have an _original attributetype(updated[0])pipelib.wrappers.version.VersionWrapperConclusionI’ve had so much fun making this library! Like many of my projects it’s probably not super useful,but if you see a cool use case please let me know! I’m also happy to develop custom pipelines or stepsfor a use case that you might be interested in. Please don’t hesitate to ask me for help, I’m always runningout of fun things to do :) Why should I care?Arguably you could just hard code this kind of filtering and sorting, but I think theidea of being able to customize and assemble steps is a cool one. If the steps are providedin a library it might might it slightly easier, or your work more reproducible because someone else can use the steps. And if you don’t care? That’s okay too. I recognize this wasmostly a fun project, and yet-another-itch I really wanted to scratch because I’ve nevermade a design like this before, either in terms of the idea or underlying testing and automation.", + "content_html": "

    In early April I added an “update” command to Singularity Registry HPC (see the pull request here) and needed to start with a list of docker tags and parse them into version strings to sort, and still return the original tag for later use. I wound up creating a custom class and set of functions that use distutils.LooseVersion to support that, but in creating this “hard coded thing” I stepped back and had a question.

    Can we more intelligently compose custom parsing pipelines?

    Specifically I wanted to:

    1. Start with a list of container tags for an image from a registry
    2. Filter out anything that looks like a commit, but isn't a string (e.g., latest)
    3. Derive a major, minor, and patch version for each, and filter to newest
    4. Sort!

    For step 3, as an example, if there was a 1.2.3-commitA and 1.2.3-commitB I’d only want to keep one, and the newer of the two, so I could ask for “unique by patch” and filter the older one out. Ultimately, of course, I dove right in, and this led to the creation of Pipelib, which was an itch I terribly wanted to scratch! In this quick post, I want to share the overall design, because it was really fun to make.

    Design

    Before we talk about the design, let me show it to you.

    import pipelib.steps as step
    import pipelib.pipeline as pipeline

    # A pipeline to process a list of strings
    steps = (
       # convert everything to lowercase
       step.transform.ToLowercase(),

       # don't include anything with "two"
       ~step.filters.HasPatterns(filters=["two"])
    )

    # Strings to process
    items = ['item-ONE', 'item-TWO', 'item-two-THREE']

    p = pipeline.Pipeline(steps)

    # The updated and transformed items
    updated = p.run(items)
    # ['item-one']

    In the above, we take a pipeline object and add steps to it. That design is fairly simple, as the Pipeline class takes an optional iterable of things to process. I say “things” because we can give it steps, composed steps, or even entire other pipelines. Here is an example of adding an entire other Pipeline!

    import pipelib.steps as step
    import pipelib.pipeline as pipeline

    fruits = ["Orange", "Melon", "Watermelon", "Fruit23"]

    preprocess = pipeline.Pipeline(
        steps = (
            # Example of chaining steps together
            step.filters.HasMaxLength(length=8) & step.filters.HasAllLetters(),
        )
    )

    # Add this preprocess step alongside other steps (make lowercase)
    steps = (
       step.transform.ToLowercase(),
       preprocess,
    )

    # Create a new pipeline and run
    p = pipeline.Pipeline(steps)

    # We should expect orange and melon!
    updated = p.run(fruits)
    # ['orange', 'melon']

    Implementation-wise, this is also fairly simple. We can check the underlying class of the provided object and either add a single step, or insert a set of steps given another pipeline. In fact, pipelib comes with a small set of “pipelines” that are ready for you to use. For example, here is one to filter out “things that look like complete or partial git commits”:

    import pipelib.steps as step
    import pipelib.pipeline as pipeline

    # Pre-generated sets of steps we can use
    import pipelib.pipelines as pipelines

    pipeline.Pipeline(
        pipelines.git.RemoveCommits
    ).run(["832b1c", "832b1c645e562d5cc6e376e5a3e058c02a40d92a", "123-abcd"])
    ["123-abcd"]

    This is something I found useful because people sometimes use commits as Docker tags, and I don’t find this incredibly meaningful as a version to compare to (and want to remove them). Under the hood, it looks like this:

    RemoveCommits = pipeline.Pipeline(
        steps=(
            step.filters.HasMinLength(length=8) & ~step.filters.HasAllLowerLettersNumbers(),
        )
    )

    Do you also notice something interesting in the above? We are actually combining steps akin to logical operations. The above “pipeline” is actually just one step that combined other steps!

    pipelines.git.RemoveCommits.steps
    [HasMinLength_AND_NotHasAllLowerLettersNumbers]

    Let’s step back and talk about some concepts that allow this.

    Concepts

    Pipeline

    As we’ve seen above, a pipeline is a collection of steps that take, as input, a listing of items and return a parsed and filtered list.

    Step

    A step is some action in a pipeline. The way this works is that we have different kinds of steps, and this makes them easy to implement and even test. A boolean step is akin to a filter, and is expected to return True or False to indicate if the item passes; e.g., False means it’s filtered out. Boolean steps are neat because they afford different kinds of logic and combination.

    Logical Operations

    Let’s say that we have a step that checks that an input is all letters:

    step.filters.HasAllLetters()

    For the above, anything that had a number (e.g., orange123) would be filtered out. But what if we wanted to inverse that, and allow passing of inputs that don’t have all letters (meaning we want numbers or special characters)? We can simply do that:

    ~step.filters.HasAllLetters()

    Implementation wise, this was really fun to do! For Python to respect the logical operator ~ I simply define the “invert” function for the BooleanStep class.

    def __invert__(self):
        """
        We can say "~step" and reverse the logic.
        """
        self.reverse = True
        return self

    It sets an attribute “reverse” to True, and returns itself; that way we use the same step, but with this variable set to be true. What does that do? In the “run” function of the BooleanStep we basically retrieve an outcome from the underlying step (True or False) and simply reverse it given that boolean is True! Again, it’s very simple, and allows for doing things like this:

    from pipelib.pipeline import Pipeline
    import pipelib.steps as steps

    Pipeline(~steps.filters.HasAllLetters()).run(["I-have-special-characters", "Idonot"])
    ['I-have-special-characters']

    Pipeline(steps.filters.HasAllLetters()).run(["I-have-special-characters", "Idonot"])
    ['Idonot']
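
    To make the mechanics concrete, here is a stripped-down, standalone sketch of the same pattern (an illustration only, not Pipelib's actual classes): the step computes its check, and run flips the outcome when reverse has been set by ~.

    class BooleanStep:
        # A toy version of the reversible boolean step pattern
        reverse = False

        def __invert__(self):
            # "~step" reverses the logic of the same step instance
            self.reverse = True
            return self

        def check(self, item):
            raise NotImplementedError

        def run(self, item):
            outcome = self.check(item)
            # Flip the outcome if the step was inverted with "~"
            return not outcome if self.reverse else outcome

    class HasAllLetters(BooleanStep):
        def check(self, item):
            return item.isalpha()

    step = ~HasAllLetters()
    print([item for item in ["I-have-special-characters", "Idonot"] if step.run(item)])
    # ['I-have-special-characters']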

    What if we wanted to combine steps? E.g., what if I want to say “has all letters” OR “has minimum length 10”? If we put the steps side by side we would only be able to support an AND - allowing passing through of entries that have all letters and the minimum length of 10. Pipelib supports both those operators - AND and OR - as follows:

    > step = steps.filters.HasAllLetters() & steps.filters.HasMinLength(length=10)
    > step
    HasAllLetters_AND_HasMinLength

    Pipeline(step).run(["thisonewillpass", "thisoneno", "notthisone2"])
    ['thisonewillpass']

    For both cases above, we are using the “and” and “or” functions, respectively, and:

    1. Checking for class compatibility (both must be BooleanStep)
    2. Creating a list of composed steps to be added to a class attribute "composed"
    3. Adding the previous run functions too, naming them based on the step class name
    4. Defining a new run function that loops through the composed set, runs, updates, and returns a shared result
    5. Naming the class based on the combined names of the composed classes

    For step 4 above, the operation (AND or OR) will vary depending on if the initial call was to “and” or “or”. The main difference between the two is that “OR” starts with a default of False (otherwise it would always return True) and AND starts with a default of True (otherwise it would always return False). And since we are always taking the first class “composed” attribute, this means that you can compose steps with other steps as many times as you like - a new check is simply added to the front or back of the list. The result (returned) is the new class that is ready to run. Here is what an OR looks like:

    > step = steps.filters.HasAllLetters() | steps.filters.HasMinLength(length=10)
    > step
    HasAllLetters_OR_HasMinLength

    Pipeline(step).run(["thisonewillpass", "veryshort", "12345"])
    ['thisonewillpass', 'veryshort']

    If you are interested in this function, you can see the entire thing here.
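
    As a standalone sketch of that recipe (again, an illustration of the pattern rather than Pipelib's implementation), the composed run function might look like the following, with AND starting from True and OR starting from False:

    class ToyStep:
        # Minimal boolean steps for the demonstration
        def run(self, item):
            return self.check(item)

    class HasAllLetters(ToyStep):
        def check(self, item):
            return item.isalpha()

    class HasMinLength(ToyStep):
        def __init__(self, length):
            self.length = length

        def check(self, item):
            return len(item) >= self.length

    class ComposedStep:
        def __init__(self, composed, operator):
            self.composed = composed    # the list of composed steps
            self.operator = operator    # "AND" or "OR"

        def run(self, item):
            # AND starts from True, OR starts from False (see above)
            result = self.operator == "AND"
            for step in self.composed:
                if self.operator == "AND":
                    result = result and step.run(item)
                else:
                    result = result or step.run(item)
            return result

    # "has all letters" OR "has minimum length 10"
    either = ComposedStep([HasAllLetters(), HasMinLength(10)], "OR")
    print([item for item in ["thisonewillpass", "veryshort", "12345"] if either.run(item)])
    # ['thisonewillpass', 'veryshort']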

    Transformation Operations

    A base step can be thought of as a transformation. Instead of expecting a boolean to be returned, we are instead expecting a new value or None. In this respect the transform step can also act as a boolean, as a return of “None” will be removed from the list; however, in most cases a transform is intended to perform an operation on the item passed. Here is an example of a transformation operation:

    Pipeline(steps.transform.ToLowercase()).run(["AHHHH"])
    ['ahhhh']

    Sort Operations

    A sort operation is a step that is one level up. Instead of operating on individual items, the step re-defines the higher-level “run” function and does operations across the iterable. A good example from Pipelib is the use case that originally inspired me - to start with a messy list of Docker tags, do some parsing to derive versions, and return a sorted list.

    pipeline.Pipeline(steps.container.ContainerTagSort(ascending=False)).run(["1.2.3", "0.1.0", "8.3.2"])
    ['8.3.2', '1.2.3', '0.1.0']

    pipeline.Pipeline(steps.container.ContainerTagSort(ascending=True)).run(["1.2.3", "0.1.0", "8.3.2"])
    ['0.1.0', '1.2.3', '8.3.2']

    In the above we also demonstrate that steps can take parameters, such as the order of a sort! This particular sorting step also allows you to say you want to return unique major, minor, or patch versions.

    pipeline.Pipeline(steps.container.ContainerTagSort(unique_major=True)).run(["1.2.3", "1.1.0", "8.3.2"])
    ['8.3.2', '1.2.3']

    And if you wanted to do a more comprehensive clean up and sort, you could do something like this.

    Wrapper

    Pipelib needed a way to be able to pass around some parsed version of an item, but still maintain the original. For example, let’s say I’m parsing Docker tags into something that resembles a loose semantic version; I might have filtered 1.2.3-boop to be just 1.2.3, but at the end of the day I need the original tag to pull. Pipelib accomplishes this via wrappers.

    A wrapper is conceptually that - an internal wrapper class for an item that allows for storing an original value, while still doing operations to change a current state. Wrappers are used inside steps and allow for things like sorting and comparison. You probably don’t need to worry about wrappers unless you want to develop for pipelib. By default, wrappers are “extracted away” to return the basic types. However, you can ask Pipelib to not do this unwrapping, and then you can get back the derived and original values:

    tags  = [\"1.2.3\", \"1.1.0\", \"8.3.2\"]updated = pipeline.Pipeline(steps.container.ContainerTagSort()).run(tags, unwrap=False)# Notice that this just looks like a set of strings...updated['8.3.2', '1.2.3']# But actually we have wrappers, that each have an _original attributetype(updated[0])pipelib.wrappers.version.VersionWrapper

    Conclusion

    I’ve had so much fun making this library! Like many of my projects it’s probably not super useful, but if you see a cool use case please let me know! I’m also happy to develop custom pipelines or steps for a use case that you might be interested in. Please don’t hesitate to ask me for help, I’m always running out of fun things to do :)

    Why should I care?

    Arguably you could just hard code this kind of filtering and sorting, but I think the idea of being able to customize and assemble steps is a cool one. If the steps are provided in a library, it might make it slightly easier, or make your work more reproducible, because someone else can use the steps. And if you don’t care? That’s okay too. I recognize this was mostly a fun project, and yet-another-itch I really wanted to scratch, because I’ve never made a design like this before, either in terms of the idea or the underlying testing and automation.

    ", + "url": "https://hpc.social/personal-blog/2022/pipelib-simple-library-to-parse-filter-and-sort-things/", + + + + + + "date_published": "2022-05-07T13:30:00-06:00", + "date_modified": "2022-05-07T13:30:00-06:00", + + "author": "Vanessasaurus" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/the-research-software-ecosystem/", + "title": "The Research Software Ecosystem", + "summary": null, + "content_text": "We recently published the Research Software Encyclopedia and also have added several new parsers for obtaining new data, meaning the total collectionof curated research software is greater than 1500entries. In honor of this collection, and of a library I’m working on called CiteLang, I wanted to do a small study to better understand:What are the most valuable dependencies in our community, across languages?What are the most valuable dependencies in our community, by language?What is the credit allocation for each repository?CiteLangTo step back for a second, let’s talk again about CiteLang. It has many functions - one of thembeing an ability to assess opensource contributions via git, but it’s main purpose is to be a markdown syntax for citing software,meaning that we can:Generate basic software credit trees, graphs, and markdown summaries.Derive a new, customizable model of credit based on published packages and dependencies.Provide a way to cite software in a paper and give credit without needing DOIs.As a simple example, I can run CiteLang over this markdown file with CiteLang references:# SummaryPortability and reproducibility of complex software stacks is essential for researchers to perform their work. High Performance Computing (HPC) environments add another level of complexity, where possibly conflicting dependencies must co-exist. Although container technologies like Singularity @conda{name=singularity} make it possible to \"bring your own environment,\" without any form of central strategy to manage containers, researchers who seek reproducibility via using containers are tasked with managing their own container collection, often not taking care to ensure that a particular digest or version is used. The reproducibility of the work is at risk, as they cannot easily install and use containers, nor can they share their software with others.Singularity Registry HPC (shpc) @pypi{name=singularity-hpc} is the first of its kind to provide an easy means for a researcher to add their research software for sharing and collaboration with other researchers to an existing collection of over 200 popular scientific libraries @github{name=autamus/registry} @github{name=spack/spack, release=0.17}. The software installs containers as environment modules that are easyto use and read documentation for, and exposes aliases for commands in the container that the researcher can add to their pipeline without thinking about complex interactions with a container. The simple addition of an entry to the registry maintained by shpc comes down to adding a yaml file, and after doing this, another researcher can easily install the same software, down to the digest, to reproduce the original work.# References<!--citelang start--><!--citelang end-->And then run citelang render paper.md to get a nice rendered table alongside your paper! 
What CiteLang does is find the references in the paper, they look like this:@conda{name=singularity}@pypi{name=singularity-hpc}@github{name=autamus/registry} @github{name=spack/spack, release=0.17}Each of the references above is a package manager with a package name and (optionally) a version, and we can load in the metadatafor each and then generate a table that you see here that summarizes credit across dependencies. In this model, we give some allocation of credit (default is 50%) to the main work (paper or software) citing the software, and then recursively parse dependencies up to some minimum level of credit to calculate scores. Dependencies shared across libraries are averaged together. The final table represents the credit that you give not only to the top level software, but to all nested dependencies, for the work that you did. And that’s only the basics! CiteLang takes this simple ability to parse references and extends it to automation, graphs, badges, and more! You can read more about CiteLang here. Publish or perish? How about neither? I just need to keep writing software!But do you see what is happening above? We aren’t requiring some artificial publicationin order to cite software. We are citing it based on its actual usage, as a known dependency to some other software.In a nutshell, we don’t believe that “the traditional academic way” of citing papers makes sense for software, and insteadof using DOIs we can use package managers and metadata as a source of truth, and derive the real value of a piece of softwarebased on this ecosystem. This means that as a research software engineer, you can just keep doing what you are already doing, and ifsomeone uses CiteLang to summarize their work, given that your software is published to a package managed you’ll get credit. Thereare so many cool ideas around this! But let’s start at the beginning. We first want to show how to summarize an ecosystem.That is exactly what we are going to do in this post.The Research Software EcosytemStarting with these curated repositories from a set of scrapers including the Journal of Open Source Software, the HAL Research Software Database, the Research Software NL Dictionary, ROpenSci, and The Molecular Sciences Software Institute, we can do a basic analysis to identify the most used (and thus valued) pieces of software in our ecosystem. My analysis plan was to:Start with the current database.For each repository, look for requirements files to parse.Derive dependency data based on this requirements file.Combine and rank to discover the top dependencies!This of course is limited to the subset of software in our database, and the ability of CiteLang to parse a requirements file.Currently we parse setup.py and requirements.txt (Python), DESCRIPTION (R), go.mod (Go), package.json (npm), and Gemfile (ruby). Based on thebreakdown of the languages found in the RSEPedia, this is a reasonable start! But it’s also kind of sad to see that my favorite languages (Go and Rust) are barely represented in our community. Also, the aboveshould tell you that the R and Python results likely have some meaningful interpretation, but the others not so much, only because we don’t have a big enough sample. 
So for all of the abovesteps, for these 1500+ repositories and many languages, I wanted th entire process to be automated, always have potential for easy improvement,and run at some regular interval as new software comes into the Research Software Encyclopedia (also automated) so we can derive changes over time.If you dont’ care to read further:View the Research Software EcosystemCheck out Languages hereResults for Dependencies hereIndividual Repositories hereFor this first publication of the interface we have the following metrics: And I’m so excited because a tiny vision I had a few years ago to provide (and use) a community research software database is comingto live! So without further adeiu, I’m just going to jump into the cool results! It will be fun to see how these change over time.PythonLadies and gents, dinosaurs and rabbits! Your Python results: So here is the first awesome insight. Is anyone really surprised to see numpy as the number one library?The credit value here says that the average Python repository is attributing about 3% of credit to numpy, meaning it is a direct or indirect dependency. Let that sink in! Here is the irony - when is the last time you cited numpy? You probably haven’t, because you’ve cited somethingthat uses it. We don’t remember numpy despite the fact that it’s so core to everything that we do. The fact that the most widely used library is rarely cited is huge evidence for why a manual “write papers and cite DOIs” approach just won’t work for software.What else do we see in this list? Let me name a few things. First, we can’t be so terrible at remembering to look at or visualizethings because matplotlib is second. At least for research software, this is telling us that making plots or charts is important.The next (possibly surprising) result is that documentation and testing is at least represented, and this might be a biased samplebecause we include repositories that are peer reviewed (e.g., JoSS) and documentation and testing is necessary for that. Given this need for Python, sphinx and pytest come up as leaders to provide that. So here is another nugget of insight: Some of us are so busy focusing on domain-specific software that we forget the importance of the “less sexy” research software that helps us test, document, view things, or even create simple data structures.This kind of “base” software has always been what I’ve been most interested in, and ironically what people tell me time and time again“That’s not research software.” Oh really? So something that is entirely powering the research community is not research software?Of course I have my own strong opinions about a taxonomy for research software, but I would encourage those of you who are very dismissive to take a step back andconsider what you are really saying.The next insight is that we see a lot of libraries for data formats (e.g., pyaml, h5py, lxml, and more lower in the list) and this is an attestment to how important being able to read, serialize, and save data is.The final insight is the fact that requests is high in the list. For those of you not familiar, requests is a library for doing that, makinghttp requests to get content from some webby place. This is an attestment to the fact that our work is increasingly relying on external APIs,automation, or other resources provided on the web.You can see the full Python results here.RI’m less of an R programmer these days, but I think that these results also make sense. 
We don’t see any huge leaders in the same way as we see numpy in Python, but not surprisingly the leader packagefor the R language is, well, R! I at first thought this was a bug, but actually R DESCRIPTION files that we parse do commonly include a pinned version of R:Depends: R (>= 3.4.1), TailRank, ...And so we actually can give credit to the language proper! If you don’t feel this is appropriate, feel free to skip this line and considerthe top package jsonlite. This is also why I think json would be represented in Python if it wasn’t part of the standard library. Us research folks - we need our json! Overall I think we see a similar pattern here as we saw with Python. The libraries that float to the top are those that involve data structures (jsonlite, yaml), webby requests or similar (httr, curl), documentation and testing (knitr, rmarkdown) and graphics or visualization. What does this tell us about what is undervalued in research software? Again, it’s not the domain specific libraries, but rather the core stuff that enables those libraries.You can see the full R results here.ProjectsIf you are interested in a specific project in the RSEPedia, we also provide a project-specific table and badge! You can browse projects from here, and here is an example of a badge generated for a project called github.com/ORNL/tx2 (and on GitHub). Without even looking I can tell you we have some machine learning and/or visualization going on here (scikit-learn! umap! pandas! matplotlib)! Notice how numpy (as an example) shows up at multiple points in the tree - when we calculate an overall credit, say, for the ecosystem, we take that into account! And we can then peek at the project-specific table and sort of verify that yes, this is a Python ML/visualization project: And we see some surprises! Like, the slack-sdk? What? Believe it or not, that is pulled in by tqdm. The project-specific tables (and the description at the top) also give you a better sense of how CiteLang allocatescredit. The top level package is given 50%, and then the other 50% is given to all dependencies in the same fashion.We cut off at a value of 0.001, and we do that in case we might be parsing dependencies forever down to some infintesimally small amount.Finally, every project serves its own raw data and the site is searchable, because sites should be. 😄️DiscussionI’m so happy (and a bit relieved, to be honest) to finally be able to show what I’ve been saying for years - that the most valuable software for research, and the software that is driving domain-specific research software, are the unsexy libraries that have to do with data structures, (maybe standards), documentation or testing, and data formats or retrieval. These are the packages that you aren’t going to remember to cite. Also, this set is totally leaving out the software we use on a day to day basis in our CI, which arguably isn’t research software but has done more for the research community than anything I can think of - containers, version control (git), and continuous integration. We’d be a mess without it. We need to be more thankful and aware of this, and for some of y’all that turn down your nose to anything that isn’t a domain-science library, perhaps take a pause. 
Next, let’s talk about limitations and hopes for the future.A Living DatabaseI wouldn’t have been happy with myself to simply publish software at one point in time and call it a day.The Research Software Encyclopedia is updated weekly, and so I’ve designed this analysis to do the same!This means that while we do cache a result for a newly added piece of software, we do continue to grow the analysis as new software is added. And since the tool will always use the newly updated CiteLang, any improvements to the parsers there will be reflected here! And if anyone wants to run the entire thing again (outside of the limit of GitHub Actions) they can clone the repository, nuke the _repos folder, and run the scripts again.Language GapsThe biggest gap in the RSEPedia is with respect to what we don’t see. First, despite being a prominent language, we don’t see anything for C++, because there isn’t a package manager with an API to use it. If you have a nifty (or even hacky) idea for how to parse a requirements file, I want to hear it. The RSEPedia has support for spack, but most research-oriented C++ projects are not going to go out of their way to publish their package there, and we get no signal of the package being in spack when we clone the repository. Sppaaaaaack (sorry, it’s a bit of a tic at this point!) 😁️We also don’t see standard modules or libraries provided within a language. E.g., I can almost guarantee you a ton of Python libraries are importing json, but since it’s not a package manager library we wouldn’t see it. I suspect citelang could come up with a way to derive credit for these libraries by way of abstract syntax trees or just parsing the source code, although I haven’t done this yet because I’m not convinced it’s something people are as interested in. If you want to say thank you for the Python standard library, there is a donate button on their contribution page (or you could contribute code). There is an even deeper level of parsing (at least for Python) that looks at function signatures, and I wrote a library called caliper in early 2021 to do that, and it’s able to generate function databases for Python software of interest. This would be cool to do for some kind of (unrelated) compatibility analysis here, but yes that’s very different.Parsing LimitationFor all requirements files except for Python, we are forced to do static parsing. While not perfect because bugs can happen for niche cases of someone defining requirements in a weird way, it’s a reasonable start. There is always room for improvement, or adding more static parsers for requirements files I have not considered yet.However, this is not the case for the Python parsing (either requirements.txt or setup.py)! For Python these results are likely very good because we wrap the pypi package manager install command to derive a list of packages and versions from either a setup.py or requirements.txt. Don’t worry - nothing is installed, we either just parse the requirements file and return the results, or we use the solveragainst a setup.py to come to an equivalent list. We originally had a static parser (and still use this as a fallback) however I talked to @alecbcs and he had this fantastic idea! Will it likely need updates as time goes on, giventhe functions are private? Sure. 
But I’m happy to do that to get the much more accurate listing.In practice, the only setup.py files that I was not able to parse either had a bug (e.g., trying to read a file that doesn’t exist in the repository) or they were trying to use modules outside of the standard library. For all of the cases of broken-ness, I opened issues on the respective repositories so we might have a better chance at parsing in the future! One detail is that we parse the first requirements file found. For a primary requirements file in the root of the repository, this is the best outcome. However, some repos don’t have a file in the root, and perhaps we find one in a documentation folder instead. Either way, the result represents our best effort at finding and parsing requirements given a cloned repository we don’t know the structure of in advance.Final ThoughtsHere are my final takeaways:Publication is not for Research SoftwareA system of credit that relies on software engineers to do extra manual work (to write papers) is never going to fully capture the ecosystem and give proper credit. It will only capture those that have the time and possibly privilege to take the extra time to write a paper.Publication only makes sense given that a piece of software is paired alongside a robust result, in which case fine, write the paper and also champion the software.Publication Does not Actually Capture CreditA system that also only skims the superficial top (the name of one package) and does not dig deep into a dependency tree is also going to miss insights and deserved attributions of credit. As the numpy example shows, nobody is actually citing numpy, but a ton of projects are using it somewhere in their dependency tree, so it deserves a lot of credit.We Can Do BetterI have a pet peeve. I’m frankly just tired of people writing about credit and attribution but not doing anything about it. We could extend that to other things, but it’s especially an issue for this topic. Ironically they are writing papers and improving their publication record as they write about how publication and research software is a strained process. I may not have solved this problem, but damn at least I’m trying to actually do something about it instead of spurting gas.I find this idea exciting because there are so many directions you can go with it. When I first designed the idea I imagined a database and online interface where you could essentially connect your GitHub repository, and akin to a builder service, parse your repository on some event and derive a new credit or citation graph. Or you could have some set akin to the RSEPedia that are also updated regularly. And then, by way of having that database, we could do these same queries (that currently I’m doing statically) to say “What are the most important libraries for this language? Across the ecosystem?” or “How has this changed over time?” It would be a true way to derive the value of a library without needing people to publish papers, and totally automated and integrated with package managers, which is where people already should be putting their software.Heck, if someone gave me a cloud and a little bit of funding I’d love to work on this. Are there good reasons or use cases? I don’t know, but maybe.So what do you think?", + "content_html": "

    We recently published the Research Software Encyclopedia and have also added several new parsers for obtaining new data, meaning the total collection of curated research software is greater than 1,500 entries. In honor of this collection, and of a library I’m working on called CiteLang, I wanted to do a small study to better understand:

    1. What are the most valuable dependencies in our community, across languages?
    2. What are the most valuable dependencies in our community, by language?
    3. What is the credit allocation for each repository?

    CiteLang

    To step back for a second, let’s talk again about CiteLang. It has many functions - one of them being the ability to assess open source contributions via git - but its main purpose is to be a markdown syntax for citing software, meaning that we can:

    1. Generate basic software credit trees, graphs, and markdown summaries.
    2. Derive a new, customizable model of credit based on published packages and dependencies.
    3. Provide a way to cite software in a paper and give credit without needing DOIs.

    As a simple example, I can run CiteLang over this markdown file with CiteLang references:

    # Summary

    Portability and reproducibility of complex software stacks is essential for researchers to perform their work. High Performance Computing (HPC) environments add another level of complexity, where possibly conflicting dependencies must co-exist. Although container technologies like Singularity @conda{name=singularity} make it possible to \"bring your own environment,\" without any form of central strategy to manage containers, researchers who seek reproducibility via using containers are tasked with managing their own container collection, often not taking care to ensure that a particular digest or version is used. The reproducibility of the work is at risk, as they cannot easily install and use containers, nor can they share their software with others.

    Singularity Registry HPC (shpc) @pypi{name=singularity-hpc} is the first of its kind to provide an easy means for a researcher to add their research software for sharing and collaboration with other researchers to an existing collection of over 200 popular scientific libraries @github{name=autamus/registry} @github{name=spack/spack, release=0.17}. The software installs containers as environment modules that are easy to use and read documentation for, and exposes aliases for commands in the container that the researcher can add to their pipeline without thinking about complex interactions with a container. The simple addition of an entry to the registry maintained by shpc comes down to adding a yaml file, and after doing this, another researcher can easily install the same software, down to the digest, to reproduce the original work.

    # References

    <!--citelang start-->
    <!--citelang end-->

    And then run citelang render paper.md to get a nice rendered table alongside your paper! What CiteLang does is find the references in the paper, which look like this:

    @conda{name=singularity}
    @pypi{name=singularity-hpc}
    @github{name=autamus/registry}
    @github{name=spack/spack, release=0.17}

    Each of the references above is a package manager with a package name and (optionally) a version, and we can load in the metadata for each and then generate a table, as you see here, that summarizes credit across dependencies. In this model, we give some allocation of credit (default is 50%) to the main work (paper or software) citing the software, and then recursively parse dependencies up to some minimum level of credit to calculate scores. Dependencies shared across libraries are averaged together. The final table represents the credit that you give not only to the top-level software, but to all nested dependencies, for the work that you did. And that’s only the basics! CiteLang takes this simple ability to parse references and extends it to automation, graphs, badges, and more! You can read more about CiteLang here.
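    To make the arithmetic concrete, here is a tiny sketch in Python of that recursive split, using a made-up dependency tree (the package names and the even split among dependencies are illustrative assumptions; the real CiteLang implementation has more to it):

    # Toy dependency tree: package -> direct dependencies (hypothetical data)
    deps = {
        "mypaper": ["singularity-hpc", "spack"],
        "singularity-hpc": ["requests", "jsonschema"],
        "spack": [],
        "requests": [],
        "jsonschema": [],
    }

    credit = {}

    def allocate(pkg, share, split=0.5, threshold=0.001):
        # Stop when the remaining share becomes infinitesimally small
        if share < threshold:
            return
        children = deps.get(pkg, [])
        if not children:
            # A leaf keeps everything it receives
            credit[pkg] = credit.get(pkg, 0) + share
            return
        # Keep half, pass the other half down, split evenly among dependencies
        credit[pkg] = credit.get(pkg, 0) + share * split
        for child in children:
            allocate(child, share * (1 - split) / len(children))

    allocate("mypaper", 1.0)
    print(credit)  # {'mypaper': 0.5, 'singularity-hpc': 0.125, ..., 'spack': 0.25}

    Note that the shares always sum to 1.0, which is what lets the per-package numbers be read as percentages of total credit.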

    Publish or perish? How about neither? I just need to keep writing software!

    But do you see what is happening above? We aren’t requiring some artificial publication in order to cite software. We are citing it based on its actual usage, as a known dependency of some other software. In a nutshell, we don’t believe that “the traditional academic way” of citing papers makes sense for software, and instead of using DOIs we can use package managers and metadata as a source of truth, and derive the real value of a piece of software based on this ecosystem. This means that as a research software engineer, you can just keep doing what you are already doing, and if someone uses CiteLang to summarize their work, then given that your software is published to a package manager, you’ll get credit. There are so many cool ideas around this! But let’s start at the beginning. We first want to show how to summarize an ecosystem. That is exactly what we are going to do in this post.

    The Research Software Ecosystem

    Starting with these curated repositories from a set of scrapers including the Journal of Open Source Software, the HAL Research Software Database, the Research Software NL Dictionary, ROpenSci, and The Molecular Sciences Software Institute, we can do a basic analysis to identify the most used (and thus valued) pieces of software in our ecosystem. My analysis plan was to:

    1. Start with the current database.
    2. For each repository, look for requirements files to parse.
    3. Derive dependency data based on this requirements file.
    4. Combine and rank to discover the top dependencies!

    This of course is limited to the subset of software in our database, and the ability of CiteLang to parse a requirements file. Currently we parse setup.py and requirements.txt (Python), DESCRIPTION (R), go.mod (Go), package.json (npm), and Gemfile (Ruby). Based on the breakdown of the languages found in the RSEPedia, this is a reasonable start!
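    As a rough illustration of step 2 above (a sketch, not CiteLang’s actual code), detecting which parser applies can be as simple as mapping these well-known file names to languages and taking the first match found in a cloned repository:

    from pathlib import Path

    # Well-known requirements files and the language each one implies
    PARSERS = {
        "setup.py": "python",
        "requirements.txt": "python",
        "DESCRIPTION": "r",
        "go.mod": "go",
        "package.json": "npm",
        "Gemfile": "ruby",
    }

    def find_requirements(repo_root):
        # Return (path, language) for the first recognized requirements file,
        # preferring the repository root before searching subdirectories
        root = Path(repo_root)
        for name, lang in PARSERS.items():
            if (root / name).is_file():
                return root / name, lang
        for path in sorted(root.rglob("*")):
            if path.name in PARSERS and path.is_file():
                return path, PARSERS[path.name]
        return None, None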

    But it’s also kind of sad to see that my favorite languages (Go and Rust) are barely represented in our community. Also, the above should tell you that the R and Python results likely have some meaningful interpretation, but the others not so much, only because we don’t have a big enough sample. So for all of the above steps, for these 1500+ repositories and many languages, I wanted the entire process to be automated, to always have potential for easy improvement, and to run at some regular interval as new software comes into the Research Software Encyclopedia (also automated) so we can derive changes over time. If you don’t care to read further:

    1. View the Research Software Ecosystem
    2. Check out Languages here
    3. Results for Dependencies here
    4. Individual Repositories here

    For this first publication of the interface we have the following metrics:

    And I’m so excited because a tiny vision I had a few years ago to provide (and use) a community research software database is coming to life! So without further ado, I’m just going to jump into the cool results! It will be fun to see how these change over time.

    Python

    Ladies and gents, dinosaurs and rabbits! Your Python results:

    So here is the first awesome insight. Is anyone really surprised to see numpy as the number one library? The credit value here says that the average Python repository is attributing about 3% of credit to numpy, meaning it is a direct or indirect dependency. Let that sink in! Here is the irony - when is the last time you cited numpy? You probably haven’t, because you’ve cited something that uses it. We don’t remember numpy despite the fact that it’s so core to everything that we do.

    The fact that the most widely used library is rarely cited is huge evidence for why a manual “write papers and cite DOIs” approach just won’t work for software.

    What else do we see in this list? Let me name a few things. First, we can’t be so terrible at remembering to look at or visualize things, because matplotlib is second. At least for research software, this is telling us that making plots or charts is important. The next (possibly surprising) result is that documentation and testing are at least represented, and this might be a biased sample because we include repositories that are peer reviewed (e.g., JoSS), and documentation and testing are necessary for that. Given this need, sphinx and pytest come up as the leaders that provide it for Python. So here is another nugget of insight:

    Some of us are so busy focusing on domain-specific software that we forget the importance of the “less sexy” research software that helps us test, document, view things, or even create simple data structures.

    This kind of “base” software has always been what I’ve been most interested in, and ironically it’s what people tell me time and time again: “That’s not research software.” Oh really? So something that is entirely powering the research community is not research software? Of course I have my own strong opinions about a taxonomy for research software, but I would encourage those of you who are very dismissive to take a step back and consider what you are really saying.

    The next insight is that we see a lot of libraries for data formats (e.g., pyaml, h5py, lxml, and more lower in the list), and this is a testament to how important being able to read, serialize, and save data is.

    The final insight is the fact that requests is high in the list. For those of you not familiar, requests is a library for doing just that: making HTTP requests to get content from some webby place. This is a testament to the fact that our work increasingly relies on external APIs, automation, or other resources provided on the web.

    You can see the full Python results here.

    R

    I’m less of an R programmer these days, but I think that these results also make sense.

    We don’t see any huge leaders in the same way as we see numpy in Python, but not surprisingly the leader package for the R language is, well, R! At first I thought this was a bug, but the R DESCRIPTION files that we parse actually do commonly include a pinned version of R:

    Depends: R (>= 3.4.1), TailRank, ...

    And so we actually can give credit to the language proper! If you don’t feel this is appropriate, feel free to skip this line and consider the top package jsonlite. This is also why I think json would be represented in Python if it wasn’t part of the standard library. Us research folks - we need our json! Overall I think we see a similar pattern here as we saw with Python. The libraries that float to the top are those that involve data structures (jsonlite, yaml), webby requests or similar (httr, curl), documentation and testing (knitr, rmarkdown), and graphics or visualization. What does this tell us about what is undervalued in research software? Again, it’s not the domain-specific libraries, but rather the core stuff that enables those libraries.
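    For the curious, pulling package names and optional version constraints out of a Depends line like the one above takes only a few lines; this is a simplification of what a real DESCRIPTION parser must handle (it assumes no commas inside the parentheses, for one), not CiteLang’s exact logic:

    import re

    def parse_depends(line):
        # Split 'Depends: R (>= 3.4.1), TailRank' into (name, constraint) pairs
        field = line.split(":", 1)[1]
        entries = []
        for item in field.split(","):
            m = re.match(r"\s*([\w.]+)\s*(?:\(([^)]*)\))?", item)
            if m and m.group(1):
                entries.append((m.group(1), m.group(2)))
        return entries

    print(parse_depends("Depends: R (>= 3.4.1), TailRank"))
    # [('R', '>= 3.4.1'), ('TailRank', None)]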

    You can see the full R results here.

    Projects

    If you are interested in a specific project in the RSEPedia, we also provide a project-specific table and badge! You can browse projects from here, and here is an example of a badge generated for a project called github.com/ORNL/tx2 (and on GitHub). Without even looking I can tell you we have some machine learning and/or visualization going on here (scikit-learn! umap! pandas! matplotlib)!

    Notice how numpy (as an example) shows up at multiple points in the tree - when we calculate an overall credit, say, for the ecosystem, we take that into account! And we can then peek at the project-specific table and sort of verify that yes, this is a Python ML/visualization project:

    And we see some surprises! Like, the slack-sdk? What? Believe it or not, that is pulled in by tqdm. The project-specific tables (and the description at the top) also give you a better sense of how CiteLang allocates credit. The top-level package is given 50%, and then the other 50% is given to all dependencies in the same fashion. We cut off at a value of 0.001, and we do that so we don’t end up parsing dependencies forever, down to some infinitesimally small amount.

    Finally, every project serves its own raw data, and the site is searchable, because sites should be. 😄️

    Discussion

    I’m so happy (and a bit relieved, to be honest) to finally be able to show what I’ve been saying for years - that the most valuable software for research, and the software that is driving domain-specific research software, are the unsexy libraries that have to do with data structures (and maybe standards), documentation or testing, and data formats or retrieval. These are the packages that you aren’t going to remember to cite. Also, this set is totally leaving out the software we use on a day-to-day basis in our CI, which arguably isn’t research software but has done more for the research community than anything I can think of - containers, version control (git), and continuous integration. We’d be a mess without it. We need to be more thankful and aware of this, and for some of y’all that turn up your nose at anything that isn’t a domain-science library, perhaps take a pause. Next, let’s talk about limitations and hopes for the future.

    A Living Database

    I wouldn’t have been happy with myself to simply publish software at one point in time and call it a day. The Research Software Encyclopedia is updated weekly, and so I’ve designed this analysis to do the same! This means that while we do cache a result for a newly added piece of software, we do continue to grow the analysis as new software is added. And since the tool will always use the newly updated CiteLang, any improvements to the parsers there will be reflected here! And if anyone wants to run the entire thing again (outside of the limits of GitHub Actions) they can clone the repository, nuke the _repos folder, and run the scripts again.

    Language Gaps

    The biggest gap in the RSEPedia is with respect to what we don’t see. First, despite being a prominent language, we don’t see anything for C++, because there isn’t a package manager with an API to use it. If you have a nifty (or even hacky) idea for how to parse a requirements file, I want to hear it. The RSEPedia has support for spack, but most research-oriented C++ projects are not going to go out of their way to publish their package there, and we get no signal of the package being in spack when we clone the repository. Sppaaaaaack (sorry, it’s a bit of a tic at this point!) 😁️

    We also don’t see standard modules or libraries provided within a language. E.g., I can almost guarantee you a ton of Python libraries are importing json, but since it’s not a package manager library we wouldn’t see it. I suspect citelang could come up with a way to derive credit for these libraries by way of abstract syntax trees or just parsing the source code, although I haven’t done this yet because I’m not convinced it’s something people are as interested in. If you want to say thank you for the Python standard library, there is a donate button on their contribution page (or you could contribute code). There is an even deeper level of parsing (at least for Python) that looks at function signatures, and I wrote a library called caliper in early 2021 to do that, and it’s able to generate function databases for Python software of interest. This would be cool to do for some kind of (unrelated) compatibility analysis here, but yes that’s very different.
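    If someone did want that signal, a first pass is not much code. For example, Python’s own ast module plus sys.stdlib_module_names (available in Python 3.10+) can count standard library imports in a source file - a sketch of the idea, not something CiteLang does today:

    import ast
    import sys
    from collections import Counter

    def stdlib_imports(source):
        # Count imports of standard library modules in a Python source string
        counts = Counter()
        for node in ast.walk(ast.parse(source)):
            if isinstance(node, ast.Import):
                names = [alias.name for alias in node.names]
            elif isinstance(node, ast.ImportFrom) and node.module:
                names = [node.module]
            else:
                continue
            for name in names:
                top = name.split(".")[0]  # e.g. os.path -> os
                if top in sys.stdlib_module_names:
                    counts[top] += 1
        return counts

    print(stdlib_imports("import json, os.path\nfrom re import match\n"))
    # Counter({'json': 1, 'os': 1, 're': 1})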

    Parsing Limitation

    For all requirements files except for Python, we are forced to do static parsing. While not perfect because bugs can happen for niche cases of someone defining requirements in a weird way, it’s a reasonable start. There is always room for improvement, or adding more static parsers for requirements files I have not considered yet.

    However, this is not the case for the Python parsing (either requirements.txt or setup.py)! For Python these results are likely very good, because we wrap the pypi package manager install command to derive a list of packages and versions from either a setup.py or requirements.txt. Don’t worry - nothing is installed; we either just parse the requirements file and return the results, or we use the solver against a setup.py to come to an equivalent list. We originally had a static parser (and still use this as a fallback), however I talked to @alecbcs and he had this fantastic idea! Will it likely need updates as time goes on, given that the functions are private? Sure. But I’m happy to do that to get the much more accurate listing.
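    If you want to experiment with the resolve-without-installing trick yourself, stock pip (22.2 or newer) can do something similar with its dry-run install report; note this is an approximation of the approach, not CiteLang’s actual wrapper:

    import json
    import subprocess
    import sys
    import tempfile

    def resolve_requirements(requirements="requirements.txt"):
        # Ask pip's solver what it *would* install, without installing anything
        with tempfile.NamedTemporaryFile(suffix=".json") as report:
            subprocess.run(
                [sys.executable, "-m", "pip", "install",
                 "--dry-run", "--ignore-installed", "--quiet",
                 "--report", report.name, "-r", requirements],
                check=True,
            )
            with open(report.name) as fh:
                data = json.load(fh)
        # Each "install" entry is a package and version the solver selected
        return [(p["metadata"]["name"], p["metadata"]["version"])
                for p in data.get("install", [])]

    print(resolve_requirements())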

    In practice, the only setup.py files that I was not able to parse either had a bug (e.g., trying to read a file that doesn’t exist in the repository) or they were trying to use modules outside of the standard library. For all of the cases of broken-ness, I opened issues on the respective repositories so we might have a better chance at parsing in the future! One detail is that we parse the first requirements file found. For a primary requirements file in the root of the repository, this is the best outcome. However, some repos don’t have a file in the root, and perhaps we find one in a documentation folder instead. Either way, the result represents our best effort at finding and parsing requirements given a cloned repository we don’t know the structure of in advance.

    Final Thoughts

    Here are my final takeaways:

    Publication is not for Research Software

    A system of credit that relies on software engineers to do extra manual work (to write papers) is never going to fully capture the ecosystem and give proper credit. It will only capture those who have the time, and possibly the privilege, to write a paper. Publication only makes sense when a piece of software is paired alongside a robust result, in which case fine, write the paper and also champion the software.

    Publication Does not Actually Capture Credit

    A system that also only skims the superficial top (the name of one package) and does not dig deep into a dependency tree is also going to miss insights and deserved attributions of credit. As the numpy example shows, nobody is actually citing numpy, but a ton of projects are using it somewhere in their dependency tree, so it deserves a lot of credit.

    We Can Do Better

    I have a pet peeve. I’m frankly just tired of people writing about credit and attribution but not doing anything about it. We could extend that to other things, but it’s especially an issue for this topic. Ironically, they are writing papers and improving their publication record as they write about how strained the publication process is for research software. I may not have solved this problem, but damn, at least I’m trying to actually do something about it instead of just spouting gas.

    I find this idea exciting because there are so many directions you can go with it. When I first designed the idea I imagined a database and online interface where you could essentially connect your GitHub repository and, akin to a builder service, parse your repository on some event and derive a new credit or citation graph. Or you could have some set, akin to the RSEPedia, that is also updated regularly. And then, by way of having that database, we could do these same queries (that currently I’m doing statically) to say “What are the most important libraries for this language? Across the ecosystem?” or “How has this changed over time?” It would be a true way to derive the value of a library without needing people to publish papers, totally automated and integrated with package managers, which is where people already should be putting their software. Heck, if someone gave me a cloud and a little bit of funding I’d love to work on this. Are there good reasons or use cases? I don’t know, but maybe.

    So what do you think?

    ", + "url": "https://hpc.social/personal-blog/2022/the-research-software-ecosystem/", + + + + + + "date_published": "2022-04-24T13:30:00-06:00", + "date_modified": "2022-04-24T13:30:00-06:00", + + "author": "Vanessasaurus" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/spooky-allocator-issues-and-fixes/", + "title": "Spooky Allocator Issues and Fixes", + "summary": null, + "content_text": "Recently we started noticing performance issues in the main branch of Ceph that ultimately were traced back to a commit last summer that changed parts of our AVL and hybrid disk allocator implementations in bluestore. Strangly, the issue only affected some of the NVMe drives in our test lab but not others. The quick fix was to always update and save the allocator’s cursor position so that we don’t search (and fail) over and over in fast-fit mode for every allocation request. Another interesting offshoot of this though is that it may be much nicer to limit fast-fit searches based on time rather than byte distance or the number of iterations.", + "content_html": "

    Recently we started noticing performance issues in the main branch of Ceph that ultimately were traced back to a commit last summer that changed parts of our AVL and hybrid disk allocator implementations in bluestore. Strangely, the issue only affected some of the NVMe drives in our test lab but not others. The quick fix was to always update and save the allocator’s cursor position so that we don’t search (and fail) over and over in fast-fit mode for every allocation request. Another interesting offshoot of this, though, is that it may be much nicer to limit fast-fit searches based on time rather than byte distance or the number of iterations.
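    The time-based idea is easy to illustrate. Instead of capping a search by iterations or bytes scanned (proxies that behave very differently across drives), cap it by elapsed time; the sketch below is Python pseudocode for the shape of the idea, since the actual Ceph allocators are C++:

    import time

    def fast_fit_search(candidates, fits, budget_s=0.0005):
        # Scan candidate extents for a fit, giving up after a fixed time budget
        deadline = time.monotonic() + budget_s
        for pos, extent in enumerate(candidates):
            if fits(extent):
                return pos        # found a usable extent
            if time.monotonic() >= deadline:
                return None       # give up and fall back (e.g., to best-fit)
        return None

    The win is that the worst-case cost of a failed fast-fit scan becomes bounded and predictable no matter how fragmented the free space is.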

    ", + "url": "https://hpc.social/personal-blog/2022/spooky-allocator-issues-and-fixes/", + + + + + + "date_published": "2022-04-13T01:00:00-06:00", + "date_modified": "2022-04-13T01:00:00-06:00", + + "author": "Mark Nelson's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/lsf-hookin-up-with-the-criu/", + "title": "LSF hookin' up with the CRIU", + "summary": null, + "content_text": "With the unpredicable spring weather here in Southern Ontario, weekend projectsare the order of the day. Whether it’s fixing my bike for spring, repairing things in the home which I’ve neglected for far long or topics relating to IT which have been percolating in my head, I am a textbook busybody.A few decades back, when I was a support engineer at Platform Computing, I hadmy first experience working with clients using both kernel-level and user-levelcheckpoint and restart through the HPC workload scheduler Platform LSF (nowIBM Spectrum LSF). I distinctly recall that user-level library was a bit trickyas you had to link your home grown code against it - and it had numerouslimitations which I can’t recall off the top of my head. Back then, like today,IBM Spectrum LSF provides a number of ways that administrators can extend capabilities using plug-ins.Checkpoint and restart is an example where plug-ins can be used. More about thislater.I’ve been keeping an eye on the project known as CRIU for some time. CRIU, which stands for Checkpoint/Restore In Userspaceprovides checkpoint and restart functionality on Linux. And I thought it may bean interesting weekend project to integrate CRIU with LSF. As it turns out,I was not blazing any trails here as I found that there are others alreadyusing CRIU with LSF today. Nevertheless, I decided to give it a try.My system of choice for this tinkering was a dual-socket POWER9 based systemrunning CentOS Stream 8 and IBM Spectrum LSF Suite for HPC v10.2.0.12. TheLSF online documentation contains information on the specificationsof the LSF plugins for checkpoint and restart. The plugins are known as echkpnt and erestart, where the “e” denotes external.Here is a quick rundown on the steps to integrate CRIU with LSF.It turns out that my system already had criu installed. It’s a dependencyon runc which was installed as part of podman. This step really dependson your distro. In my case, dnf provides criu was my friend.# uname -aLinux kilenc 4.18.0-373.el8.ppc64le #1 SMP Tue Mar 22 15:28:39 UTC 2022 ppc64le ppc64le ppc64le GNU/Linux# criuUsage: criu dump|pre-dump -t PID [<options>] criu restore [<options>] criu check [--feature FEAT] criu page-server criu service [<options>] criu dedup criu lazy-pages -D DIR [<options>]Commands: dump checkpoint a process/tree identified by pid pre-dump pre-dump task(s) minimizing their frozen time restore restore a process/tree check checks whether the kernel support is up-to-date page-server launch page server service launch service dedup remove duplicates in memory dump cpuinfo dump writes cpu information into image file cpuinfo check validates cpu information read from image fileTry -h|--help for more infoThe criu command needs to be run as root to be able to checkpointprocesses. As we are going to leverage criu directly in the LSF echkpnt anderestart scripts, I chose to enable sudo access for criu. To do this I simplyadded the following to /etc/sudoers.gsamu ALL=NOPASSWD:/usr/sbin/criuNext, I tested that the basic criu functionality was working. 
I foundthis to be a useful blog on how to perform a simple test.With criu installed and working (see step 3), the next steps was to createthe echkpnt and erestart scripts which would ultimately call the appropriatecriu dump and criu restore commands. These scripts will be named echkpnt.criu and erestart.criu. The .criu extension denotes the checkpoint andrestart method name in LSF. The checkpoint method is specified at the time ofjob submission in LSF.The key for the echkpnt.criu script is to build out the list of PIDs forthe job in question. For this I used an inelegant approach - simplyscraping the output of the LSF bjobs -l command. This listof PIDs is then used as arguments to the criu dump command.The example echkpnt.criu script is included below.I used a simple approach as well for erestart.criu. As per the specificationfor erestart, the key is to create a new LSF jobfile which containsthe appropriate criu restore invocation, pointing to the checkpointdata. The example erestart.criu script is included below.With the echkpnt.criu and erestart.criu scripts in the $LSF_SERVERDIRdirectory, the process to perform a checkpoint and restart of LSF jobs isstraight forward using bchkpnt and brestart commands respectively.Here is a simple example.Submit a job as checkpointable. The checkpoint method criu is specified as well as the location where the checkpoint data will be written to.$ bsub -k \"/home/gsamu/checkpoint_data method=criu\" ./criu_testJob <12995> is submitted to default queue <normal>.The executable criu_test simply writes a message to standard out every 3 seconds.$ bpeek 12995<< output from stdout >>0: Sleeping for three seconds ...1: Sleeping for three seconds ...2: Sleeping for three seconds ...3: Sleeping for three seconds ...4: Sleeping for three seconds ...Next, we see that LSF has detected the job PIDS. Now we’re ready to perform the checkpoint.$ bjobs -l 12995 Job <12995>, User <gsamu>, Project <default>, Status <RUN>, Queue <normal>, Com mand <./criu_test>, Share group charged </gsamu>Tue Apr 12 08:48:28: Submitted from host <kilenc>, CWD <$HOME>, C heckpoint directory </home/gsamu/checkpoint_data/12995>;Tue Apr 12 08:48:29: Started 1 Task(s) on Host(s) <kilenc>, Alloc ated 1 Slot(s) on Host(s) <kilenc>, Executio n Home </home/gsamu>, Execution CWD </home/gsamu>;Tue Apr 12 08:48:38: Resource usage collected. MEM: 12 Mbytes; SWAP: 0 Mbytes; NTHREAD: 4 PGID: 418130; PIDs: 418130 418131 418133 MEMORY USAGE: MAX MEM: 12 Mbytes; AVG MEM: 6 Mbytes SCHEDULING PARAMETERS: r15s r1m r15m ut pg io ls it tmp swp mem loadSched - - - - - - - - - - - loadStop - - - - - - - - - - - RESOURCE REQUIREMENT DETAILS: Combined: select[type == local] order[r15s:pg] Effective: select[type == local] order[r15s:pg] Initiate the checkpoint using the LSF bchkpnt command. The -k option is specified which will result in the job being checkpointed and killed.$ bchkpnt -k 12995Job <12995> is being checkpointedWe see in the history of the job using the bhist command that the checkpoint was initiated and succeeded. 
The job was subsequently killed (TERM_CHKPNT).$ bhist -l 12995 Job <12995>, User <gsamu>, Project <default>, Command <./criu_test>Tue Apr 12 08:48:28: Submitted from host <kilenc>, to Queue <norm al>, CWD <$HOME>, Checkpoint directory </home/gsamu/checkp oint_data/12995>;Tue Apr 12 08:48:29: Dispatched 1 Task(s) on Host(s) <kilenc>, Al located 1 Slot(s) on Host(s) <kilenc>, Effec tive RES_REQ <select[type == local] order[r15s:pg] >;Tue Apr 12 08:48:31: Starting (Pid 418130);Tue Apr 12 08:48:31: Running with execution home </home/gsamu>, Execution CWD < /home/gsamu>, Execution Pid <418130>;Tue Apr 12 08:54:14: Checkpoint initiated (actpid 419029);Tue Apr 12 08:54:15: Checkpoint succeeded (actpid 419029);Tue Apr 12 08:54:15: Exited with exit code 137. The CPU time used is 2.1 second s;Tue Apr 12 08:54:15: Completed <exit>; TERM_CHKPNT: job killed after checkpoint ing;\t\t MEMORY USAGE:MAX MEM: 12 Mbytes; AVG MEM: 11 Mbytes Summary of time in seconds spent in various states by Tue Apr 12 08:54:15 PEND PSUSP RUN USUSP SSUSP UNKWN TOTAL 1 0 346 0 0 0 347 Restart the job from the checkpoint data with the LSF brestart command. A new jobID is assigned.$ brestart /home/gsamu/checkpoint_data/ 12995 Job <12996> is submitted to queue <normal>.$ bjobs -l 12996 Job <12996>, User <gsamu>, Project <default>, Status <RUN>, Queue <normal>, Com mand <./criu_test>, Share group charged </gsamu>Tue Apr 12 08:55:57: Submitted from host <kilenc>, CWD <$HOME>, R estart, Checkpoint directory </home/gsamu/checkpoint_data/ /12996>;Tue Apr 12 08:55:58: Started 1 Task(s) on Host(s) <kilenc>, Alloc ated 1 Slot(s) on Host(s) <kilenc>, Executio n Home </home/gsamu>, Execution CWD </home/gsamu>;Tue Apr 12 08:56:07: Resource usage collected. MEM: 14 Mbytes; SWAP: 0 Mbytes; NTHREAD: 5 PGID: 420069; PIDs: 420069 420070 420073 420074 420076 MEMORY USAGE: MAX MEM: 14 Mbytes; AVG MEM: 14 Mbytes SCHEDULING PARAMETERS: r15s r1m r15m ut pg io ls it tmp swp mem loadSched - - - - - - - - - - - loadStop - - - - - - - - - - - RESOURCE REQUIREMENT DETAILS: Combined: select[type == local] order[r15s:pg] Effective: select[type == local] order[r15s:pg] Viewing the standard output of the job, we see the point where it was killed and that it has picked up from where it left off.$ bpeek 12996<< output from stdout >>0: Sleeping for three seconds ...1: Sleeping for three seconds ...2: Sleeping for three seconds ...3: Sleeping for three seconds ...4: Sleeping for three seconds ...….….110: Sleeping for three seconds ...111: Sleeping for three seconds ...112: Sleeping for three seconds ...113: Sleeping for three seconds .../home/gsamu/.lsbatch/1649767708.12995: line 8: 418133 Killed ./criu_test114: Sleeping for three seconds ...115: Sleeping for three seconds ...116: Sleeping for three seconds ...117: Sleeping for three seconds ...118: Sleeping for three seconds ...119: Sleeping for three seconds ...120: Sleeping for three seconds ...........We’ve demonstrated how one can integrate CRIU checkpoint and restartwith IBM Spectrum LSF using the echkpnt and erestart interfaces.As highlighted earlier, LSF provides a number of plugin interfaceswhich provides flexibility to organizations looking to do site specificcustomizations.", + "content_html": "

    With the unpredictable spring weather here in Southern Ontario, weekend projects are the order of the day. Whether it’s fixing my bike for spring, repairing things in the home which I’ve neglected for far too long, or tinkering with IT topics which have been percolating in my head, I am a textbook busybody.

    A few decades back, when I was a support engineer at Platform Computing, I had my first experience working with clients using both kernel-level and user-level checkpoint and restart through the HPC workload scheduler Platform LSF (now IBM Spectrum LSF). I distinctly recall that the user-level library was a bit tricky, as you had to link your home-grown code against it - and it had numerous limitations which I can’t recall off the top of my head. Back then, as it does today, IBM Spectrum LSF provided a number of ways that administrators could extend capabilities using plug-ins. Checkpoint and restart is an example of where plug-ins can be used. More about this later.

    I’ve been keeping an eye on the project known as CRIU for some time. CRIU, which stands for Checkpoint/Restore In Userspace, provides checkpoint and restart functionality on Linux. And I thought it might be an interesting weekend project to integrate CRIU with LSF. As it turns out, I was not blazing any trails here, as I found that there are others already using CRIU with LSF today. Nevertheless, I decided to give it a try.

    My system of choice for this tinkering was a dual-socket POWER9-based system running CentOS Stream 8 and IBM Spectrum LSF Suite for HPC v10.2.0.12. The LSF online documentation contains information on the specifications of the LSF plugins for checkpoint and restart. The plugins are known as echkpnt and erestart, where the “e” denotes external.

    Here is a quick rundown on the steps to integrate CRIU with LSF.

    • It turns out that my system already had criu installed. It’s a dependency of runc, which was installed as part of podman. This step really depends on your distro. In my case, dnf provides criu was my friend.
    # uname -a
    Linux kilenc 4.18.0-373.el8.ppc64le #1 SMP Tue Mar 22 15:28:39 UTC 2022 ppc64le ppc64le ppc64le GNU/Linux

    # criu
    Usage:  criu dump|pre-dump -t PID [<options>]
      criu restore [<options>]
      criu check [--feature FEAT]
      criu page-server
      criu service [<options>]
      criu dedup
      criu lazy-pages -D DIR [<options>]

    Commands:
      dump           checkpoint a process/tree identified by pid
      pre-dump       pre-dump task(s) minimizing their frozen time
      restore        restore a process/tree
      check          checks whether the kernel support is up-to-date
      page-server    launch page server
      service        launch service
      dedup          remove duplicates in memory dump
      cpuinfo dump   writes cpu information into image file
      cpuinfo check  validates cpu information read from image file

    Try -h|--help for more info
    • The criu command needs to be run as root to be able to checkpoint processes. As we are going to leverage criu directly in the LSF echkpnt and erestart scripts, I chose to enable sudo access for criu. To do this I simply added the following to /etc/sudoers.
    gsamu   ALL=NOPASSWD:/usr/sbin/criu
    • Next, I tested that the basic criu functionality was working. I found this to be a useful blog on how to perform a simple test.

    • With criu installed and working (see step 3), the next step was to create the echkpnt and erestart scripts, which would ultimately call the appropriate criu dump and criu restore commands. These scripts will be named echkpnt.criu and erestart.criu. The .criu extension denotes the checkpoint and restart method name in LSF. The checkpoint method is specified at the time of job submission in LSF.

    The key for the echkpnt.criu script is to build out the list of PIDs for the job in question. For this I used an inelegant approach - simply scraping the output of the LSF bjobs -l command. This list of PIDs is then used as arguments to the criu dump command. The example echkpnt.criu script is included below.
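    As a sketch of what such a script could look like (Python for illustration; it assumes the job ID arrives as the last argument, that LSB_CHKPNT_DIR points at the checkpoint directory, and it grabs the PGID from bjobs -l and lets criu dump the whole tree from it - the author’s actual echkpnt.criu may differ):

    #!/usr/bin/env python3
    # echkpnt.criu (sketch): checkpoint an LSF job's process tree with criu
    import os
    import re
    import subprocess
    import sys

    jobid = sys.argv[-1]  # assumption: LSF passes the job ID as the last argument
    chkdir = os.environ.get("LSB_CHKPNT_DIR", ".")

    out = subprocess.run(["bjobs", "-l", jobid],
                         capture_output=True, text=True).stdout
    match = re.search(r"PGID:\s*(\d+)", out)
    if not match:
        sys.exit(1)

    # criu follows the process tree from the group leader, so one PID suffices
    sys.exit(subprocess.run(
        ["sudo", "criu", "dump", "-t", match.group(1),
         "-D", chkdir, "--shell-job", "--leave-running"]).returncode)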

    I used a simple approach as well for erestart.criu. As per the specification for erestart, the key is to create a new LSF jobfile which contains the appropriate criu restore invocation, pointing to the checkpoint data. The example erestart.criu script is included below.
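    A matching sketch for the restart side, with the same caveats (the jobfile name and location here are hypothetical; consult the erestart specification for the exact contract):

    #!/usr/bin/env python3
    # erestart.criu (sketch): emit a jobfile that restores the saved criu images
    import os

    chkdir = os.environ.get("LSB_CHKPNT_DIR", ".")
    jobfile = os.path.join(chkdir, "jobfile.criu")  # hypothetical name

    with open(jobfile, "w") as fh:
        fh.write("#!/bin/sh\n")
        fh.write("exec sudo criu restore -D %s --shell-job\n" % chkdir)
    os.chmod(jobfile, 0o755)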

    • With the echkpnt.criu and erestart.criu scripts in the $LSF_SERVERDIR directory, the process to perform a checkpoint and restart of LSF jobs is straightforward, using the bchkpnt and brestart commands respectively. Here is a simple example.

    • Submit a job as checkpointable. The checkpoint method criu is specified, as well as the location where the checkpoint data will be written.

    $ bsub -k \"/home/gsamu/checkpoint_data method=criu\" ./criu_testJob <12995> is submitted to default queue <normal>.
    • The executable criu_test simply writes a message to standard out every 3 seconds.
    $ bpeek 12995
    << output from stdout >>
    0: Sleeping for three seconds ...
    1: Sleeping for three seconds ...
    2: Sleeping for three seconds ...
    3: Sleeping for three seconds ...
    4: Sleeping for three seconds ...
    • Next, we see that LSF has detected the job PIDs. Now we’re ready to perform the checkpoint.

    $ bjobs -l 12995

    Job <12995>, User <gsamu>, Project <default>, Status <RUN>, Queue <normal>, Command <./criu_test>, Share group charged </gsamu>
    Tue Apr 12 08:48:28: Submitted from host <kilenc>, CWD <$HOME>, Checkpoint directory </home/gsamu/checkpoint_data/12995>;
    Tue Apr 12 08:48:29: Started 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot(s) on Host(s) <kilenc>, Execution Home </home/gsamu>, Execution CWD </home/gsamu>;
    Tue Apr 12 08:48:38: Resource usage collected.
                         MEM: 12 Mbytes;  SWAP: 0 Mbytes;  NTHREAD: 4
                         PGID: 418130;  PIDs: 418130 418131 418133

    MEMORY USAGE:
    MAX MEM: 12 Mbytes;  AVG MEM: 6 Mbytes

    SCHEDULING PARAMETERS:
              r15s   r1m  r15m   ut      pg    io   ls    it    tmp    swp    mem
    loadSched   -     -     -     -       -     -    -     -     -      -      -
    loadStop    -     -     -     -       -     -    -     -     -      -      -

    RESOURCE REQUIREMENT DETAILS:
    Combined: select[type == local] order[r15s:pg]
    Effective: select[type == local] order[r15s:pg]

    • Initiate the checkpoint using the LSF bchkpnt command. The -k option is specified, which will result in the job being checkpointed and killed.

    $ bchkpnt -k 12995
    Job <12995> is being checkpointed

    • We see in the history of the job using the bhist command that the checkpoint was initiated and succeeded. The job was subsequently killed (TERM_CHKPNT).

    $ bhist -l 12995

    Job <12995>, User <gsamu>, Project <default>, Command <./criu_test>
    Tue Apr 12 08:48:28: Submitted from host <kilenc>, to Queue <normal>, CWD <$HOME>, Checkpoint directory </home/gsamu/checkpoint_data/12995>;
    Tue Apr 12 08:48:29: Dispatched 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot(s) on Host(s) <kilenc>, Effective RES_REQ <select[type == local] order[r15s:pg] >;
    Tue Apr 12 08:48:31: Starting (Pid 418130);
    Tue Apr 12 08:48:31: Running with execution home </home/gsamu>, Execution CWD </home/gsamu>, Execution Pid <418130>;
    Tue Apr 12 08:54:14: Checkpoint initiated (actpid 419029);
    Tue Apr 12 08:54:15: Checkpoint succeeded (actpid 419029);
    Tue Apr 12 08:54:15: Exited with exit code 137. The CPU time used is 2.1 seconds;
    Tue Apr 12 08:54:15: Completed <exit>; TERM_CHKPNT: job killed after checkpointing;

    MEMORY USAGE:
    MAX MEM: 12 Mbytes;  AVG MEM: 11 Mbytes

    Summary of time in seconds spent in various states by Tue Apr 12 08:54:15
      PEND     PSUSP    RUN      USUSP    SSUSP    UNKWN    TOTAL
      1        0        346      0        0        0        347

    • Restart the job from the checkpoint data with the LSF brestart command. A new jobID is assigned.

    $ brestart /home/gsamu/checkpoint_data/ 12995
    Job <12996> is submitted to queue <normal>.

    $ bjobs -l 12996

    Job <12996>, User <gsamu>, Project <default>, Status <RUN>, Queue <normal>, Command <./criu_test>, Share group charged </gsamu>
    Tue Apr 12 08:55:57: Submitted from host <kilenc>, CWD <$HOME>, Restart, Checkpoint directory </home/gsamu/checkpoint_data//12996>;
    Tue Apr 12 08:55:58: Started 1 Task(s) on Host(s) <kilenc>, Allocated 1 Slot(s) on Host(s) <kilenc>, Execution Home </home/gsamu>, Execution CWD </home/gsamu>;
    Tue Apr 12 08:56:07: Resource usage collected.
                         MEM: 14 Mbytes;  SWAP: 0 Mbytes;  NTHREAD: 5
                         PGID: 420069;  PIDs: 420069 420070 420073 420074 420076

    MEMORY USAGE:
    MAX MEM: 14 Mbytes;  AVG MEM: 14 Mbytes

    SCHEDULING PARAMETERS:
              r15s   r1m  r15m   ut      pg    io   ls    it    tmp    swp    mem
    loadSched   -     -     -     -       -     -    -     -     -      -      -
    loadStop    -     -     -     -       -     -    -     -     -      -      -

    RESOURCE REQUIREMENT DETAILS:
    Combined: select[type == local] order[r15s:pg]
    Effective: select[type == local] order[r15s:pg]

    • Viewing the standard output of the job, we see the point where it was killed and that it has picked up from where it left off.

      $ bpeek 12996
      << output from stdout >>
      0: Sleeping for three seconds ...
      1: Sleeping for three seconds ...
      2: Sleeping for three seconds ...
      3: Sleeping for three seconds ...
      4: Sleeping for three seconds ...
      ….
      ….
      110: Sleeping for three seconds ...
      111: Sleeping for three seconds ...
      112: Sleeping for three seconds ...
      113: Sleeping for three seconds ...
      /home/gsamu/.lsbatch/1649767708.12995: line 8: 418133 Killed                  ./criu_test
      114: Sleeping for three seconds ...
      115: Sleeping for three seconds ...
      116: Sleeping for three seconds ...
      117: Sleeping for three seconds ...
      118: Sleeping for three seconds ...
      119: Sleeping for three seconds ...
      120: Sleeping for three seconds ...
      ...

    We’ve demonstrated how one can integrate CRIU checkpoint and restart with IBM Spectrum LSF using the echkpnt and erestart interfaces. As highlighted earlier, LSF provides a number of plugin interfaces which provide flexibility to organizations looking to do site-specific customizations.
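
    Concretely, echkpnt and erestart are programs that LSF invokes at checkpoint and restart time, so a site-specific method can be a thin wrapper around the criu command line. Below is a minimal, hypothetical Python sketch of such a wrapper; the argument layout is invented for illustration, and the exact arguments LSF passes to echkpnt/erestart should be taken from the LSF documentation rather than from this sketch.

      #!/usr/bin/env python3
      # Hypothetical CRIU wrapper in the spirit of echkpnt/erestart.
      # The command-line layout here is illustrative, not LSF's convention.
      import subprocess
      import sys

      def checkpoint(pid, chkpnt_dir, kill=False):
          cmd = ["criu", "dump", "--tree", pid, "--images-dir", chkpnt_dir, "--shell-job"]
          if not kill:
              cmd.append("--leave-running")  # keep the job running after the dump
          return subprocess.call(cmd)

      def restart(chkpnt_dir):
          return subprocess.call(
              ["criu", "restore", "--images-dir", chkpnt_dir, "--shell-job", "--restore-detached"]
          )

      if __name__ == "__main__":
          # e.g.: wrapper.py dump <pid> <chkpnt_dir>  or  wrapper.py restore <chkpnt_dir>
          if sys.argv[1] == "dump":
              sys.exit(checkpoint(sys.argv[2], sys.argv[3]))
          sys.exit(restart(sys.argv[2]))

    Note that CRIU generally needs elevated privileges to dump and restore process trees, and the images directory here plays the role of the per-job checkpoint directory seen above (e.g., /home/gsamu/checkpoint_data/12995).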

    ", + "url": "https://hpc.social/personal-blog/2022/lsf-hookin-up-with-the-criu/", + + + + + + "date_published": "2022-04-12T19:32:04-06:00", + "date_modified": "2022-04-12T19:32:04-06:00", + + "author": "Ramblings of a supercomputing enthusiast." + + }, + + { + "id": "https://hpc.social/personal-blog/2022/relivin-the-90-s-amiga-style/", + "title": "Relivin' the 90's - Amiga style", + "summary": null, + "content_text": "Although I very much started my experience with home computers with IBMcompatibles running MSDOS in the late 1980’s, I’m a lifelong, self-professedCommodore-Amiga addict. I distinctly recall the launch of the Amiga A1000 andbeing dazzled by it’s multimedia capabilities around the same time thatI had a PC XT with CGA graphics. I was instantly hooked. Having great videogames for the time was just icing on the cake.I started my Amiga experience with an A500, which I quickly traded in for anA2000 model, which I still have today. I came across an A3000 in the late 1990’sfor a small sum which I added to my collection. The A3000 is my favouriteAmiga system with onboard SCSI and it’s design is reminiscent of pizza-boxUNIX servers which were common back in the day.The majority of my friends at the time were all-in on PCs. But for me therewas just something a bit clinical and boring about them. The Amiga filledthis gap for me and continues to do so. It’s probably one of bigreasons why I still to this day tinker so much with non-X86 systems.Retro computing is a hobby that requires much time. So it’s sometimeschallenging to juggle this hobby with other things, especially as the weatherturns warmer here in Southern Ontario. My A3000 system was one that I waslooking to prioritize for resurrection this spring. This is in particularbecause the last thing I tinkered with on the A3000 roughly 20 years back was Amiga UNIX. Yes, my A3000 sat in storage for around 20 years! In the mid to late 90’s, I ventured out to an Amiga speciality shop in London, Ontario (Canada) for a clearance they were having. It’s there that I happened across Amiga UNIX software (tape and manuals), as well the Commodore A2410 High Resolution Graphics board, Commodore A2065 Ethernet board and a Wangtek 5150ES tape drive(which is mounted in a SUN Microsystems external case). Here is a view of theAmiga ShowConfig output.I had the foresight to remove the motherboard RTC batteries beforestoring the systems. But my A3000 refused to boot when I took it out of storagelate in 2021. After much fiddling, I decided to reach out to a local Amigarepair specialist. The gentleman worked at Comspec(?), which did repair workfor Commodore back in the day.I recently got my A3000 back after the fault was corrected, and a newreplaceable coin battery for the RTC was installed. The fault turnedout to be an issue with some of the ZIP memory sockets. Because of thedifficulty and cost in purchasing ZIP memory back in the day, I purchaseda ProvTech AmiFast 3000 ZIP to SIMM converter which allows me to use 72-pin SIMM memory.With a working A3000 system, it was time to look at software once again.I found my old dusty Amiga OS 3.5 and OS 3.9 original media sets. Withsome effort I was able to get Amiga OS 3.9 installed on the system. It’snot that the installation is difficult, it was more a matter of getting myCDROM working and clearing out some of the cobwebs on my Amiga knowledge.Additionally, I was able to successfully boot Amiga UNIX off an externalSCSI disk which I installed back in roughly ‘98 or ‘99. 
I plan to writemore about Amiga UNIX in a subsequent post. For those who are curious aboutAmiga UNIX there is a fantastic wiki here.Back to Amiga OS 3.9. After getting it installed successfully I had a few goals:Get my Amiga online via the A2065 Ethernet boardGet a high resolution Workbench (desktop) via the A2410Relive the memories!Amiga on the ‘NetI recall back in the day various Amiga TCP/IP implementations such asAS225 and AmiTCP. Consulting with the gentleman who repaired my Amiga,he suggested Roadshow. I’d neverheard of Roadshow before, but downloaded and got the trial version workingeasily. I required to copy the a2065.device driver for theA2065 board to the system and created the necessary configuration filein SYS:Devs/NetInterfaces. The configuration file A2065 is shown in theimage below.A quick aside here. I had to create a CD with a bunch of softwareincluding Roadshow and a number of utilities from Aminet such as the A2065 device driver. Aminet is one of the goto places for Amigasoftware on the net.I found Roadshow so easy to get setup and working that I purchased a licensefor it. I also purchased licenses at the same time for GoADF, which is a great utility for managing ADF (Amiga Disk Format) files.With a working TCP/IP stack, I installed the trial version of iBrowse, in addition to the FTP utility RNOXfer (from Aminet). With a working FTP client, I could now more easily move files to and from the A3000. This definitely helped for the next stage.Just a note that browsing on the Amiga is definitely a retro experience.This is in no way a slight at the fine folks who develop and maintainbrowsers such as iBrowse. I’m considering updating my iBrowse demolicense to a full license in the future as well.I also took the opportunity at this time to install an NTP client. Even thoughmy Amiga now has a working RTC, I still like to use NTP to keep the clockaccurately set. For this I used the AmiTimeKeeper utility from Aminet.I pointed it as I do normally to the NTP servers at the National ResearchCouncil (NRC) of Canada. TimeKeeper has a CLI interface as well as a UIstatus window to provide information on the synchronization status.Workbench à la A2410It was time to move on to having a high resolution Workbench (desktop) experience. I also own a Picasso II video card which is presently in my Amiga 2000 system. Using P96 or CyberGraphX on the Picasso II was quite straightforward inthe past. My goal this time was to use the A2410, which from what I couldread, was supported in CyberGraphX V4.Thing is, when I went to install CyberGraphX V4 from my original media,I did not see the A2410 listed. It was only when I applied the update(s) that I could see the A2410 listed as a supported video card. Note the final patch version of CyberGraphX I’m using is from cgxv42_rc6.lha which I downloadedfrom Aminet here.The A2410 CyberGraphX (CGX) driver installed without a hitch, gettingit to work was a challenge. Although I could get a Workbench to appear in thedesired resolution and colours, when I double clicked on any icon on the desktop, the system would hang. It was only through trial and error that I discoveredthat some specific CyberGraphX variables had to be set. The screenshot below of the CyberGraphX settings tool shows the current, woring settings. Ultimately,the hang seemed to be addressed by enabling the CGX SUPERGELS variable.Here is a look at the CGX showcgxconfig tool output.A screenshot of the Workbench driven by the A2410 is shown below. 
Theperformance is not great, but it does work, and I’m super pleased aboutthat. On the subject of graphics cards, I’ve had my eye on the MNT ZZ9000 which I’m considering purchasing to breathe more life into my A3000.The next stage in this journey is to get the same configuration workingwith Amiga OS 3.2, which I purchased from the folks at Retro Rewind in Toronto. According to what I’ve read, I need to downgrade theintuition.library version to get CyberGraphX working with OS 3.2. I’llwrite more about this when I have the opportunity.And now, I’m ready to begin to relive those memories!Update! Here are some photos of the A2065, A2410 and A3000 daughterboardfrom my system.", + "content_html": "

    Although I very much started my experience with home computers with IBM compatibles running MS-DOS in the late 1980’s, I’m a lifelong, self-professed Commodore-Amiga addict. I distinctly recall the launch of the Amiga A1000 and being dazzled by its multimedia capabilities around the same time that I had a PC XT with CGA graphics. I was instantly hooked. Having great video games for the time was just icing on the cake.

    I started my Amiga experience with an A500, which I quickly traded in for an A2000 model, which I still have today. I came across an A3000 in the late 1990’s for a small sum and added it to my collection. The A3000 is my favourite Amiga system, with onboard SCSI, and its design is reminiscent of the pizza-box UNIX servers which were common back in the day.

    The majority of my friends at the time were all-in on PCs. But for me there was just something a bit clinical and boring about them. The Amiga filled this gap for me and continues to do so. It’s probably one of the big reasons why I still to this day tinker so much with non-X86 systems.

    Retro computing is a hobby that requires much time. So it’s sometimes challenging to juggle this hobby with other things, especially as the weather turns warmer here in Southern Ontario. My A3000 system was one that I was looking to prioritize for resurrection this spring, in particular because the last thing I tinkered with on the A3000, roughly 20 years back, was Amiga UNIX. Yes, my A3000 sat in storage for around 20 years! In the mid to late 90’s, I ventured out to an Amiga speciality shop in London, Ontario (Canada) for a clearance they were having. It’s there that I happened across Amiga UNIX software (tape and manuals), as well as the Commodore A2410 High Resolution Graphics board, the Commodore A2065 Ethernet board and a Wangtek 5150ES tape drive (which is mounted in a Sun Microsystems external case). Here is a view of the Amiga ShowConfig output.

    I had the foresight to remove the motherboard RTC batteries before storing the systems. But my A3000 refused to boot when I took it out of storage late in 2021. After much fiddling, I decided to reach out to a local Amiga repair specialist. The gentleman worked at Comspec(?), which did repair work for Commodore back in the day.

    I recently got my A3000 back after the fault was corrected, and a new replaceable coin battery for the RTC was installed. The fault turned out to be an issue with some of the ZIP memory sockets. Because of the difficulty and cost in purchasing ZIP memory back in the day, I purchased a ProvTech AmiFast 3000 ZIP to SIMM converter which allows me to use 72-pin SIMM memory.

    With a working A3000 system, it was time to look at software once again. I found my old dusty Amiga OS 3.5 and OS 3.9 original media sets. With some effort I was able to get Amiga OS 3.9 installed on the system. It’s not that the installation is difficult, it was more a matter of getting my CDROM working and clearing out some of the cobwebs on my Amiga knowledge.

    Additionally, I was able to successfully boot Amiga UNIX off an external SCSI disk which I installed back in roughly ‘98 or ‘99. I plan to write more about Amiga UNIX in a subsequent post. For those who are curious about Amiga UNIX there is a fantastic wiki here.

    Back to Amiga OS 3.9. After getting it installed successfully I had a few goals:

    • Get my Amiga online via the A2065 Ethernet board
    • Get a high resolution Workbench (desktop) via the A2410
    • Relive the memories!

    Amiga on the ‘Net

    I recall back in the day various Amiga TCP/IP implementations such as AS225 and AmiTCP. Consulting with the gentleman who repaired my Amiga, he suggested Roadshow. I’d never heard of Roadshow before, but downloaded and got the trial version working easily. I needed to copy the a2065.device driver for the A2065 board to the system and create the necessary configuration file in SYS:Devs/NetInterfaces. The configuration file A2065 is shown in the image below.

    A quick aside here. I had to create a CD with a bunch of software including Roadshow and a number of utilities from Aminet such as the A2065 device driver. Aminet is one of the go-to places for Amiga software on the net.

    I found Roadshow so easy to get set up and working that I purchased a license for it. I also purchased licenses at the same time for GoADF, which is a great utility for managing ADF (Amiga Disk Format) files.

    With a working TCP/IP stack, I installed the trial version of iBrowse, in addition to the FTP utility RNOXfer (from Aminet). With a working FTP client, I could now more easily move files to and from the A3000. This definitely helped for the next stage.

    Just a note that browsing on the Amiga is definitely a retro experience. This is in no way a slight at the fine folks who develop and maintain browsers such as iBrowse. I’m considering updating my iBrowse demo license to a full license in the future as well.

    I also took the opportunity at this time to install an NTP client. Even though my Amiga now has a working RTC, I still like to use NTP to keep the clock accurately set. For this I used the AmiTimeKeeper utility from Aminet. I pointed it as I do normally to the NTP servers at the National Research Council (NRC) of Canada. TimeKeeper has a CLI interface as well as a UI status window to provide information on the synchronization status.

    Workbench à la A2410

    It was time to move on to having a high resolution Workbench (desktop) experience. I also own a Picasso II video card which is presently in my Amiga 2000 system. Using P96 or CyberGraphX on the Picasso II was quite straightforward in the past. My goal this time was to use the A2410, which, from what I could read, was supported in CyberGraphX V4.

    Thing is, when I went to install CyberGraphX V4 from my original media, I did not see the A2410 listed. It was only when I applied the update(s) that I could see the A2410 listed as a supported video card. Note that the final patch version of CyberGraphX I’m using is from cgxv42_rc6.lha, which I downloaded from Aminet here.

    The A2410 CyberGraphX (CGX) driver installed without a hitch, but getting it to work was a challenge. Although I could get a Workbench to appear in the desired resolution and colours, when I double clicked on any icon on the desktop, the system would hang. It was only through trial and error that I discovered that some specific CyberGraphX variables had to be set. The screenshot below of the CyberGraphX settings tool shows the current, working settings. Ultimately, the hang seemed to be addressed by enabling the CGX SUPERGELS variable.

    Here is a look at the CGX showcgxconfig tool output.

    A screenshot of the Workbench driven by the A2410 is shown below. The performance is not great, but it does work, and I’m super pleased about that. On the subject of graphics cards, I’ve had my eye on the MNT ZZ9000, which I’m considering purchasing to breathe more life into my A3000.

    The next stage in this journey is to get the same configuration working with Amiga OS 3.2, which I purchased from the folks at Retro Rewind in Toronto. According to what I’ve read, I need to downgrade the intuition.library version to get CyberGraphX working with OS 3.2. I’ll write more about this when I have the opportunity.

    And now, I’m ready to begin to relive those memories!

    Update! Here are some photos of the A2065, A2410 and A3000 daughterboard from my system.

    ", + "url": "https://hpc.social/personal-blog/2022/relivin-the-90-s-amiga-style/", + + + + + + "date_published": "2022-03-29T13:53:09-06:00", + "date_modified": "2022-03-29T13:53:09-06:00", + + "author": "Ramblings of a supercomputing enthusiast." + + }, + + { + "id": "https://hpc.social/personal-blog/2022/an-unstructured-rant-on-running-long-lived-software-services/", + "title": "An unstructured rant on running long-lived software services", + "summary": null, + "content_text": "– Be kind to your colleagues. Be kind to your users. Be kind to yourself. This is a long haul and you’ll all fuck up.⁃ The natural environment for your code is production. It will run there longer than it does anywhere else. Design for prod first, and if possible, make your dev environment act like prod.⁃ Legacy code is the only code worth caring about.⁃ Users do weird stuff, but they usually have a very good reason, at least in their context. Learn from them.⁃ It’s 2022, please do structured logging.⁃ Contexts and tracing make everyone’s lives easier when it comes time to debug. At minimum, include a unique request id with every request and plumb it through the system.⁃ Do your logging in a separate thread. It sucks to find a daemon blocked and hanging because of a full disk or a down syslog server.⁃ Don’t page for individual machines going down. Do provide an easy or automated way for bad nodes to get thrown out of the system.– Be prepared for your automation to be the problem, and include circuit breakers or kill switches to stop it. I’ve seen health checks that started flagging every machine in the fleet as bad, whether it was healthy or not. We didn’t bring down prod because the code assumed if it flagged more than 15% of the fleet as bad, the problem was probably with the test, not the service.⁃ Make sure you have a way to know who your users are. If you allow anonymous access, you’ll discover in five years that a business-critical team you’ve never heard of is relying on you.⁃ Make sure you have a way to turn off access for an individual machine, user, etc. If your system does anything more expensive than sending network requests, it will be possible for a single bad client to overwhelm a distributed system with thousands of servers. Turning off their access is easier than begging them to stop.⁃ If you don’t implement QOS early on, it will be hellish to add it later, and you will certainly need it if your system lasts long enough.⁃ If you provide a client library, and your system is internal only, have it send logs to the same system as your servers. This will help trace issues back to misbehaving clients so much.⁃ Track the build time for every deployed server binary and monitor how old they are. If your CI process deploys daily, week-old binaries are a problem. Month-old binaries are a major incident.⁃ If you can get away with it (internal services): track the age of client library builds and either refuse to support builds older than X, or just cut them off entirely. It sucks to support requests from year-old clients, force them to upgrade!⁃ Despite all this, you will at some point start getting requests from an ancient software version, or otherwise malformed. Make sure these requests don’t break anything.⁃ Backups are a pain, and the tooling is often bad, but I swear they will save you one day. Take the time to invest in them.⁃ Your CI process should exercise your turnup process, your decommission process, and your backups workflow. 
Life will suck later if you discover one of these is broken.⁃ Third party services go down. Your service goes down too, but they probably won’t happen at the same time. Be prepared to either operate without them, or mirror them yourself⁃ Your users will never, ever care if you’re down because of a dependency. Every datacenter owned by AWS could be hit by a meteor at the same time, but your user will only ever ask “why doesn’t my service work?”⁃ Have good human relationships with your software dependencies. Know the people who develop them, keep in touch with them, make sure you understand each other. This is especially true internally but also important with external deps. In the end, software is made of people.⁃ If users don’t have personal buy-in to the security policy, they will find ways to work around them and complain about you for making their lives harder. Take the time to educate them, or you’ll be fighting them continuously.", + "content_html": "

    – Be kind to your colleagues. Be kind to your users. Be kind to yourself. This is a long haul and you’ll all fuck up.

    ⁃ The natural environment for your code is production. It will run there longer than it does anywhere else. Design for prod first, and if possible, make your dev environment act like prod.

    ⁃ Legacy code is the only code worth caring about.

    ⁃ Users do weird stuff, but they usually have a very good reason, at least in their context. Learn from them.

    ⁃ It’s 2022, please do structured logging.

    ⁃ Contexts and tracing make everyone’s lives easier when it comes time to debug. At minimum, include a unique request id with every request and plumb it through the system (see the logging sketch after this list).

    ⁃ Do your logging in a separate thread. It sucks to find a daemon blocked and hanging because of a full disk or a down syslog server.

    ⁃ Don’t page for individual machines going down. Do provide an easy or automated way for bad nodes to get thrown out of the system.

    – Be prepared for your automation to be the problem, and include circuit breakers or kill switches to stop it. I’ve seen health checks that started flagging every machine in the fleet as bad, whether it was healthy or not. We didn’t bring down prod because the code assumed that if it flagged more than 15% of the fleet as bad, the problem was probably with the test, not the service (a minimal version of this guard is sketched after the list).

    ⁃ Make sure you have a way to know who your users are. If you allow anonymous access, you’ll discover in five years that a business-critical team you’ve never heard of is relying on you.

    ⁃ Make sure you have a way to turn off access for an individual machine, user, etc. If your system does anything more expensive than sending network requests, it will be possible for a single bad client to overwhelm a distributed system with thousands of servers. Turning off their access is easier than begging them to stop.

    ⁃ If you don’t implement QOS early on, it will be hellish to add it later, and you will certainly need it if your system lasts long enough.

    ⁃ If you provide a client library, and your system is internal only, have it send logs to the same system as your servers. This will help trace issues back to misbehaving clients so much.

    ⁃ Track the build time for every deployed server binary and monitor how old they are. If your CI process deploys daily, week-old binaries are a problem. Month-old binaries are a major incident.

    ⁃ If you can get away with it (internal services): track the age of client library builds and either refuse to support builds older than X, or just cut them off entirely. It sucks to support requests from year-old clients, force them to upgrade!

    ⁃ Despite all this, you will at some point start getting requests from an ancient software version, or otherwise malformed. Make sure these requests don’t break anything.

    ⁃ Backups are a pain, and the tooling is often bad, but I swear they will save you one day. Take the time to invest in them.

    ⁃ Your CI process should exercise your turnup process, your decommission process, and your backups workflow. Life will suck later if you discover one of these is broken.

    ⁃ Third party services go down. Your service goes down too, but they probably won’t happen at the same time. Be prepared to either operate without them, or mirror them yourself.

    ⁃ Your users will never, ever care if you’re down because of a dependency. Every datacenter owned by AWS could be hit by a meteor at the same time, but your user will only ever ask “why doesn’t my service work?”

    ⁃ Have good human relationships with your software dependencies. Know the people who develop them, keep in touch with them, make sure you understand each other. This is especially true internally but also important with external deps. In the end, software is made of people.

    ⁃ If users don’t have personal buy-in to the security policy, they will find ways to work around them and complain about you for making their lives harder. Take the time to educate them, or you’ll be fighting them continuously.
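
    To make the structured-logging and request-id items above concrete, here is a minimal Python sketch (names like handle_request are hypothetical, and the exact fields are just one reasonable choice): every log line is a single JSON object, and a request id set once at the edge flows through everything logged beneath it.

      import json
      import logging
      import sys
      import uuid
      from contextvars import ContextVar

      # Hypothetical request-scoped id; a real service might take it from a header.
      request_id = ContextVar("request_id", default="-")

      class JsonFormatter(logging.Formatter):
          # Render each record as one JSON object per line, carrying the request id.
          def format(self, record):
              return json.dumps({
                  "ts": self.formatTime(record),
                  "level": record.levelname,
                  "msg": record.getMessage(),
                  "request_id": request_id.get(),
              })

      handler = logging.StreamHandler(sys.stdout)
      handler.setFormatter(JsonFormatter())
      log = logging.getLogger("svc")
      log.addHandler(handler)
      log.setLevel(logging.INFO)

      def handle_request(payload):
          # Set the id once; every log call in this request now includes it.
          request_id.set(payload.get("request_id", uuid.uuid4().hex))
          log.info("request received")
          log.info("request done")

      handle_request({"user": "alice"})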
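
    And for the automation circuit breaker described above, the guard can be as small as a threshold check. This sketch assumes a hypothetical is_healthy() probe and reuses the 15% figure from the anecdote:

      def nodes_to_remove(fleet, is_healthy, max_bad_fraction=0.15):
          # Nodes an automated repair loop would like to throw out of the system.
          bad = [node for node in fleet if not is_healthy(node)]
          # Circuit breaker: if "too many" nodes look bad at once, the health
          # check itself is the more likely culprit, so refuse to act.
          if len(bad) > max_bad_fraction * len(fleet):
              raise RuntimeError(
                  f"{len(bad)}/{len(fleet)} nodes flagged bad; suspecting the check, doing nothing"
              )
          return bad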

    ", + "url": "https://hpc.social/personal-blog/2022/an-unstructured-rant-on-running-long-lived-software-services/", + + + + + + "date_published": "2022-03-12T16:00:00-07:00", + "date_modified": "2022-03-12T16:00:00-07:00", + + "author": "Thinking Out Loud" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/what-i-ve-learned-from-looking-at-1-500-jobs-leading-research-computing-teams/", + "title": "What I've Learned from Looking at 1,500 Jobs Leading Research Computing Teams", + "summary": null, + "content_text": "Job numbers continue to grow; lots of data and product management jobs; IR groups at Universities becoming bigger employers(Note: This post is adapted from #111 of the Research Computing Teams Newsletter)A year and a half ago I posted my observations on the first 500 jobs posted to the job board - we’re getting close to 1,500 now, and it’s worth taking a look to see what if anything has changed in research computing team leadership and management jobs1.There are some trends that have continued since the posting. The jobs in industry are growing vastly beyond what I would have imagined possible when I started in research computing in the 1990s. (The number of jobs working with biomedical data of one sort or another in particular is just astonishing.) Rather than technical computing being a niche, it’s utterly mainstream now. There are a lot of jobs out there, and I don’t even bother posting generic “data science manager” jobs unless they’re connected to some real complex research questions - which happens a lot, whether it’s fraud detection or improving financial modelling or supporting biomedical research. Some really fun-looking jobs that would probably feel a lot like working at a research computing centre keep coming up at consultancies –– go visit a client and help them with their data science/data engineering/etc needs. There’s also a growing number of data science/engineering jobs at Universities that fall under the Provost/VP Operations rather than the VPR’s side of the house — Institutional Research, looking at (say) student success in support of the teaching mission.Because of the growth in number of jobs, it is very much a candidate’s market out there. I’m seeing postings –– especially for the traditional academic “director of research computing” jobs –— stay open for cringe-inducing periods of time. A few in particular I’ve watched with vicarious embarrassment continue coming up in the listings for 8+ months. That’s a bad sign for us as hiring managers - the market for individual contributors is at least as tight - but it’s amazing news for us as individuals.When I wrote that post in late 2020 it was just regulated industries like health/biotech or financial services that were developing data governance or other data management jobs, but now data management is popping up everywhere, whether it’s retail or logistics or anywhere else. These are being joined, again first in the regulated industries, by data privacy or data risk management jobs. Privacy-preserving data analysis jobs (and teams supporting same with software development) are also starting to be more common (and there’s a lot of cool research and technology work to be done there!)I’m also (finally!) starting to see a explicitly product management jobs in research computing, both academic and private-sector. 
You see it around data management — bundling and curating of data into real data products — but also in software development, especially around analysis pipelines for some reason.Probably related to the growth of product vs project thinking, I’m starting to see a lot of “delivery manager” jobs that would have been called “project managers” just a year ago. Projects are defined by having clearly defined start- and end-points up-front. “Delivery” jobs seem to focus on sustained, ongoing work, more appropriate for long-lived products.These products that keep coming up often combine data, software, and systems one way or another. That really points to weaknesses around organizing by type of skills - the research software engineering movement, for instance - as the lines between software and systems in this DevOps, infrastructure-as-code era is very fuzzy; and as data grows more and more important, data skills are needed everywhere.Especially for us as managers or leads, but especially for individual contributors as they grow their skills, it’s important to have a pretty holistic view of research computing and data and not try to break it up into silos. The growing number of data engineering jobs is a great example. That work often involves all three of software, systems, and data expertise. Data engineering is getting so broad and important that not only are there different sub-fields, in large organizations there are likely to be completely distinct data engineering teams doing different work. Trying to decide which of those jobs are “research software engineering” jobs and which aren’t is not a productive way forward, for those candidates or for us as a community.Needless to say, the growth of remote jobs has been off the charts - especially in the private sector, although the academic institutions are gamely doing what they can to keep up (often hampered by institutional policies).Late June 2022 update: At the time that I write this, there’s a slow down in hiring in tech, especially among early stage-startups. That slowdown due to economic conditions as I write this is not, as far as I can tell, affecting these more research-oriented kinds of jobs. The job board doesn’t have a lot of jobs from startups anyway. For larger organizations, the biotech firms or the banking firms doing fraud detection research or the computing providers or academic groups or… clearly do not view these roles as “nice to haves” that can wait until there’s a bit more economic certainty. What counts as such a job? Any job that involves leading, or mentoring people, or managing projects, programs, or products, in software, systems, or data curation/management/engineering/analysis to support the solution of research problems is a good fit. If you are hiring for such a job, feel free to submit it to the job board. ↩ ", + "content_html": "

    Job numbers continue to grow; lots of data and product management jobs; IR groups at Universities becoming bigger employers

    (Note: This post is adapted from #111 of the Research Computing Teams Newsletter)

    A year and a half ago I posted my observations on the first 500 jobs posted to the job board - we’re getting close to 1,500 now, and it’s worth taking a look to see what if anything has changed in research computing team leadership and management jobs1.

    There are some trends that have continued since the posting. The jobs in industry are growing vastly beyond what I would have imagined possible when I started in research computing in the 1990s. (The number of jobs working with biomedical data of one sort or another in particular is just astonishing.) Rather than technical computing being a niche, it’s utterly mainstream now. There are a lot of jobs out there, and I don’t even bother posting generic “data science manager” jobs unless they’re connected to some real complex research questions - which happens a lot, whether it’s fraud detection or improving financial modelling or supporting biomedical research. Some really fun-looking jobs that would probably feel a lot like working at a research computing centre keep coming up at consultancies — go visit a client and help them with their data science/data engineering/etc needs. There’s also a growing number of data science/engineering jobs at Universities that fall under the Provost/VP Operations rather than the VPR’s side of the house — Institutional Research, looking at (say) student success in support of the teaching mission.

    Because of the growth in number of jobs, it is very much a candidate’s market out there. I’m seeing postings — especially for the traditional academic “director of research computing” jobs — stay open for cringe-inducing periods of time. A few in particular I’ve watched with vicarious embarrassment continue coming up in the listings for 8+ months. That’s a bad sign for us as hiring managers - the market for individual contributors is at least as tight - but it’s amazing news for us as individuals.

    When I wrote that post in late 2020 it was just regulated industries like health/biotech or financial services that were developing data governance or other data management jobs, but now data management is popping up everywhere, whether it’s retail or logistics or anywhere else. These are being joined, again first in the regulated industries, by data privacy or data risk management jobs. Privacy-preserving data analysis jobs (and teams supporting same with software development) are also starting to be more common (and there’s a lot of cool research and technology work to be done there!)

    I’m also (finally!) starting to see explicit product management jobs in research computing, both academic and private-sector. You see it around data management — bundling and curating of data into real data products — but also in software development, especially around analysis pipelines for some reason.

    Probably related to the growth of product vs project thinking, I’m starting to see a lot of “delivery manager” jobs that would have been called “project managers” just a year ago. Projects are defined by having clearly defined start- and end-points up-front. “Delivery” jobs seem to focus on sustained, ongoing work, more appropriate for long-lived products.

    These products that keep coming up often combine data, software, and systems one way or another. That really points to weaknesses around organizing by type of skills - the research software engineering movement, for instance - as the lines between software and systems in this DevOps, infrastructure-as-code era is very fuzzy; and as data grows more and more important, data skills are needed everywhere.

    Especially for us as managers or leads, but especially for individual contributors as they grow their skills, it’s important to have a pretty holistic view of research computing and data and not try to break it up into silos. The growing number of data engineering jobs is a great example. That work often involves all three of software, systems, and data expertise. Data engineering is getting so broad and important that not only are there different sub-fields, in large organizations there are likely to be completely distinct data engineering teams doing different work. Trying to decide which of those jobs are “research software engineering” jobs and which aren’t is not a productive way forward, for those candidates or for us as a community.

    Needless to say, the growth of remote jobs has been off the charts - especially in the private sector, although the academic institutions are gamely doing what they can to keep up (often hampered by institutional policies).

    Late June 2022 update: At the time that I write this, there’s a slowdown in hiring in tech, especially among early-stage startups. That slowdown due to economic conditions is not, as far as I can tell, affecting these more research-oriented kinds of jobs. The job board doesn’t have a lot of jobs from startups anyway. For larger organizations, the biotech firms or the banking firms doing fraud detection research or the computing providers or academic groups or… clearly do not view these roles as “nice to haves” that can wait until there’s a bit more economic certainty.


    1. What counts as such a job? Any job that involves leading, or mentoring people, or managing projects, programs, or products, in software, systems, or data curation/management/engineering/analysis to support the solution of research problems is a good fit. If you are hiring for such a job, feel free to submit it to the job board. ↩

    ", + "url": "https://hpc.social/personal-blog/2022/what-i-ve-learned-from-looking-at-1-500-jobs-leading-research-computing-teams/", + + + + + + "date_published": "2022-02-26T00:00:00-07:00", + "date_modified": "2022-02-26T00:00:00-07:00", + + "author": "Jonathan Dursi's Blog" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/a-supportive-job-interview-story/", + "title": "A supportive job interview story", + "summary": null, + "content_text": "(adapted from an old lobste.rs comment)My favorite interview ever was a systems interview that didn’t go as planned. This was for an SRE position, and while I expected the interview to be a distributed systems discussion, the interviewer instead wanted to talk kernel internals.I was not at all prepared for this, and admitted it up front. The interviewer said something along the lines of, “well, why don’t we see how it goes anyway?”He then proceeded to teach me a ton about how filesystem drivers work in Linux, in the form of leading me carefully through the interview question he was “asking” me. The interviewer was incredibly encouraging throughout, and we had a good discussion about why certain design decisions worked the way they did.I ended the interview (a) convinced I had bombed it, but (b) having had an excellent time anyway and having learned a bunch of new things. I later learned the interviewer had recommended to hire me based on how our conversation had gone, though I didn’t end up taking the job for unrelated reasons having to do with relocation.I’ve given a number of similar interviews since, on system design or general sysadmin skills. I’ve always tried to go into these thinking about both where I could learn, and where I could teach, and how either outcome would give the candidate a chance to shine.", + "content_html": "

    (adapted from an old lobste.rs comment)

    My favorite interview ever was a systems interview that didn’t go as planned. This was for an SRE position, and while I expected the interview to be a distributed systems discussion, the interviewer instead wanted to talk kernel internals.

    I was not at all prepared for this, and admitted it up front. The interviewer said something along the lines of, “well, why don’t we see how it goes anyway?”

    He then proceeded to teach me a ton about how filesystem drivers work in Linux, in the form of leading me carefully through the interview question he was “asking” me. The interviewer was incredibly encouraging throughout, and we had a good discussion about why certain design decisions worked the way they did.

    I ended the interview (a) convinced I had bombed it, but (b) having had an excellent time anyway and having learned a bunch of new things. I later learned the interviewer had recommended to hire me based on how our conversation had gone, though I didn’t end up taking the job for unrelated reasons having to do with relocation.

    I’ve given a number of similar interviews since, on system design or general sysadmin skills. I’ve always tried to go into these thinking about both where I could learn, and where I could teach, and how either outcome would give the candidate a chance to shine.

    ", + "url": "https://hpc.social/personal-blog/2022/a-supportive-job-interview-story/", + + + + + + "date_published": "2022-02-25T16:00:00-07:00", + "date_modified": "2022-02-25T16:00:00-07:00", + + "author": "Thinking Out Loud" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/interactive-development-containers/", + "title": "Interactive Development Containers", + "summary": null, + "content_text": "I’ve recently been interested in developer workflows. Aside from being a developer, I feellike the tooling for our community, and especially for HPC or hybrid environments, is lacking.As a simple example, let’s ask a basic question: How do I start developing here and move it over there?For the most part, creating a development container is fairly straight forward, and we can even bind sourcecode to the host to work on in one editor terminal and then build and run or test in another. However,for the moving part, it gets shoddy. Our best bet is to rebuild the container with the most updated source code, push to a registry, and then pull down somewhere else.For a container that is a binary and not layers provided by a registry, we could even scp it.If we do this right, we will have an automated build and deploy that triggers when we merge new code into main, but do you see the problem? What about the code that we wantto test that isn’t ready to merge? This is why we typically would need to manuallypush to a registry with some kind of “work in progress” tag and then pull somewhere else.Minimally we’d need to build fresh again, and then reproduce all the steps to set up our environment.Interactive Development ContainersNow I don’t have all the answers, but recently @alecbcs andI have been dreaming about what kinds of development environments we want.functionality such as: Saving the container state without leaving it. Loading or saving or otherwise interacting with named environments. Inspecting or interacting with container metadata, also without leaving the container. Moving files or sizing the container without the same.And actually I won’t even get to answering the first question in this post about moving somethingfrom one place to another, but rest assured it is an important one. This post is about some prototype or fun testing work that we’ve started around these ideas.The playground for some of these early ideas has been Paks. Paks is a Python library that I’m calling a developer wrapper for containers.Mind you, it’s more of a playground right now to experiment with ideas. But I’ve had somuch fun even this early on that I want to share what I’ve learned.WrapperBecause Paks is a wrapper, you will run containers using the paks command. Here are a few quick examples.$ paks run ubuntu$ paks run --shell /bin/sh busybox$ paks run --container-tech podman busyboxWhat is happening on the backend that took me a bit to figure out is that we will need to run a subprocess,but create a pseudo terminal to betterwatch and interact with it. This is going to happen in the “interactive_terminal” command below. But unless youwant your terminal to get wonky, we need to use termios to grab the current tty and make sure it gets restored no matter what at the end. 
    That looks like this:

      def interactive_command(self, cmd):
          \"\"\"
          Ensure we always restore original TTY otherwise terminal gets messed up
          \"\"\"
          # Controller to get history
          self.hist = self.commands.history

          # save original tty setting then set it to raw mode
          old_tty = termios.tcgetattr(sys.stdin)
          old_pty = termios.tcgetattr(sys.stdout)
          try:
              self._interactive_command(cmd)
          finally:
              termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
              termios.tcsetattr(sys.stdout, termios.TCSADRAIN, old_pty)

    What happens if you don’t do that? Your terminal gets weird and wonky. And then in the interactive command function, this is where we launch a subprocess with a new pseudo terminal:

      tty.setraw(sys.stdin.fileno())

      # open pseudo-terminal to interact with subprocess
      openpty, opentty = pty.openpty()

      # use os.setsid() make it run in a new process group, or bash job control will not be enabled
      p = subprocess.Popen(
          cmd,
          preexec_fn=os.setsid,
          stdin=opentty,
          stdout=opentty,
          stderr=opentty,
          universal_newlines=True,
      )

      # Welcome to Paks!
      self.welcome(openpty)

    The setsid as a pre-exec function ensures the child process is a new session and won’t exit, sort of akin to a daemon. So at face value, yes, it is doing exactly what you think - we are shelling into the container and watching the command line, looking for paks-known commands. And I didn’t use a Python keylogger because I found that keyboard requires sudo (like really?!) and pynput is really scary because it doesn’t just get keys from the terminal - it’s watching anything you type anywhere! That gave me the heebie jeebies. I hope there is some scanner for pypi that is looking for that package and checking it’s not being malicious.

    All of the above said, and all the time spent, I’m not convinced that this exact method is the best way to be running commands from inside the container. There are other ideas that need to be tested!

    Structure

    We could have talked about this first, but let me show you the basic structure of paks so you get an understanding of the components.

      paks
      # Backends are different wrappers, so logically we start with podman and docker
      ├── backends
      │   ├── base.py
      │   ├── docker.py
      │   ├── __init__.py
      │   └── podman.py
      # The client is what you interact with on the command line. This shows the various commands available.
      ├── cli
      │   ├── config.py
      │   ├── env.py
      │   ├── __init__.py
      │   └── run.py
      # This is a central controller for things
      ├── client.py
      # Here's all the built-in, interactive commands paks supports!
      ├── commands
      │   ├── command.py
      │   ├── cp.py
      │   ├── env.py
      │   ├── history.py
      │   ├── __init__.py
      │   ├── inspect.py
      │   └── state.py
      ├── defaults.py
      ├── env.py
      ├── logger.py
      # Coming soon - load your own commands!
      ├── plugins.py
      ├── schemas.py
      ├── settings.py
      ├── settings.yml
      ├── templates.py
      ├── utils
      └── version.py

    So that should give you the gist - we have container wrappers (backends) and then commands that we can issue while we are inside the container. Let’s talk about them next.

    Saving State

    The first thing I wanted to try with Paks was to save a container state, but without needing to open a separate terminal and save from the outside. The use case for this is that given I’m in an interactive container and I’ve made some changes, I don’t want to exit and rebuild. All y’all reproducibility folks can stop wincing, and realize that we also need more temporary or throwaway development environments like this. Reproducibility is important, but mostly for the final production thing, and only up to a level of not giving us pain.
    So how might I do this? For paks, while you are inside the container (let’s say ubuntu) you simply ask to #save:

      $ paks run ubuntu
      # touch PANCAKES
      # #save
      Saving container...
      sha256:d82aaa268feb59344cf31a757ce7f5c0caa6a6bbd10b8d0af1d55cdbc50b609b
      [+] Building 0.2s (5/5) FINISHED
      ...
      => => writing image sha256:f58ae524d8644400b33c078f19612cba7849ef8f3ea158e2291ac697a4129080
      => => naming to docker.io/library/busybox-saved
      Untagged: dockerio-busybox-joyous-hippo-3922-gloopy-peanut-9044:latest
      Deleted: sha256:d82aaa268feb59344cf31a757ce7f5c0caa6a6bbd10b8d0af1d55cdbc50b609b
      Deleted: sha256:f58ae524d8644400b33c078f19612cba7849ef8f3ea158e2291ac697a4129080
      Successfully saved container! ⭐️

    And then you can see that there is an ubuntu-saved container!

      $ docker images | grep ubuntu
      ubuntu-saved   latest   93e336d994de   2 minutes ago   72.8MB
      ubuntu         latest   54c9d81cbb44   7 days ago      72.8MB

    So this has saved me some tiny bit of energy to open up another terminal, remember how to docker commit, and then also rebuild with a squash to minimize the layers (as there is a maximum number we don’t want to hit).

    What Paks could then eventually do is make it easy to move this entire container between places, e.g., from your local machine to HPC without a hitch. I haven’t started to work on that yet because this is a fun side project.

    Environments

    One thing I do a lot is use GitHub tokens to do fun stuff with the API. I usually need to keep this in some hidden file, then find it, open it, copy paste it, and export it in the container. And then I do that a million times when I have to run a new container. But with Paks, we can create a named environment on the host (a file to source with exports):
Let’s talk about it.DebuggingSo the first thing I learned is that my typical “import IPython” and “IPython.embed()”isn’t going to work as easily as normal, because (at least superficially) I didn’tsee a way to have it sort of injected into the process. Anything that is interactive inthat loop is still (conceptually) running on my host. So when I use IPythonit does some weird stuff with carriage returns, but it’s still possible to interact witha little bit. So what I wound up doing so I could easily see every keypress was to writeto file in append mode:with open('/tmp/file.txt', 'a') as fd: fd.write(new_char)This was kind of neat because I could be typing in one terminal, and then havea file open (watching it) that updates with changes, and I’d get a sense of whatis going on. I could append anything to this file to debug. And this is also reallydifferent from how we normally use subprocess, where maybe we will parse entire linesat once:p = subprocess.Popen(['python','thing.py'], stdout=subprocess.PIPE)while True: line = p.stdout.readline() if not line: breakbecause we are reading on character at a time! So what we essentially need to dois keep a string that we continue appending to unless there is a newline, up or down,or left or right to indicate moving the cursor.Ascii CharactersI started to quickly see characters that my editor didn’t know - e.g., likelyescape sequences and other ascii that showed up in the little question mark box.I quickly realized that I was seeing asciicode (and some characters that couldn’t be parsed) so the solution was to look at the ordof the character and compare to a number. For example, for a backspacethe number is 127. So to act on it I might do:# if we have a backspace (ord 127)if len(new_char) == 1 and ord(new_char) == 127: # This is our in progress line. If we have content, backspace! if len(string_input) > 0: string_input = string_input[:-1] # But if we don't, just write the character for the person to see and # keep collecting new characters (continue in the loop) if not string_input: os.write(openpty, terminal_input) continue # Otherwise (not a backspace) add to our growing line to parse further!else: string_input = string_input + new_charThe above is basically looking for a backspace, and if we find one, we removeone character from the line we are assembling. Otherwise we just add the new characterto the line.xterm sequencesAnd a similar thing happens for pressing up/down and right/left, except theterminal parses them as “[A”, “[B”, “[C”, and “[D”, respectively, and often withan escape sequence first. There are some nice tables herefor the interested reader! And this was also the point that I realized how challenging parsing input is!Along with needing to account for every character, you also need to account for platformdifferences. 
That’s also why I view this library as mostly for development and thinking,or at least for mostly Linux and bash shells, because I’m not sure I could ever handle them all.So for the purposes of my library, for now I decided I’m not going to handle moving left and right,nor do I want to deal with weird extra ascii characters that are added, so I just clean them up.# Get rid of left/rightstring_input = string_input.replace(\"[D\", \"\").replace(\"[C\", \"\")# Replace weird characters and escape sequencesstring_input = self.clean(string_input)Yes, that probably means some of your ninja shortcuts won’t work perfectly when running paks,and if you absolutely want one to be parsed please let me know and we can add it.NewlinesSo the gold nugget of content that Paks is interested in is when you press enter.This means you’ve finished typing something and there is some version of a newlineor carriage return. This is also a pretty variable thing depending on the platform you areon - newlines can come in very different forms! I tried to honor the two that I see most often: \\r\\n: Windows \\n: UNIX (e.g., Mac OSX) \\r: Mac (pre OSX)has_newline = \"\\n\" in string_input or \"\\r\" in string_inputAt this point, we can start acting on what we see. E.g., if the user has asked for anykind of exit, I honor it.# Universal exit commandif \"exit\" in string_input and has_newline: print(\"\\n\\rContainer exited.\\n\\r\") return self.uri.extended_nameThe return of the name at the end is to handle cleaning up the image, which was allocateda temporary name.HistoryOne of the more interesting parts of this project was realizing that people use history, a lot.At least I do. This is going to appear as an up or down press, and only when a newline is found is some item in history re-executed. So first let’s look for exploring history with up/down. There aretwo cases - pressing up/down without a newline:# Pressing up or down, but not enterif (\"[A\" in string_input or \"[B\" in string_input) and not has_newline: string_input = self.get_history(string_input, openpty) os.write(openpty, terminal_input) continueAnd with one:# Pressing up or down with enterif (\"[A\" in string_input or \"[B\" in string_input) and has_newline: string_input = self.get_history(string_input, openpty) os.write(openpty, terminal_input)If we don’t have a newline, we add a continue to keep parsing characters the user istyping. If we do have a newline, we let the loop keep running to keep parsing the line of history we retrieved.But let’s step back and talk about that history. We basically want to retrieve whatever line of history thatthe user is asking for, because to us it looks like up and down errors. You could imaginerestoring the previous line, and then editing it. This actually proved to be quite challenging,because I realized (by default) when we start running a container (well, ubuntu and centos)the history is stored in memory and not written to ~/.bash_history. This led to this thread and some people coming in to quickly helpand others coming in just to say “Why are you doing this with containers it makes no sense stop.” Yeah, right. If Ilistened to every person that has ever told me to stop working on something because “REASONS!” I wouldn’tultimately work on much at all.The short answer was that I needed a function to be able to get a line of history, and based on the number of times pressing up or down. 
For my first attempt I said “nevermind this, I’ll just save my own history!”but that got hugely complicated very fast because it turns out, we don’t just stupidly type commands over and over,we are constantly using more characters on the keyboard than letters and numbers, retrieving old things to edit,updating again, and in practice I found that I could keep up with simple parsing, but it would get out of syncfor a longer session. There also is the issue that people can tweak the amount of history saved, or how it’s saved, and there are a set of environment variables and commandsto do that. So most containers will start running and save history to memory and not file (and this makessense in case there is sensitive information) but it was problematic for me because I couldn’t parse it.For example, when someone presses up and down a bunch of times, I might see:[A[A[A[A[A[B[AThis is a reference to some previous command that I can only find in historygiven I’m parsing the input/output as I am. So my second attempt (well, maybe second throughtenth) I was trying different variations of trying to be able to parse the history.If you looked at the tweetyou’ll see we need to run:$ history -ato start writing what’s in memory to file. I didn’t want to do this on every command, because alongwith the user seeing it and the UI being awful, it was just too much. Instead, I realized that I had a smallopportunity when the user first shells into the container (and is expecting a jump in their UI) to run whateverI need and then clear the terminal. So I ran it there, right before a clear and welcome message. def welcome(self, openpty): \"\"\" Welcome the user and clear terminal \"\"\" # Don't add commands executed to history os.write(openpty, self.encode(\" export PROMPT_COMMAND='history -a'\\r\")) os.write(openpty, self.encode(\" clear\\r\")) os.write(openpty, self.encode(\" ### Welcome to PAKS! ###\\r\"))And with this method you aren’t aware of the extra commands at all! And did you notice the spaces above? That’s also another trick! Any command that you type with a leadingspace won’t be saved to history, and this is thanks to HISTCONTROL that has an ignorespace option. I think most people / containersset it to ignore space and to ignore duplicates:root@1c268386714a:/# echo $HISTCONTROLignoredups:ignorespaceThat said, I don’t explicitly try to reset this in the container, so that could be a bugif there is a container base that doesn’t do that. And I’m pretty sure centos doesn’t come with clear!I’ll likely need to work on this a bit more. For now, please consider this only working for debian/ubuntu bases and we can inspect the other ones later!Okay, so now let’s look at the function to get history (self.hist.run). For now, just ignore the command toget the history, that’s actually done via a Paks command that we will talk about after.Here is what is going on:def get_history(self, line, openpty): \"\"\" Given an input with some number of up/down and newline, derive command. \"\"\" # Calculate the absolute change of ups/downs up = line.count(\"[A\") down = line.count(\"[B\") change = up - down # pushed down below history (maybe they are angry?) 
if change <= 0: return \"\" # Retrieve history, actually via a command run from the outside to get the file history = self.hist.run( container_name=self.uri.extended_name, out=openpty, history_file=self.settings.history_file, user=self.settings.user, ) history = [x for x in history.split(\"\\n\") if x] # No history, nothing to return if not history: return \"\" # The change is outside the length of history if change > len(history): return \"\" # here we are looking back up into history (negative index) newline = history[-1 * change] # Add back any characters typed AFTER the up/down presses newline += re.split(\"(\\[A|\\[B)\", line, 1)[-1] return newlineThe above might not be perfect, but it worked the best for everything that I tried!This allows us to issue a command that paks knows, press up to get it again, and then editit and have the command work correctly. Speaking of commands…CommandsThe core meat of paks is the commands that it recognizes. Every command has a base classthat is going to handle parsing a line (with a main command and optional args or kwargs, depending on the command),ensuring all required variables are passed (this is largely internal to the library and even a developer userdoesn’t need to think about it unless they want to change what is passed), and then providing functions for basic kinds ofexecution. So let’s step back and first look at how we find a command (or executor). Basically, once we have a newlineand we’ve parsed it per the above (looking up history and such) we can sniff it to see if it matches a knowncommand pattern:# If we have a newline (and possibly a command)if has_newline: self.run_executor(string_input, openpty) # Add derived line to the history os.write(openpty, terminal_input) string_input = \"\"The function “run_executor” is going to make this call if there is a Paks command and handle it.And no matter what, we reset our string input to be empty given that the user pressed enter, becausethey are going to start typing fresh. But before that, this function “run_executor” is going to seeif there are any known commands, and if so, to run them! That function looks like this:def run_executor(self, string_input, openpty): \"\"\" Given a string input, run executor \"\"\" # Get out early if it's not a Paks command (always starts with #) string_input = string_input.replace(\"[A\", \"\").replace(\"[B\", \"\") if not string_input.startswith(\"#\"): return # Do we have a matching executor? executor = self.commands.get_executor(string_input, out=openpty) if executor is not None: # Print any message it wants to the terminal before run... if executor.pre_message: print(\"\\n\\r\" + executor.pre_message) # Run it! result = executor.run( name=self.image, container_name=self.uri.extended_name, original=string_input, ) # And any message it wants to print after if result.message: print(\"\\r\" + result.message)The result object holds what you would expect - a return code, some message,and the basic outputs of the call. It’s up to the executor (command) to decidewhat to show the user. Some might not show anything beyond commands that are runwith the executor. 
So what does that function “get_executor” look like?This is where we delive into the commands module, where there is a simple lookup ofthe starting prefixes of commands matched to Command classes:# lookup of named commands and settingsdocker_commands = { \"#save\": SaveContainer, \"#inspect\": InspectContainer, \"#envload\": EnvLoad, \"#envhost\": EnvHost, \"#envsave\": EnvSave, \"#cp\": Copy, \"#size\": Size,}When I add a load functionality, all it will need to do is update this dictionary.And the reason those are “docker commands” is that you can imagine we eventuallysupport other container technologies, and the commands you run are going to vary.Each Command actually has a class attribute for the container types that are supported.Here is a snippet of the DockerCommands class attached to the client that we are calling “get_executor” on:class DockerCommands: # Required kwargs for any docker/podman command to run required = [\"container_name\", \"name\"] def __init__(self, container_tech): self.command = container_tech self.lookup = docker_commands def parse_name(self, cmd): parts = cmd.split(\" \") return parts.pop(0).replace(\"\\n\", \"\").replace(\"\\r\", \"\").strip() def has_command(self, name): name, _ = self.parse_name(name) return name in self.lookup @property def history(self): return History(self.command) def get_executor(self, name, out=None): \"\"\" Backend is required to update history \"\"\" name = self.parse_name(name) if name in self.lookup: return self.lookup[name](self.command, required=self.required, out=out)To focus on the last function, you basically see that we parse the line (name), and thensee if it’s in our lookup. If so, we return the initialized executor, and we need to addthe output source in case it needs to interact with the current terminal. The self.commandrefers to the container technology (e.g., docker or podman in this case).Then we can look at a particular command (e.g., inspect) and see it’s pretty simple! We have definedthe supported container technologies along with optional messages, and a main run function. Here is the commandto inspect, which will dump out the json manifest and optionally take a section:class InspectContainer(Command): supported_for = [\"docker\", \"podman\"] pre_message = \"Inspecting Container...\" def run(self, **kwargs): \"\"\" Inspect a container fully, or specific sections \"\"\" # Always run this first to make sure container tech is valid self.check(**kwargs) # These are both required for docker/podman container_name = self.kwargs[\"container_name\"] # inspect particular attributes provided as args if self.args: for section in self.args: result = self.run_command( [ self.tech, \"inspect\", \"--format\", \"\" % section.capitalize(), container_name, ] ) # Otherwise just dump the whole thing else: result = self.run_command([self.tech, \"inspect\", container_name]) if result: return result return self.return_success()You’ll now know the main Paks trick - because we are still running on the host,we can issue commands to the host while we are in the container! In the above, we can just type:#inspect#inspect configAnd see the output in the terminal! This is how a lot of the interactions with the host work.It’s kind of simple and silly, but also really cool when you see it work on the container!So the run function above, just as a reminder, is called by this part:result = executor.run( name=self.image, container_name=self.uri.extended_name, original=string_input,)And honestly, that’s the majority of Paks! 
🎉️DiscussionPaks has honestly been so fun to work on, despite long hours of trying to figure things out during evenings and weekends. I’m so excitedabout the ideas, and I want to share them with others because I think developer tools for containersare kind of lacking. Heck, I stayed up until like 4am writing this post. No, I don’t normally do that,I had some things on my mind, but it was an excellent use of the time, despite the fact that I woke up 4 hours later andI’m going to crash tonight (err tomorrow night… err now that I’m tweaking up the finishing touches to this post)!Next StepsI’m working on a “paks load” command that will let someone develop a Python modulewith some set of commands for their custom use case. The first thing I wanted to trywas to generate sboms for spack (e.g., “Generate sboms for this spack install in the containerand save them to my host so I can upload alongside the container to a registry). I hadsome previous work to use spack scripting, but ultimately this weekend did a pull requestto add sbom generation to spack proper. And then I’ll be able to work on the load commands.I also want to address some of the anticipated bugs I mentioned above, like properly setting “HISTCONTROL”to ensure we don’t save commands issued by the client to history, and possibly having a cleanup step on savethat removes the file. I haven’t added this yet is because if I’m developing in the container and want to say, move it from my local machine to HPC, I kind of want to have my history so I can lazily use it.But Really…We have some magic up our sleeves for what we are actually working on to inspire these ideas!I guess you’ll just have to wait for the future, because @alecbcs andI are both have vision and are a great tag team! 🎉️SecuritySo there are obviously security issues around a library like this - and I added notesto the documentation that I’ll re-iterate here. Paks is intended for use by a developerthat is in their own trusted environment, whether local or on HPC. Because there is an interactionwith the host, you wouldn’t use this in production someone to give users an ability to loadenvironments or save. You also wouldn’t want to save a development container with somethingprivate in history and push it. I’m still an advocate for, after development is done,pushing changed code to GitHub and having an automated build build, test, and deploy.Could we eventually have a production grade library to enable interactions inside thecontainer? Possibly, but it’s not Paks in Python in its current state. I thinkthat’s okay - we have to start small with ideas and go from there.Didn’t I see paks before?Yes, you did! A previous version was intended for making spack build caches on GitHub, but thatdidn’t work because you couldn’t build a spack package within a container and thenpull the same container and install it and hit the cache. I think this might work someday,hence why I haven’t completely deleted the code, but I couldn’t let a cute logo and colorscheme go to waste!So for now it’s on a separate branch but largely I am not working on it. If you want to see this branch,it’s still here!Thanks for reading friends! I hope this has been interesting and you might be inspired toalso work on better tooling for developers, even if that just means exploring the ideas.", + "content_html": "

I’ve recently been interested in developer workflows. Aside from being a developer, I feel like the tooling for our community, and especially for HPC or hybrid environments, is lacking. As a simple example, let’s ask a basic question:

    How do I start developing here and move it over there?

For the most part, creating a development container is fairly straightforward, and we can even bind source code to the host to work on in one editor terminal and then build and run or test in another. However, for the moving part, it gets shoddy. Our best bet is to rebuild the container with the most updated source code, push to a registry, and then pull down somewhere else. For a container that is a binary and not layers provided by a registry, we could even scp it. If we do this right, we will have an automated build and deploy that triggers when we merge new code into main, but do you see the problem? What about the code that we want to test that isn’t ready to merge? This is why we typically would need to manually push to a registry with some kind of “work in progress” tag and then pull somewhere else. Minimally we’d need to build fresh again, and then reproduce all the steps to set up our environment.
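For concreteness, here is that manual loop sketched in Python. This is hypothetical glue around the stock docker CLI, not anything Paks ships; the image and registry names (myapp, registry.example.com) are made up:

import subprocess

# Hypothetical sketch of the manual "move it over there" workflow: rebuild
# the image with the work-in-progress code, push it under a WIP tag, and
# then pull that tag on the destination machine.
def push_wip(image="myapp", registry="registry.example.com"):
    tag = f"{registry}/{image}:wip"
    subprocess.run(["docker", "build", "-t", tag, "."], check=True)
    subprocess.run(["docker", "push", tag], check=True)
    return tag  # on the other machine: docker pull that tag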

    Interactive Development Containers

Now I don’t have all the answers, but recently @alecbcs and I have been dreaming about what kinds of development environments we want, with functionality such as:

    1. Saving the container state without leaving it.
    2. Loading or saving or otherwise interacting with named environments.
    3. Inspecting or interacting with container metadata, also without leaving the container.
4. Moving files or checking the container’s size, again without leaving it.

And actually I won’t even get to answering the first question in this post about moving something from one place to another, but rest assured it is an important one. This post is about some prototype or fun testing work that we’ve started around these ideas. The playground for some of these early ideas has been Paks.

Paks is a Python library that I’m calling a developer wrapper for containers. Mind you, it’s more of a playground right now to experiment with ideas. But I’ve had so much fun even this early on that I want to share what I’ve learned.

    Wrapper

    Because Paks is a wrapper, you will run containers using the paks command. Here are a few quick examples.

$ paks run ubuntu
$ paks run --shell /bin/sh busybox
$ paks run --container-tech podman busybox

What is happening on the backend, which took me a bit to figure out, is that we need to run a subprocess, but create a pseudo-terminal to better watch and interact with it. This happens in the “interactive_command” function below. But unless you want your terminal to get wonky, we need to use termios to grab the current tty and make sure it gets restored no matter what at the end. That looks like this:

def interactive_command(self, cmd):
    """
    Ensure we always restore original TTY otherwise terminal gets messed up
    """
    # Controller to get history
    self.hist = self.commands.history

    # save original tty setting then set it to raw mode
    old_tty = termios.tcgetattr(sys.stdin)
    old_pty = termios.tcgetattr(sys.stdout)
    try:
        self._interactive_command(cmd)
    finally:
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
        termios.tcsetattr(sys.stdout, termios.TCSADRAIN, old_pty)

What happens if you don’t do that? Your terminal gets weird and wonky. And then in the interactive command function, this is where we launch a subprocess with a new pseudo-terminal:

tty.setraw(sys.stdin.fileno())

# open pseudo-terminal to interact with subprocess
openpty, opentty = pty.openpty()

# use os.setsid() make it run in a new process group, or bash job control will not be enabled
p = subprocess.Popen(
    cmd,
    preexec_fn=os.setsid,
    stdin=opentty,
    stdout=opentty,
    stderr=opentty,
    universal_newlines=True,
)

# Welcome to Paks!
self.welcome(openpty)

Using setsid as a pre-exec function ensures the child process is a new session and won’t exit, sort of akin to a daemon. So at face value, yes, it is doing exactly what you think - we are shelling into the container and watching the command line and looking for paks-known commands. And I didn’t use a Python keylogger because I found that keyboard requires sudo (like really?!) and pynput is really scary because it doesn’t just get keys from the terminal - it’s watching anything you type anywhere! That gave me the heebie jeebies. I hope there is some scanner for pypi that is looking for that package and checking it’s not being malicious.

All of the above said, and all the time spent, I’m not convinced that this exact method is the best way to be running commands from inside the container. There are other ideas that need to be tested!

    Structure

We could have talked about this first, but let me show you the basic structure of paks so you get an understanding of the components.

paks
# Backends are different wrappers, so logically we start with podman and docker
├── backends
│   ├── base.py
│   ├── docker.py
│   ├── __init__.py
│   └── podman.py
# The client is what you interact with on the command line. This shows the various commands available.
├── cli
│   ├── config.py
│   ├── env.py
│   ├── __init__.py
│   └── run.py
# This is a central controller for things
├── client.py
# Here's all the built-in, interactive commands paks supports!
├── commands
│   ├── command.py
│   ├── cp.py
│   ├── env.py
│   ├── history.py
│   ├── __init__.py
│   ├── inspect.py
│   └── state.py
├── defaults.py
├── env.py
├── logger.py
# Coming soon - load your own commands!
├── plugins.py
├── schemas.py
├── settings.py
├── settings.yml
├── templates.py
├── utils
└── version.py

So that should give you the gist - we have container wrappers (backends) and then commands that we can issue while we are inside the container. Let’s talk about them next.

    Saving State

The first thing I wanted to try with Paks was to save a container state, but without needing to open a separate terminal and save from the outside. The use case for this is that given I’m in an interactive container and I’ve made some changes, I don’t want to exit and rebuild. All y’all reproducibility folks can stop wincing, and realize that we also need more temporary or throwaway development environments like this. Reproducibility is important, but mostly for the final production thing, and only up to a level of not giving us pain. So how might I do this?

    For paks, while you are inside the container (let’s say ubuntu) you simply ask to #save:

$ paks run ubuntu
# touch PANCAKES
# #save
Saving container...
sha256:d82aaa268feb59344cf31a757ce7f5c0caa6a6bbd10b8d0af1d55cdbc50b609b
[+] Building 0.2s (5/5) FINISHED
...
=> => writing image sha256:f58ae524d8644400b33c078f19612cba7849ef8f3ea158e2291ac697a4129080
=> => naming to docker.io/library/busybox-saved
Untagged: dockerio-busybox-joyous-hippo-3922-gloopy-peanut-9044:latest
Deleted: sha256:d82aaa268feb59344cf31a757ce7f5c0caa6a6bbd10b8d0af1d55cdbc50b609b
Deleted: sha256:f58ae524d8644400b33c078f19612cba7849ef8f3ea158e2291ac697a4129080
Successfully saved container! ⭐️

    And then you can see that there is an ubuntu-saved container!

$ docker images | grep ubuntu
ubuntu-saved    latest    93e336d994de   2 minutes ago   72.8MB
ubuntu          latest    54c9d81cbb44   7 days ago      72.8MB

So this has saved me some tiny bit of energy to open up another terminal, remember how to docker commit, and then also rebuild with a squash to minimize the layers (as there is a maximum number we don’t want to hit). What Paks could then eventually do is make it easy to move this entire container between places, e.g., from your local machine to HPC without a hitch. I haven’t started to work on that yet because this is a fun side project.
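As a reference point, here is roughly the manual equivalent of #save, sketched in Python. This is a hypothetical helper, not the Paks implementation; docker commit and the experimental docker build --squash flag are the stock pieces it wraps:

import subprocess

# Snapshot a running container into an image, then rebuild it squashed so
# the layer count stays low. Hypothetical helper for illustration only.
def manual_save(container_id, image_name="ubuntu-saved"):
    subprocess.run(["docker", "commit", container_id, image_name], check=True)
    # "docker build -" reads the Dockerfile from stdin; --squash is experimental
    subprocess.run(
        ["docker", "build", "--squash", "-t", image_name, "-"],
        input=f"FROM {image_name}\n".encode(),
        check=True,
    )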

    Environments

One thing I do a lot is use GitHub tokens to do fun stuff with the API. I usually need to keep this in some hidden file, then find it, open it, copy paste it, and export it in the container. And then I do that a million times when I have to run a new container. But with Paks, we can create a named environment on the host (a file to source with exports):

$ paks env edit github

You can also quickly show an environment:

$ paks env show github
GITHUB_TOKEN=xxxxxxxxxxx

    And then in our container, as many times as we need, load it seamlessly!

root@9ec6c3d43591:/# #envload github
Loading environment...
Successfully loaded environment github
root@9ec6c3d43591:/#  export GITHUB_TOKEN=xxxxxxxxx
root@9ec6c3d43591:/#  export GITHUB_USER=dinosaur

    If only my GitHub username was dinosaur! 😁️ Is it loaded?

root@9ec6c3d43591:/# env | grep GITHUB
GITHUB_USER=dinosaur
GITHUB_TOKEN=xxxxxxxxx

Okay, so to be fair, there are a bunch of other commands for inspection and size, and I’m not going to go through them all! You can see them in the Paks user guide. And I don’t mean to say you should use this - you probably shouldn’t. But you might be interested to try it out.

    Parsing Keystrokes

So the most interesting part of this project has been learning about input from the terminal, and it is actually the reason I wanted to write this post: to share what I learned. Let’s go back to the interactive function where we ran subprocess and created a pseudo-terminal. There actually is a pretty simple way to watch what is being typed:

# This is the subprocess return code, keep going until we are done (e.g. have a return code)
while p.poll() is None:

    # Wait for io completion (e.g., see man select)
    r, w, e = select.select([sys.stdin, openpty], [], [])

    # Was it a new input?
    if sys.stdin in r:
        terminal_input = os.read(sys.stdin.fileno(), 10240)
        new_char = terminal_input.decode("utf-8")
        # Do something with what you see here

    # Was it a new output?
    elif openpty in r:
        o = os.read(openpty, 10240)
        if o:
            os.write(sys.stdout.fileno(), o)

    I learned a lot from this! Let’s talk about it.

    Debugging

So the first thing I learned is that my typical “import IPython” and “IPython.embed()” isn’t going to work as easily as normal, because (at least superficially) I didn’t see a way to have it sort of injected into the process. Anything that is interactive in that loop is still (conceptually) running on my host. So when I use IPython it does some weird stuff with carriage returns, but it’s still possible to interact with it a little bit. So what I wound up doing, so I could easily see every keypress, was to write to a file in append mode:

with open('/tmp/file.txt', 'a') as fd:
    fd.write(new_char)

This was kind of neat because I could be typing in one terminal, and then have a file open (watching it) that updates with changes, and I’d get a sense of what is going on. I could append anything to this file to debug. And this is also really different from how we normally use subprocess, where maybe we will parse entire lines at once:

p = subprocess.Popen(['python','thing.py'], stdout=subprocess.PIPE)
while True:
  line = p.stdout.readline()
  if not line:
    break

because we are reading one character at a time! So what we essentially need to do is keep a string that we continue appending to unless there is a newline, up or down, or left or right to indicate moving the cursor.
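Here is a toy model of that accumulation logic - my own snippet, not the actual Paks loop - that you can run directly; the byte 0x7f (127) stands in for backspace:

# Toy model (not Paks code): feed characters one at a time and keep a buffer
# until enter arrives; 0x7f (127) is the backspace character.
def feed(buffer, char):
    if char in ("\n", "\r"):
        print("complete line:", repr(buffer))
        return ""                # start fresh after enter
    if ord(char) == 127:
        return buffer[:-1]       # backspace trims the buffer
    return buffer + char

buffer = ""
for ch in "ls -lt\x7f\x7fla\n":  # type "ls -lt", backspace twice, type "la"
    buffer = feed(buffer, ch)    # prints: complete line: 'ls -la'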

    Ascii Characters

I started to quickly see characters that my editor didn’t know - e.g., likely escape sequences and other ascii that showed up in the little question mark box. I quickly realized that I was seeing ascii codes (and some characters that couldn’t be parsed), so the solution was to look at the ord of the character and compare to a number. For example, for a backspace the number is 127. So to act on it I might do:

# if we have a backspace (ord 127)
if len(new_char) == 1 and ord(new_char) == 127:

    # This is our in progress line. If we have content, backspace!
    if len(string_input) > 0:
        string_input = string_input[:-1]

    # But if we don't, just write the character for the person to see and
    # keep collecting new characters (continue in the loop)
    if not string_input:
        os.write(openpty, terminal_input)
        continue

# Otherwise (not a backspace) add to our growing line to parse further!
else:
    string_input = string_input + new_char

The above is basically looking for a backspace, and if we find one, we remove one character from the line we are assembling. Otherwise we just add the new character to the line.

    xterm sequences

And a similar thing happens for pressing up/down and right/left, except the terminal parses them as “[A”, “[B”, “[C”, and “[D”, respectively, and often with an escape sequence first. There are some nice tables here for the interested reader! And this was also the point that I realized how challenging parsing input is! Along with needing to account for every character, you also need to account for platform differences. That’s also why I view this library as mostly for development and thinking, or at least for mostly Linux and bash shells, because I’m not sure I could ever handle them all. So for the purposes of my library, for now I decided I’m not going to handle moving left and right, nor do I want to deal with weird extra ascii characters that are added, so I just clean them up.

# Get rid of left/right
string_input = string_input.replace("[D", "").replace("[C", "")

# Replace weird characters and escape sequences
string_input = self.clean(string_input)

Yes, that probably means some of your ninja shortcuts won’t work perfectly when running paks, and if you absolutely want one to be parsed please let me know and we can add it.
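The self.clean helper above isn’t shown in the post; a plausible stand-in - my guess, not the actual Paks implementation - strips ANSI escape sequences and any remaining non-printable characters:

import re

# Matches CSI-style ANSI escape sequences like "\x1b[0m" or "\x1b[2J"
ANSI_ESCAPE = re.compile(r"\x1b\[[0-9;]*[a-zA-Z]")

def clean(string_input):
    string_input = ANSI_ESCAPE.sub("", string_input)
    # keep printable characters plus the newlines we still need to detect
    return "".join(c for c in string_input if c.isprintable() or c in "\r\n")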

    Newlines

So the gold nugget of content that Paks is interested in is when you press enter. This means you’ve finished typing something and there is some version of a newline or carriage return. This is also a pretty variable thing depending on the platform you are on - newlines can come in very different forms! I tried to honor the forms I see most often:

    1. \\r\\n: Windows
    2. \\n: UNIX (e.g., Mac OSX)
    3. \\r: Mac (pre OSX)
has_newline = "\n" in string_input or "\r" in string_input
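As a quick check (my own snippet, not from the Paks source), the test above catches all three styles:

for sample in ["ls\r\n", "ls\n", "ls\r"]:
    has_newline = "\n" in sample or "\r" in sample
    print(repr(sample), has_newline)  # True in every case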

At this point, we can start acting on what we see. E.g., if the user has asked for any kind of exit, I honor it.

# Universal exit command
if "exit" in string_input and has_newline:
    print("\n\rContainer exited.\n\r")
    return self.uri.extended_name

The return of the name at the end is to handle cleaning up the image, which was allocated a temporary name.

    History

One of the more interesting parts of this project was realizing that people use history, a lot. At least I do. This is going to appear as an up or down press, and only when a newline is found is some item in history re-executed. So first let’s look at exploring history with up/down. There are two cases - pressing up/down without a newline:

# Pressing up or down, but not enter
if ("[A" in string_input or "[B" in string_input) and not has_newline:
    string_input = self.get_history(string_input, openpty)
    os.write(openpty, terminal_input)
    continue

    And with one:

# Pressing up or down with enter
if ("[A" in string_input or "[B" in string_input) and has_newline:
    string_input = self.get_history(string_input, openpty)
    os.write(openpty, terminal_input)

If we don’t have a newline, we add a continue to keep parsing characters the user is typing. If we do have a newline, we let the loop keep running to keep parsing the line of history we retrieved. But let’s step back and talk about that history. We basically want to retrieve whatever line of history the user is asking for, because to us it just looks like a series of up and down presses. You could imagine restoring the previous line, and then editing it. This actually proved to be quite challenging, because I realized that (by default) when we start running a container (well, ubuntu and centos) the history is stored in memory and not written to ~/.bash_history. This led to this thread and some people coming in to quickly help, and others coming in just to say “Why are you doing this with containers it makes no sense stop.” Yeah, right. If I listened to every person that has ever told me to stop working on something because “REASONS!” I wouldn’t ultimately work on much at all.

The short answer was that I needed a function to get a line of history based on the number of times up or down was pressed. For my first attempt I said “nevermind this, I’ll just save my own history!” but that got hugely complicated very fast, because it turns out we don’t just stupidly type commands over and over; we are constantly using more characters on the keyboard than letters and numbers, retrieving old things to edit, updating again. In practice I found that I could keep up with simple parsing, but it would get out of sync over a longer session. There is also the issue that people can tweak the amount of history saved, or how it’s saved, and there are a set of environment variables and commands to do that. So most containers will start running and save history to memory and not file (and this makes sense in case there is sensitive information), but it was problematic for me because I couldn’t parse it. For example, when someone presses up and down a bunch of times, I might see:

    [A[A[A[A[A[B[A

This is a reference to some previous command that I can only find in history, given I’m parsing the input/output as I am. So for my second attempt (well, maybe second through tenth) I tried different variations of parsing the history. If you looked at the tweet you’ll see we need to run:

    $ history -a

to start writing what’s in memory to file. I didn’t want to do this on every command, because along with the user seeing it and the UI being awful, it was just too much. Instead, I realized that I had a small opportunity when the user first shells into the container (and is expecting a jump in their UI) to run whatever I need and then clear the terminal. So I ran it there, right before a clear and welcome message.

def welcome(self, openpty):
    """
    Welcome the user and clear terminal
    """
    # Don't add commands executed to history
    os.write(openpty, self.encode(" export PROMPT_COMMAND='history -a'\r"))
    os.write(openpty, self.encode(" clear\r"))
    os.write(openpty, self.encode(" ### Welcome to PAKS! ###\r"))

And with this method you aren’t aware of the extra commands at all! And did you notice the spaces above? That’s also another trick! Any command that you type with a leading space won’t be saved to history, and this is thanks to HISTCONTROL, which has an ignorespace option. I think most people / containers set it to ignore space and to ignore duplicates:

root@1c268386714a:/# echo $HISTCONTROL
ignoredups:ignorespace

That said, I don’t explicitly try to reset this in the container, so that could be a bug if there is a container base that doesn’t do that. And I’m pretty sure centos doesn’t come with clear! I’ll likely need to work on this a bit more.

    For now, please consider this only working for debian/ubuntu bases and we can inspect the other ones later!

Okay, so now let’s look at the function to get history (self.hist.run). For now, just ignore the command to get the history; that’s actually done via a Paks command that we will talk about after. Here is what is going on:

def get_history(self, line, openpty):
    """
    Given an input with some number of up/down and newline, derive command.
    """
    # Calculate the absolute change of ups/downs
    up = line.count("[A")
    down = line.count("[B")
    change = up - down

    # pushed down below history (maybe they are angry?)
    if change <= 0:
        return ""

    # Retrieve history, actually via a command run from the outside to get the file
    history = self.hist.run(
        container_name=self.uri.extended_name,
        out=openpty,
        history_file=self.settings.history_file,
        user=self.settings.user,
    )
    history = [x for x in history.split("\n") if x]

    # No history, nothing to return
    if not history:
        return ""

    # The change is outside the length of history
    if change > len(history):
        return ""

    # here we are looking back up into history (negative index)
    newline = history[-1 * change]

    # Add back any characters typed AFTER the up/down presses
    newline += re.split(r"(\[A|\[B)", line, 1)[-1]
    return newline

The above might not be perfect, but it worked the best for everything that I tried! This allows us to issue a command that paks knows, press up to get it again, and then edit it and have the command work correctly.
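As a quick sanity check of the up/down arithmetic above (the history list here is made up for illustration):

line = "[A[A[A[A[A[B[A"                        # the earlier example: six ups, one down
change = line.count("[A") - line.count("[B")   # net change of 5
history = ["make", "ls", "git status", "echo hi", "pwd", "ls -la"]
print(history[-change])                        # -> "ls", five entries back from the end

Speaking of commands…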

    Commands

The core meat of paks is the commands that it recognizes. Every command has a base class that is going to handle parsing a line (with a main command and optional args or kwargs, depending on the command), ensuring all required variables are passed (this is largely internal to the library, and even a developer user doesn’t need to think about it unless they want to change what is passed), and then providing functions for basic kinds of execution. So let’s step back and first look at how we find a command (or executor). Basically, once we have a newline and we’ve parsed it per the above (looking up history and such), we can sniff it to see if it matches a known command pattern:

# If we have a newline (and possibly a command)
if has_newline:
    self.run_executor(string_input, openpty)

    # Add derived line to the history
    os.write(openpty, terminal_input)
    string_input = ""

The function “run_executor” is going to make this call if there is a Paks command, and handle it. And no matter what, we reset our string input to be empty given that the user pressed enter, because they are going to start typing fresh. But first, “run_executor” is going to see if there are any known commands, and if so, run them! That function looks like this:

def run_executor(self, string_input, openpty):
    """
    Given a string input, run executor
    """
    # Get out early if it's not a Paks command (always starts with #)
    string_input = string_input.replace("[A", "").replace("[B", "")
    if not string_input.startswith("#"):
        return

    # Do we have a matching executor?
    executor = self.commands.get_executor(string_input, out=openpty)
    if executor is not None:

        # Print any message it wants to the terminal before run...
        if executor.pre_message:
            print("\n\r" + executor.pre_message)

        # Run it!
        result = executor.run(
            name=self.image,
            container_name=self.uri.extended_name,
            original=string_input,
        )

        # And any message it wants to print after
        if result.message:
            print("\r" + result.message)

The result object holds what you would expect - a return code, some message, and the basic outputs of the call. It’s up to the executor (command) to decide what to show the user. Some might not show anything beyond commands that are run with the executor. So what does that function “get_executor” look like? This is where we delve into the commands module, where there is a simple lookup of the starting prefixes of commands matched to Command classes:

# lookup of named commands and settings
docker_commands = {
    "#save": SaveContainer,
    "#inspect": InspectContainer,
    "#envload": EnvLoad,
    "#envhost": EnvHost,
    "#envsave": EnvSave,
    "#cp": Copy,
    "#size": Size,
}

When I add a load functionality, all it will need to do is update this dictionary. And the reason those are “docker commands” is that you can imagine we eventually support other container technologies, and the commands you run are going to vary. Each Command actually has a class attribute for the container types that are supported. Here is a snippet of the DockerCommands class attached to the client that we are calling “get_executor” on:

class DockerCommands:

    # Required kwargs for any docker/podman command to run
    required = ["container_name", "name"]

    def __init__(self, container_tech):
        self.command = container_tech
        self.lookup = docker_commands

    def parse_name(self, cmd):
        parts = cmd.split(" ")
        return parts.pop(0).replace("\n", "").replace("\r", "").strip()

    def has_command(self, name):
        # parse_name returns a single string, so no tuple unpacking is needed
        name = self.parse_name(name)
        return name in self.lookup

    @property
    def history(self):
        return History(self.command)

    def get_executor(self, name, out=None):
        """
        Backend is required to update history
        """
        name = self.parse_name(name)
        if name in self.lookup:
            return self.lookup[name](self.command, required=self.required, out=out)

To focus on the last function, you basically see that we parse the line (name), and then see if it’s in our lookup. If so, we return the initialized executor, and we need to add the output source in case it needs to interact with the current terminal. The self.command refers to the container technology (e.g., docker or podman in this case).
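Putting that together, resolving a typed line to an executor could look like this (assuming the classes from the lookup above are importable; the printed output is what we would expect):

# Resolve "#inspect config" to its executor class using the lookup above
commands = DockerCommands("docker")
executor = commands.get_executor("#inspect config\r\n")
print(type(executor).__name__)  # -> InspectContainer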

Then we can look at a particular command (e.g., inspect) and see it’s pretty simple! We have defined the supported container technologies along with optional messages, and a main run function. Here is the command to inspect, which will dump out the json manifest and optionally take a section:

class InspectContainer(Command):

    supported_for = ["docker", "podman"]
    pre_message = "Inspecting Container..."

    def run(self, **kwargs):
        """
        Inspect a container fully, or specific sections
        """
        # Always run this first to make sure container tech is valid
        self.check(**kwargs)

        # These are both required for docker/podman
        container_name = self.kwargs["container_name"]

        # inspect particular attributes provided as args
        if self.args:
            for section in self.args:
                result = self.run_command(
                    [
                        self.tech,
                        "inspect",
                        "--format",
                        # The Go template here was eaten by the site build; a
                        # "{{.%s}}" template (e.g. "{{.Config}}") is my best
                        # guess at the original
                        "{{.%s}}" % section.capitalize(),
                        container_name,
                    ]
                )

        # Otherwise just dump the whole thing
        else:
            result = self.run_command([self.tech, "inspect", container_name])
            if result:
                return result

        return self.return_success()
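InspectContainer subclasses a Command base class that the post doesn’t show in full; here is a rough guess at its parsing layer, just to make the args/kwargs handling concrete (hypothetical, not the actual Paks code):

# Hypothetical sketch of the Command base class parsing layer:
# "#cmd arg key=value" becomes positional args and kwargs.
class Command:
    supported_for = []   # container technologies the command supports
    pre_message = ""     # optional message printed before the run

    def __init__(self, tech, required=None, out=None):
        self.tech = tech
        self.required = required or []
        self.out = out
        self.args = []
        self.kwargs = {}

    def parse(self, line):
        for token in line.strip().split()[1:]:
            if "=" in token:
                key, value = token.split("=", 1)
                self.kwargs[key] = value
            else:
                self.args.append(token)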

You’ll now know the main Paks trick - because we are still running on the host, we can issue commands to the host while we are in the container! In the above, we can just type:

#inspect
#inspect config

And see the output in the terminal! This is how a lot of the interactions with the host work. It’s kind of simple and silly, but also really cool when you see it work in the container! So the run function above, just as a reminder, is called by this part:

result = executor.run(
    name=self.image,
    container_name=self.uri.extended_name,
    original=string_input,
)
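The result that comes back is the object described earlier, which the post doesn’t show; a minimal guess at its shape (a hypothetical stand-in; the real Paks class may differ):

from dataclasses import dataclass, field

# Hypothetical stand-in for the result object: a return code, an optional
# message, and whatever output lines the command captured.
@dataclass
class Result:
    returncode: int = 0
    message: str = ""
    output: list = field(default_factory=list)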

    And honestly, that’s the majority of Paks! 🎉️

    Discussion

Paks has honestly been so fun to work on, despite long hours of trying to figure things out during evenings and weekends. I’m so excited about the ideas, and I want to share them with others, because I think developer tools for containers are kind of lacking. Heck, I stayed up until like 4am writing this post. No, I don’t normally do that, I had some things on my mind, but it was an excellent use of the time, despite the fact that I woke up 4 hours later and I’m going to crash tonight (err, tomorrow night… err, now that I’m putting the finishing touches on this post)!

    Next Steps

I’m working on a “paks load” command that will let someone develop a Python module with some set of commands for their custom use case. The first thing I wanted to try was to generate sboms for spack (e.g., “Generate sboms for this spack install in the container and save them to my host so I can upload them alongside the container to a registry”). I had some previous work to use spack scripting, but ultimately this weekend I did a pull request to add sbom generation to spack proper. And then I’ll be able to work on the load commands. I also want to address some of the anticipated bugs I mentioned above, like properly setting “HISTCONTROL” to ensure we don’t save commands issued by the client to history, and possibly having a cleanup step on save that removes the history file. The reason I haven’t added this yet is that if I’m developing in the container and want to, say, move it from my local machine to HPC, I kind of want to have my history so I can lazily use it.

    But Really…

We have some magic up our sleeves for what we are actually working on to inspire these ideas! I guess you’ll just have to wait for the future, because @alecbcs and I both have vision and are a great tag team! 🎉️

    Security

So there are obviously security issues around a library like this - and I added notes to the documentation that I’ll re-iterate here. Paks is intended for use by a developer that is in their own trusted environment, whether local or on HPC. Because there is an interaction with the host, you wouldn’t use this in production to give users an ability to load environments or save containers. You also wouldn’t want to save a development container with something private in history and push it. I’m still an advocate for, after development is done, pushing changed code to GitHub and having an automated workflow build, test, and deploy. Could we eventually have a production grade library to enable interactions inside the container? Possibly, but it’s not Paks in Python in its current state. I think that’s okay - we have to start small with ideas and go from there.

    Didn’t I see paks before?

Yes, you did! A previous version was intended for making spack build caches on GitHub, but that didn’t work because you couldn’t build a spack package within a container, then pull the same container, install it, and hit the cache. I think this might work someday, hence why I haven’t completely deleted the code, but I couldn’t let a cute logo and color scheme go to waste! So for now it’s on a separate branch, but largely I am not working on it. If you want to see this branch, it’s still here!

Thanks for reading friends! I hope this has been interesting and that you might be inspired to also work on better tooling for developers, even if that just means exploring the ideas.

    ", + "url": "https://hpc.social/personal-blog/2022/interactive-development-containers/", + + + + + + "date_published": "2022-02-15T12:30:00-07:00", + "date_modified": "2022-02-15T12:30:00-07:00", + + "author": "Vanessasaurus" + + }, + + { + "id": "https://hpc.social/personal-blog/2022/developing-managed-vs-self-hosted-software/", + "title": "Developing managed vs self-hosted software", + "summary": null, + "content_text": "I’ve done some work lately with teams that deliver their products in very different ways, and it has me thinking about how much our “best practices” depend on a product’s delivery and operations model. I’ve had a bunch of conversations about this tensionOn the one hand, some of the teams I’ve worked with build software services that are developed and operated by the same team, and where the customers (internal or external) directly make use of the operated service. These teams try to follow what I think of as “conventional” SaaS best practices:Their development workflow prioritizes iteration speed above all elseThey tend to deploy from HEAD, or close to it, in their source repositoryIn almost all cases, branches are short-lived for feature developmentThey’ve built good automated test suites and well-tuned CI/CD pipelinesReleases are very frequentThey make extensive use of observability tooling, often using third-party SaaS for thisFast roll-back is prioritized over perfect testing ahead of timeWhile their user documentation is mostly good, their operations documentation tends to be “just good enough” to onboard new team members, and a lot of it lives in SlackHowever, we also have plenty of customers who deploy our software to their own systems, whether in the cloud or on-premise. (Some of them don’t even connect to the Internet on a regular basis!) The development workflow for software aimed at these customers looks rather different:Deploys are managed by the customer, and release cycles are longerThese teams do still have CI/CD and extensive automated tests… but they may also have explicit QA steps before releasesThere tend to be lots of longer-lived version branches, and even “LTS” branches with their own roadmapsLogging is prioritized over observability, because they can’t make assumptions about the customer toolingThey put a lot more effort into operational documentation, because most operators will not also be developersFrom a developer perspective, of course, this all feels much more painful! The managed service use case feels much more comfortable to develop for, and most of the community tooling and best practices for web development seems to optimize for that model.But from a sysadmin perspective, used to mostly operating third-party software, the constraints of self-hosted development are all very familiar. And even managed service teams often rely on third-party software developed using this kind of model, relying on LTS releases of Linux distributions and pinning major versions of dependencies.The biggest challenge I’ve seen, however, is when a development team tries to target the same software at both use cases. 
As far as I can tell, it’s very difficult to simultaneously operate a reliable service that is being continuously developed and deployed, and to provide predictable and high-quality releases to self-hosted customers.So far, I’ve seen this tension resolved in three different ways:The internal service becomes “just another customer”, operating something close to the latest external release, resulting in a slower release cycle for the internal serviceFast development for the internal service gets prioritized, with external releases becoming less frequent and including bigger and bigger changesInternal and external diverge completely, with separate development teams taking over (and often a name change for one of them)I don’t really have a conclusion here, except that I don’t really love any of these results. /sighIf you’re reading this and have run into similar tensions, how have you seen this resolved? Have you seen any success stories in deploying the same code internally and externally? Or alternatively — any interesting stories of failure to share? Feel free to send me an email, I’d be interested to hear from you.", + "content_html": "

I’ve done some work lately with teams that deliver their products in very different ways, and it has me thinking about how much our “best practices” depend on a product’s delivery and operations model. I’ve had a bunch of conversations about this tension.

    On the one hand, some of the teams I’ve worked with build software services that are developed and operated by the same team, and where the customers (internal or external) directly make use of the operated service. These teams try to follow what I think of as “conventional” SaaS best practices:

    • Their development workflow prioritizes iteration speed above all else
    • They tend to deploy from HEAD, or close to it, in their source repository
      • In almost all cases, branches are short-lived for feature development
    • They’ve built good automated test suites and well-tuned CI/CD pipelines
    • Releases are very frequent
    • They make extensive use of observability tooling, often using third-party SaaS for this
    • Fast roll-back is prioritized over perfect testing ahead of time
    • While their user documentation is mostly good, their operations documentation tends to be “just good enough” to onboard new team members, and a lot of it lives in Slack

    However, we also have plenty of customers who deploy our software to their own systems, whether in the cloud or on-premise. (Some of them don’t even connect to the Internet on a regular basis!) The development workflow for software aimed at these customers looks rather different:

    • Deploys are managed by the customer, and release cycles are longer
    • These teams do still have CI/CD and extensive automated tests… but they may also have explicit QA steps before releases
    • There tend to be lots of longer-lived version branches, and even “LTS” branches with their own roadmaps
    • Logging is prioritized over observability, because they can’t make assumptions about the customer tooling
    • They put a lot more effort into operational documentation, because most operators will not also be developers

    From a developer perspective, of course, this all feels much more painful! The managed service use case feels much more comfortable to develop for, and most of the community tooling and best practices for web development seems to optimize for that model.

    But from a sysadmin perspective, used to mostly operating third-party software, the constraints of self-hosted development are all very familiar. And even managed service teams often rely on third-party software developed using this kind of model, relying on LTS releases of Linux distributions and pinning major versions of dependencies.

    The biggest challenge I’ve seen, however, is when a development team tries to target the same software at both use cases. As far as I can tell, it’s very difficult to simultaneously operate a reliable service that is being continuously developed and deployed, and to provide predictable and high-quality releases to self-hosted customers.

    So far, I’ve seen this tension resolved in three different ways:

    • The internal service becomes “just another customer”, operating something close to the latest external release, resulting in a slower release cycle for the internal service
    • Fast development for the internal service gets prioritized, with external releases becoming less frequent and including bigger and bigger changes
    • Internal and external diverge completely, with separate development teams taking over (and often a name change for one of them)

    I don’t really have a conclusion here, except that I don’t really love any of these results. /sigh

If you’re reading this and have run into similar tensions, how have you seen this resolved? Have you seen any success stories in deploying the same code internally and externally? Or alternatively — any interesting stories of failure to share? 😉 Feel free to send me an email, I’d be interested to hear from you.

    ", + "url": "https://hpc.social/personal-blog/2022/developing-managed-vs-self-hosted-software/", + + + + + + "date_published": "2022-02-12T16:00:00-07:00", + "date_modified": "2022-02-12T16:00:00-07:00", + + "author": "Thinking Out Loud" + + } + + ] +} diff --git a/feed.xml b/feed.xml new file mode 100644 index 0000000..f86a34a --- /dev/null +++ b/feed.xml @@ -0,0 +1,6836 @@ +Jekyll2023-08-03T20:41:18-06:00https://hpc.social/personal-blog/feed.xmlhpc.social - Aggregated Personal BlogShared personal experiences and storieshpc.socialinfo@hpc.socialLSF client on macOS - submitting from your laptop2023-03-01T19:10:58-07:002023-03-01T19:10:58-07:00https://hpc.social/personal-blog/2023/lsf-client-on-macos-submitting-from-your-laptop<p>In traditional HPC environments, login nodes are typically used as an access point for users to submit +and manage jobs. Although login nodes are still used today, HPC environments are +increasingly being used by a broad class of users with domain expertise and not necessarily IT experts. +In other words, such users may be more comfortable using their native desktop +environment rather than the CLI. Given the factors, in the commercial HPC space, organizations are always looking +for ways to lower the barto access and interact with HPC environments.</p> + +<p>Spectrum LSF provides many ways to submit and manage jobs in an HPC cluster. For power users, the rich +CLI functionality exists. There is also an available web-based interface for job +submission and management which provides customizable application templates to greatly simplify job sub +mission, while hiding complexity of the underlying infrastructure. A RESTful API +is also available to users of IBM Spectrum LSF Application Center or IBM Spectrum LSF Suites, which ena +bles organizations to access the HPC environment via web services.</p> + +<p>I&rsquo;ve written previously in detail about the the LSF web-based interface in the blog +<a href="https://www.gaborsamu.com/blog/easy_hpc/">The Easy HPC Button</a>. Here, we&rsquo;ll take a closer look at the +available LSF client for macOS that uses the RESTful API. First, a bit about LSF clients. LSF clients +can access resources on LSF server hosts without running the LSF daemons. LSF clients don&rsquo;t require a software +license and from clients, users can run all of the familiar LSF commands. Additionally, LSF clients are +submit only, and don&rsquo;t execute jobs.</p> + +<p><strong>Note:</strong> The macOS LSF client uses the LSF RESTful API. This means that it will function in environments +running LSF Standard Edition with LSF Application Center or LSF Suites.</p> + +<p><strong>Configuration</strong></p> + +<p>The configuration used for the example below is as follows:</p> + +<table> +<thead> +<tr> +<th style="text-align: left;">Hostname</th> +<th>OS</th> +<th>Detail</th> +</tr> +</thead> +<tbody> +<tr> +<td style="text-align: left;"><em>kilenc</em></td> +<td>CentOS Stream 8.4</td> +<td>LSF Suite for HPC v10.2.0.13</td> +</tr> +<tr> +<td style="text-align: left;"><em>My-Macbook-Air</em></td> +<td>macOS Ventura 13.2.1 (Apple M1)</td> +<td>LSF client</td> +</tr> +</tbody> +</table> +<ol> +<li>On the Spectrum LSF Suite for HPC management host (<em>kilenc</em>), add the following variables to the Parameter +section in the file lsf.cluster.<em>name</em>. The FLOAT_CLIENTS variable determines how many floating clients can +join the LSF cluster, The FLOAT_CLIENTS_ADDR_RANGE specifies the allowable IP addresses. 
In this case, the client system is on a 192.168.x.x network.</li>
</ol>

<div class="highlight"><pre><code class="language-plaintext">Begin Parameters
FLOAT_CLIENTS=2
FLOAT_CLIENTS_ADDR_RANGE=192.*
End Parameters</code></pre></div>

<ol start="2">
<li>To make the changes take effect, issue the following commands as the LSF administrator:</li>
</ol>

<div class="highlight"><pre><code class="language-plaintext">lsadmin reconfig
badmin reconfig</code></pre></div>

<ol start="3">
<li>
<p>Obtain the tarball <em>pacdesktop_client10.2.0.13_macos-x86_64.tar</em>. For users with an LSF entitlement this package is available on
<a href="https://www.ibm.com/support/fixcentral/">IBM Fix Central</a>. Note that this package will work on systems with Apple M1 silicon through emulation.</p>
</li>
<li>
<p>Open a Terminal on the macOS client system, copy the tarball to the $HOME/Desktop directory of user lsfuser, and uncompress the tarball.</p>
</li>
</ol>

<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air Desktop % pwd
/Users/lsfuser/Desktop
lsfuser@My-MacBook-Air Desktop % ls -la pacdesktop_client10.2.0.13_macos-x86_64.tar
-rw-r--r--@ 1 lsfuser staff 18452480 27 Feb 17:12 pacdesktop_client10.2.0.13_macos-x86_64.tar
lsfuser@My-MacBook-Air Desktop % tar -xvf pacdesktop_client10.2.0.13_macos-x86_64.tar
x LSF_Desktop_Client/
x LSF_Desktop_Client/bapp
x LSF_Desktop_Client/btop
x LSF_Desktop_Client/bwait
x LSF_Desktop_Client/lseligible
x LSF_Desktop_Client/bsla
x LSF_Desktop_Client/blparams
x LSF_Desktop_Client/bhpart
x LSF_Desktop_Client/bclusters
x LSF_Desktop_Client/blstartup
x LSF_Desktop_Client/lsacct
x LSF_Desktop_Client/bsub
x LSF_Desktop_Client/bugroup
x LSF_Desktop_Client/bpeek
x LSF_Desktop_Client/bacct
x LSF_Desktop_Client/brequeue
x LSF_Desktop_Client/bjgroup
x LSF_Desktop_Client/bslots
x LSF_Desktop_Client/lsrun
x LSF_Desktop_Client/bjobs
x LSF_Desktop_Client/lshosts
x LSF_Desktop_Client/lsload
x LSF_Desktop_Client/brlainfo
x LSF_Desktop_Client/bresources
x LSF_Desktop_Client/bladmin
x LSF_Desktop_Client/bstatus
x LSF_Desktop_Client/bmod
x LSF_Desktop_Client/bpost
x LSF_Desktop_Client/lsid
x LSF_Desktop_Client/bentags
x LSF_Desktop_Client/ch
x LSF_Desktop_Client/bchkpnt
x LSF_Desktop_Client/bparams
x LSF_Desktop_Client/bjdepinfo
x LSF_Desktop_Client/bgmod
x LSF_Desktop_Client/brestart
x LSF_Desktop_Client/lsltasks
x LSF_Desktop_Client/blusers
x LSF_Desktop_Client/paclogon
x LSF_Desktop_Client/regnotify
x LSF_Desktop_Client/cacert.pem
x LSF_Desktop_Client/bresume
x LSF_Desktop_Client/blstat
x LSF_Desktop_Client/bhist
x LSF_Desktop_Client/bqueues
x LSF_Desktop_Client/bltasks
x LSF_Desktop_Client/bresize
x LSF_Desktop_Client/blcollect
x LSF_Desktop_Client/lsacctmrg
x LSF_Desktop_Client/bgadd
x LSF_Desktop_Client/bmig
x LSF_Desktop_Client/bstop
x LSF_Desktop_Client/bswitch
x LSF_Desktop_Client/blhosts
x LSF_Desktop_Client/blcstat
x LSF_Desktop_Client/brsvs
x LSF_Desktop_Client/brun
x LSF_Desktop_Client/blinfo
x LSF_Desktop_Client/lsgrun
x LSF_Desktop_Client/busers
x LSF_Desktop_Client/lsloadadj
x LSF_Desktop_Client/blkill
x LSF_Desktop_Client/bbot
x LSF_Desktop_Client/lsclusters
x LSF_Desktop_Client/bconf
x LSF_Desktop_Client/lsinfo
x LSF_Desktop_Client/lsmake
x LSF_Desktop_Client/blimits
x LSF_Desktop_Client/bmgroup
x LSF_Desktop_Client/bread
x LSF_Desktop_Client/bkill
x LSF_Desktop_Client/lstcsh
x LSF_Desktop_Client/lsrtasks
x LSF_Desktop_Client/README.TXT
x LSF_Desktop_Client/lsplace
x LSF_Desktop_Client/bhosts
x LSF_Desktop_Client/paclogout
x LSF_Desktop_Client/bgdel</code></pre></div>

<ol start="5">
<li>Following the directions in the file README.TXT, set the environment variable LSF_DESKTOP_CLIENT=yes, and set the PATH variable accordingly.</li>
</ol>

<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % export LSF_DESKTOP_CLIENT=yes
lsfuser@My-MacBook-Air LSF_Desktop_Client % export PATH=`pwd`:$PATH</code></pre></div>

<ol start="6">
<li>Next, it&rsquo;s necessary to run the <em>paclogon</em> command to connect to the LSF Application Center (or LSF Suite installation). Here we point to the LSF server kilenc on port 8080.</li>
</ol>

<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % paclogon
Log on to IBM Spectrum LSF Application Center
User account: lsfuser
Enter password:
Specify the URL to connect to IBM Spectrum LSF Application Center. Format:
http://host_name:port_number/platform or https://host_name:port_number/platform
URL: http://kilenc:8080/platform
You have successfully logged on to IBM Spectrum LSF Application Center.</code></pre></div>

<ol start="7">
<li>After successfully logging in using the paclogon command, it should be possible to run LSF &ldquo;base&rdquo; commands from the macOS terminal, including <em>lsid</em>, <em>lsload</em>, and <em>lshosts</em>.</li>
</ol>

<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % lsid
IBM Spectrum LSF 10.1.0.13, Apr 15 2022
Suite Edition: IBM Spectrum LSF Suite for HPC 10.2.0.13
Copyright International Business Machines Corp. 1992, 2016.
US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+
+My cluster name is Klaszter
+My master name is kilenc
+lsfuser@My-MacBook-Air LSF_Desktop_Client % lshosts -w
+HOST_NAME type model cpuf ncpus maxmem maxswp server RESOURCES
+kilenc LINUXPPC64LE POWER9 25.0 32 30.7G 15.8G Yes (mg docker)
+lsfuser@My-MacBook-Air LSF_Desktop_Client % lsload -w
+HOST_NAME status r15s r1m r15m ut pg ls it tmp swp mem
+kilenc ok 0.8 2.1 2.4 7% 0.0 0 1156 551M 15.6G 10G</code></pre></div>
+
+<ol start="8">
+<li>Next, run the LSF batch commands <em>bqueues</em> and <em>bhosts</em>.</li>
+</ol>
+<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % bqueues
+QUEUE_NAME PRIO STATUS MAX JL/U JL/P JL/H NJOBS PEND RUN SUSP
+admin 50 Open:Active - - - - 0 0 0 0
+owners 43 Open:Active - - - - 0 0 0 0
+priority 43 Open:Active - - - - 75835 75803 32 0
+night 40 Open:Inact - - - - 0 0 0 0
+short 35 Open:Active - - - - 0 0 0 0
+dataq 33 Open:Active - - - - 0 0 0 0
+normal 30 Open:Active - - - - 0 0 0 0
+interactive 30 Open:Active - - - - 0 0 0 0
+sendq 30 Open:Active - - - - 0 0 0 0
+idle 20 Open:Active - - - - 0 0 0 0
+lsfuser@My-MacBook-Air LSF_Desktop_Client % bhosts
+HOST_NAME STATUS JL/U MAX NJOBS RUN SSUSP USUSP RSV
+kilenc ok - 32 19 19 0 0 0</code></pre></div>
+
+<ol start="9">
+<li>Running the <em>bjobs</em> command will result in a warning message appearing on macOS stating: <em>&ldquo;bjobs&rdquo; cannot be opened because the developer cannot be verified.</em></li>
+</ol>
+<figure><img src="https://www.gaborsamu.com/images/bjobs_unverified.png" />
+</figure>
+
+<ol start="10">
+<li>To remedy the issue observed in step 9, click Cancel on the warning message and browse to <strong>System Settings -&gt; Privacy &amp; Security -&gt; Security Settings</strong>. In the Security Settings view,
+you&rsquo;ll see the message: <em>&ldquo;bjobs&rdquo; was blocked from use because it is not from an identified developer.</em> To allow the bjobs command to execute, click on the <strong>Allow Anyway</strong> button. You will
+then be prompted to authenticate to make the change take effect.</li>
+</ol>
+<p><figure><img src="https://www.gaborsamu.com/images/bjobs_allow.png" />
+</figure>
+
+<figure><img src="https://www.gaborsamu.com/images/bjobs_authenticate.png" />
+</figure>
+</p>
+
+<ol start="11">
+<li>Run the LSF <em>bjobs</em> command again. You will now receive a new warning popup indicating: <em>macOS cannot verify the developer of &ldquo;bjobs&rdquo;. Are you sure you want to open it?</em> To
+proceed, click on the Open button. The bjobs command will then run to completion as expected. Subsequent executions of bjobs will run without any system warnings.</li>
+</ol>
+<figure><img src="https://www.gaborsamu.com/images/bjobs_open.png" />
+</figure>
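+
+<p>As an aside, these Gatekeeper prompts stem from the quarantine extended attribute which macOS places on downloaded files. Although not exercised in this walkthrough, clearing
+the attribute recursively on the extracted directory should avoid repeating the unblocking procedure for each LSF binary; a sketch, using the path from above:</p>
+<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air Desktop % xattr -dr com.apple.quarantine $HOME/Desktop/LSF_Desktop_Client</code></pre></div>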
+
+<ol start="12">
+<li>Finally, to submit a job, run the <em>bsub</em> command. Here we try to submit a simple sleep job (i.e. <em>bsub -q normal sleep 3600</em>). As was the case with the <em>bjobs</em> command, the <em>bsub</em>
+command is also blocked. Here, repeat steps 10 and 11 as described above but for the <em>bsub</em> command. Once the steps have been completed, repeat the <em>bsub</em> job submission command.</li>
+</ol>
+<div class="highlight"><pre><code class="language-plaintext">lsfuser@My-MacBook-Air LSF_Desktop_Client % bsub -q normal sleep 3600
+Job &lt;617551&gt; is submitted to queue &lt;normal&gt;.</code></pre></div>Ramblings of a supercomputing enthusiast. In traditional HPC environments, login nodes are typically used as an access point for users to submit and manage jobs. Although login nodes are still used today, HPC environments are increasingly being used by a broad class of users with domain expertise who are not necessarily IT experts. In other words, such users may be more comfortable using their native desktop environment rather than the CLI. Given these factors, in the commercial HPC space, organizations are always looking for ways to lower the bar to access and interact with HPC environments. Monitoring .-.. … ..-. (IBM Spectrum LSF) with the TIG stack2023-01-24T19:48:44-07:002023-01-24T19:48:44-07:00https://hpc.social/personal-blog/2023/monitoring-ibm-spectrum-lsf-with-the-tig-stack<p>Much like dashboards in automobiles, dashboards in the context of HPC infrastructure are crucial to get an understanding of what&rsquo;s happening under the hood of your HPC cluster - at
+a glance. During my IT career, I&rsquo;ve used a myriad of monitoring solutions ranging from SNMP and Ganglia, to the ELK (Elasticsearch, Logstash, Kibana) stack. For example, I&rsquo;ve recently
+written an overview on how it is possible to visualize <a href="https://www.ibm.com/products/hpc-workload-management">IBM Spectrum LSF</a> (LSF) data in Grafana. LSF is an HPC job scheduler which brings to the table three decades of experience in
+workload and resource management.</p>
+
+<p>For this blog, I decided to take this to the next level by monitoring IBM Spectrum LSF with the well-known TIG (Telegraf, InfluxDB, Grafana) stack. This article is not meant to be a
+debate on the advantages of one monitoring stack over another. Rather, the focus is to demonstrate what is feasible in terms of monitoring Spectrum LSF clusters with the TIG stack,
+given the many available ways to query LSF for key information using CLI commands.</p>
+
+<hr />
+
+<p><strong>The Journey</strong></p>
+
+<p>There already exist many write-ups on how to deploy the TIG stack to monitor systems. This isn&rsquo;t meant to be a guide on setting up the TIG stack. Rather, it&rsquo;s assumed that the reader
+already has some familiarity with the TIG stack. If not, then [<em>insert your favourite search engine</em>] is your friend.</p>
+
+<p>On my home network, I decided to set up a VM running on my trusty <a href="https://traverse.com.au/products/ten64-networking-platform/">Traverse Ten64</a> running Fedora, where InfluxDB was installed. The idea was to run InfluxDB on a system that is guaranteed
+to be always on in my home environment and that is energy efficient. Installing telegraf on all of the LSF cluster servers (x3) proved to be straightforward. Note that in all cases, I used the
+OS-supplied versions of InfluxDB and Telegraf. Finally, I already had a Grafana server running on a server in my network.</p>
+
+<p>Out of the box, Telegraf has the ability to monitor numerous system metrics. Furthermore, there exist literally hundreds of plugins for Telegraf to monitor a wide variety of devices,
+services and software. A search, however, didn&rsquo;t reveal the existence of any plugin to monitor LSF. So it was time to get creative.</p>
+
+<hr />
+
+<p><strong>What to monitor?</strong></p>
+
+<p>A bit of research revealed that InfluxDB supports what is known as &ldquo;line protocol&rdquo;. This is a well-defined, text-based format for writing data to InfluxDB. I used the following
+<a href="https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/">reference</a> on &ldquo;line protocol&rdquo; to guide me. Using line protocol, it would ultimately be possible to
+write a plugin for Telegraf to effectively scrape information from Spectrum LSF and output it in line protocol format for writing to InfluxDB.</p>
+
+<p>Before I could begin writing the plugin, the key was to determine what information from Spectrum LSF would be useful to display in the dashboard, and how that information could be
+extracted. For this, I followed the KISS principle to keep things as simple as possible. The key metrics I decided to report on were servers, queues and jobs (oh my!), as well as process
+information for the LSF scheduler daemons. Refer to the following table for details:</p>
+
+<hr />
+
+<table>
+<thead>
+<tr>
+<th>Metric(s)</th>
+<th>Command</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>LSF scheduler performance metrics</td>
+<td><em>badmin perfmon view -json</em></td>
+</tr>
+<tr>
+<td>LSF available servers, CPUs, cores, slots</td>
+<td><em>badmin showstatus</em></td>
+</tr>
+<tr>
+<td>LSF servers by status (total number Ok, closed, unreachable, unavailable)</td>
+<td><em>badmin showstatus</em></td>
+</tr>
+<tr>
+<td>LSF job statistics (total number running, suspended, pending)</td>
+<td><em>badmin showstatus</em></td>
+</tr>
+<tr>
+<td>LSF queue statistics (per queue, total number of jobs running, suspended, pending)</td>
+<td><em>bqueues -json -o queue_name:12 njobs pend run susp rsv ususp ssusp</em></td>
+</tr>
+<tr>
+<td>LSF mbatchd process metrics</td>
+<td>(Telegraf - inputs.procstat)</td>
+</tr>
+<tr>
+<td>LSF mbschd process metrics</td>
+<td>(Telegraf - inputs.procstat)</td>
+</tr>
+<tr>
+<td>LSF management lim process metrics</td>
+<td>(Telegraf - inputs.procstat)</td>
+</tr>
+</tbody>
+</table>
+<hr />
+
+<p><strong>Scrapin' fun</strong></p>
+
+<p>The above metrics would give a good idea of the state of the Spectrum LSF cluster at a glance. With the list of metrics prepared, the next step was to create a plugin script which would
+scrape data from the noted commands. Both <em>bqueues</em> and <em>badmin perfmon view</em> support output in JSON format with the appropriate flags specified. However, <em>badmin showstatus</em> does not support
+output in JSON format. This meant that for <em>badmin showstatus</em> it was necessary to scrape data assuming hard-coded field positions in the output.</p>
+
+<p>A copy of the Telegraf plugin for Spectrum LSF is provided below, after a short sample of the records it emits. This is just an example and is provided &ldquo;as is&rdquo; for testing purposes. Your mileage may vary.</p>
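+
+<p>To make the line protocol format concrete, the records emitted by the plugin have the following shape: a measurement name, a comma-separated tag set, one or more
+fields, and a nanosecond timestamp. The values shown here are illustrative rather than captured from the cluster:</p>
+<div class="highlight"><pre><code class="language-plaintext">lsf_servers,status=ok value=1i 1674246976000000000
+lsf_jobs,state=running value=19i 1674246976000000000
+lsf_hosts,state=cores current=32i,peak=32i 1674246976000000000
+lsf_queues,name=normal njobs=0i,pend=0i,run=0i,susp=0i,rsv=0i,ususp=0i,ssusp=0i 1674246976000000000</code></pre></div>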
+
+<hr />
+
+<details>
+ <strong>Example lsf_telegraf_agent.py script. Click to expand!</strong>
+ <div class="highlight"><pre><code class="language-python"><span style="color: #75715e;">#!/usr/bin/python3.8</span>
+<span style="color: #75715e;"># </span>
+<span style="color: #75715e;"># v0.9 </span>
+<span style="color: #75715e;"># Sample inputs.exec script for Telegraf which outputs metrics from an IBM Spectrum LSF management server</span>
+<span style="color: #75715e;"># in InfluxDB Line Protocol input format.</span>
+<span style="color: #75715e;">#</span>
+<span style="color: #75715e;"># NOTE: It is required to set the lsf_envfile variable to point to the LSF profile.lsf file</span>
+<span style="color: #75715e;"># for the LSF installation. </span>
+<span style="color: #75715e;">#</span>
+<span style="color: #75715e;"># Gabor Samu</span>
+<span style="color: #75715e;"># January 4, 2023</span>
+<span style="color: #75715e;">#</span>
+
+<span style="color: #f92672;">import</span> os
+<span style="color: #f92672;">import</span> json
+<span style="color: #f92672;">import</span> time
+<span style="color: #f92672;">import</span> subprocess
+<span style="color: #f92672;">import</span> sys
+<span style="color: #f92672;">from</span> pathlib <span style="color: #f92672;">import</span> Path
+
+<span style="color: #75715e;">#</span>
+<span style="color: #75715e;"># Variable declarations</span>
+<span style="color: #75715e;"># **NOTE: lsf_envfile needs to be set to point to the profile.lsf file for the LSF installation. </span>
+<span style="color: #75715e;">#</span>
+lsf_envfile <span style="color: #f92672;">=</span> <span style="color: #e6db74;">"/opt/ibm/lsfsuite/lsf/conf/profile.lsf"</span>
+
+<span style="color: #75715e;">#</span>
+<span style="color: #75715e;"># Source the Spectrum LSF profile. </span>
+<span style="color: #75715e;"># Check for existence of lsf_envfile (profile.lsf) and source the environment. </span>
+<span style="color: #75715e;"># If the specified file does not exist, then exit. </span>
+<span style="color: #75715e;">#</span>
+path <span style="color: #f92672;">=</span> Path(lsf_envfile)
+<span style="color: #66d9ef;">if</span> path<span style="color: #f92672;">.</span>is_file():
+    lsf_env <span style="color: #f92672;">=</span> (<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'env -i sh -c "source </span><span style="color: #e6db74;">{</span>lsf_envfile<span style="color: #e6db74;">}</span><span style="color: #e6db74;"> &amp;&amp; env"'</span>)
+    <span style="color: #66d9ef;">for</span> line <span style="color: #f92672;">in</span> subprocess<span style="color: #f92672;">.</span>getoutput(lsf_env)<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"</span><span style="color: #ae81ff;">\n</span><span style="color: #e6db74;">"</span>):
+        <span style="color: #75715e;"># Split only on the first "=" so that values containing "=" are preserved</span>
+        key, value <span style="color: #f92672;">=</span> line<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"="</span>, <span style="color: #ae81ff;">1</span>)
+        os<span style="color: #f92672;">.</span>environ[key]<span style="color: #f92672;">=</span> value
+<span style="color: #66d9ef;">else</span>:
+    sys<span style="color: #f92672;">.</span>exit(<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'The file </span><span style="color: #e6db74;">{</span>lsf_envfile<span style="color: #e6db74;">}</span><span style="color: #e6db74;"> does not exist.'</span>)
+
+<span style="color: #75715e;"># </span>
+<span style="color: #75715e;"># Get the time in nanoseconds since the epoch. 
</span> +<span style="color: #75715e;"># This is required as part of the InfluxDB line protocol reference. </span> +<span style="color: #75715e;"># Only supported on Python 3.7+</span> +<span style="color: #75715e;">#</span> +time_nanosec <span style="color: #f92672;">=</span> time<span style="color: #f92672;">.</span>time_ns() + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Here we set the LSF environment variable LSB_NTRIES. This will be used to determine the </span> +<span style="color: #75715e;"># number of retries before failure of a LSF batch command. This is used to cover the case </span> +<span style="color: #75715e;"># when the LSF mbatchd is not running. </span> +<span style="color: #75715e;">#</span> +os<span style="color: #f92672;">.</span>environ[<span style="color: #e6db74;">"LSB_NTRIES"</span>] <span style="color: #f92672;">=</span> <span style="color: #e6db74;">"2"</span> + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Check if LSF performance metric monitoring is enabled. This is done by running</span> +<span style="color: #75715e;"># 'badmin perfmon view'. If badmin is not found, then exit. </span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Check the return status from 'badmin perfmon view' and take the appropriate action:</span> +<span style="color: #75715e;"># - If return status is 7, it means that performance monitoring is not enabled. The script</span> +<span style="color: #75715e;"># will enable LSF performance metric monitoring by running 'badmin perfmon start'.</span> +<span style="color: #75715e;"># Note that a 70 second sleep is required before LSF metrics will be available. </span> +<span style="color: #75715e;"># - If return status is 65, it means that the badmin command reported that the</span> +<span style="color: #75715e;"># LSF batch system is down. This is a fatal error which will cause the script</span> +<span style="color: #75715e;"># to exit. 
</span> +<span style="color: #75715e;">#</span> +lsf_path <span style="color: #f92672;">=</span> os<span style="color: #f92672;">.</span>environ[<span style="color: #e6db74;">'LSF_BINDIR'</span>] +badmin_path <span style="color: #f92672;">=</span> lsf_path <span style="color: #f92672;">+</span> <span style="color: #e6db74;">"/badmin"</span> +bqueues_path <span style="color: #f92672;">=</span> lsf_path <span style="color: #f92672;">+</span> <span style="color: #e6db74;">"/bqueues"</span> + +path <span style="color: #f92672;">=</span> Path(badmin_path) +<span style="color: #66d9ef;">if</span> path<span style="color: #f92672;">.</span>is_file(): + cmd <span style="color: #f92672;">=</span> [badmin_path, <span style="color: #e6db74;">'perfmon'</span>, <span style="color: #e6db74;">'view'</span>] + p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL) + <span style="color: #66d9ef;">while</span> p<span style="color: #f92672;">.</span>poll() <span style="color: #f92672;">is</span> <span style="color: #66d9ef;">None</span>: + time<span style="color: #f92672;">.</span>sleep(<span style="color: #ae81ff;">0.1</span>) + return_code <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>returncode + <span style="color: #66d9ef;">if</span> return_code <span style="color: #f92672;">==</span> <span style="color: #ae81ff;">7</span>: + cmd <span style="color: #f92672;">=</span> [badmin_path, <span style="color: #e6db74;">'perfmon'</span>, <span style="color: #e6db74;">'start'</span>] + p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL) + <span style="color: #66d9ef;">while</span> p<span style="color: #f92672;">.</span>poll() <span style="color: #f92672;">is</span> <span style="color: #66d9ef;">None</span>: + time<span style="color: #f92672;">.</span>sleep(<span style="color: #ae81ff;">0.1</span>) + return_code <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>returncode + time<span style="color: #f92672;">.</span>sleep(<span style="color: #ae81ff;">70</span>) + <span style="color: #66d9ef;">elif</span> return_code <span style="color: #f92672;">==</span> <span style="color: #ae81ff;">65</span>: + sys<span style="color: #f92672;">.</span>exit(<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'The LSF batch system is down.'</span>) +<span style="color: #66d9ef;">else</span>: + sys<span style="color: #f92672;">.</span>exit(<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'</span><span style="color: #e6db74;">{</span>badmin_path<span style="color: #e6db74;">}</span><span style="color: #e6db74;"> does not exist.'</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Run badmin with the "perfmon view" keywords and the -json option to product JSON output</span> +<span style="color: #75715e;"># We assume here that the LSF batch system is responsive (a check was done above); if</span> +<span style="color: #75715e;"># the mbatchd is very busy there is a possiblity that it may not be responsive here. 
This</span> +<span style="color: #75715e;"># case is not considered; LSB_NTRIES setting will determine how many tries are made before</span> +<span style="color: #75715e;"># badmin gives up the ghost. </span> +<span style="color: #75715e;"># </span> +<span style="color: #75715e;"># Note: We previously checked for the existence of the 'badmin' binary. </span> +<span style="color: #75715e;">#</span> +cmd <span style="color: #f92672;">=</span> [badmin_path, <span style="color: #e6db74;">'perfmon'</span>, <span style="color: #e6db74;">'view'</span>, <span style="color: #e6db74;">'-json'</span>] +p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>PIPE, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, text<span style="color: #f92672;">=</span><span style="color: #66d9ef;">True</span>) +stdout, stderr <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>communicate() +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Guard for the case that the performance monitor has just been enabled, but is not</span> +<span style="color: #75715e;"># producing any data as the first sample period has not elapsed. </span> +<span style="color: #75715e;">#</span> +<span style="color: #66d9ef;">if</span> stdout <span style="color: #f92672;">==</span> <span style="color: #e6db74;">""</span>: + sys<span style="color: #f92672;">.</span>exit(<span style="color: #e6db74;">f</span><span style="color: #e6db74;">'Output from badmin perfmon view -json is empty.'</span>) +<span style="color: #66d9ef;">else</span>: + data <span style="color: #f92672;">=</span> json<span style="color: #f92672;">.</span>loads(stdout) + +<span style="color: #75715e;"># </span> +<span style="color: #75715e;"># Run badmin showstatus</span> +<span style="color: #75715e;"># Next, run the command 'badmin showstatus' and capture the output. Note that badmin showstatus</span> +<span style="color: #75715e;"># does not produce JSON output. So here we must do some scraping of the output. </span> +<span style="color: #75715e;"># The output from 'badmin showstatus' it placed into the array 'showstatus'. The hard coded</span> +<span style="color: #75715e;"># positions in the output of 'badmin showstatus' are assumed when building the output </span> +<span style="color: #75715e;"># strings below. Should the format of the output of 'badmin showstatus' change, this will</span> +<span style="color: #75715e;"># need to be updated. 
</span> +cmd <span style="color: #f92672;">=</span> [badmin_path, <span style="color: #e6db74;">'showstatus'</span>] +p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>PIPE, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, text<span style="color: #f92672;">=</span><span style="color: #66d9ef;">True</span>) +stdout, stderr <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>communicate() +<span style="color: #75715e;"># Convert badmin showstatus output into an array</span> +showstatus <span style="color: #f92672;">=</span> stdout<span style="color: #f92672;">.</span>split() + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Run bqueues</span> +<span style="color: #75715e;">#</span> +cmd <span style="color: #f92672;">=</span> [bqueues_path, <span style="color: #e6db74;">'-json'</span>, <span style="color: #e6db74;">'-o'</span>, <span style="color: #e6db74;">'queue_name:12 njobs pend run susp rsv ususp ssusp'</span>] +p <span style="color: #f92672;">=</span> subprocess<span style="color: #f92672;">.</span>Popen(cmd, stdout<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>PIPE, stderr<span style="color: #f92672;">=</span>subprocess<span style="color: #f92672;">.</span>DEVNULL, text<span style="color: #f92672;">=</span><span style="color: #66d9ef;">True</span>) +stdout, stderr <span style="color: #f92672;">=</span> p<span style="color: #f92672;">.</span>communicate() +data_queues <span style="color: #f92672;">=</span> json<span style="color: #f92672;">.</span>loads(stdout) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># At this stage, we've captured the output from 'badmin perfmon view -json' and </span> +<span style="color: #75715e;"># 'badmin showstatus'. We're now ready to print to standard output the metric</span> +<span style="color: #75715e;"># strings in InfluxDB line procotol format. 
</span> +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Details about the line protocol format can be found here:</span> +<span style="color: #75715e;"># https://docs.influxdata.com/influxdb/v2.6/reference/syntax/line-protocol/</span> +<span style="color: #75715e;"># </span> +<span style="color: #75715e;"># </span> + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># LSF server status</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=total"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">21</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=ok"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">23</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=closed"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">25</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=unreachable"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">27</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_servers,"</span>,<span style="color: #e6db74;">"status=unavailable"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">29</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># LSF job status</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=total"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">33</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=running"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">35</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=suspended"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">37</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=pending"</span>,<span style="color: #e6db74;">" 
value="</span>,showstatus[<span style="color: #ae81ff;">39</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_jobs,"</span>,<span style="color: #e6db74;">"state=finished"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">41</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># LSF user stats</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_users,"</span>,<span style="color: #e6db74;">"state=numusers"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">45</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_users,"</span>,<span style="color: #e6db74;">"state=numgroups"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">50</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_users,"</span>,<span style="color: #e6db74;">"state=numactive"</span>,<span style="color: #e6db74;">" value="</span>,showstatus[<span style="color: #ae81ff;">55</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># LSF hosts stats</span> +<span style="color: #75715e;"># First we split out the current and peak values for clients, servers, cpus, cores, and slots.</span> +<span style="color: #75715e;"># The current and peak values are separated by the "/" delimiter.</span> +<span style="color: #75715e;"># </span> +clientssplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">9</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) +serverssplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">11</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) +cpussplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">13</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) +coressplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">15</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) +slotssplit <span style="color: #f92672;">=</span> showstatus[<span style="color: #ae81ff;">17</span>]<span style="color: #f92672;">.</span>split(<span style="color: #e6db74;">"/"</span>) + +print(<span style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=clients"</span>,<span style="color: #e6db74;">" current="</span>,clientssplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,clientssplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_n +anosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span 
style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=servers"</span>,<span style="color: #e6db74;">" current="</span>,serverssplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,serverssplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_n +anosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=cpus"</span>,<span style="color: #e6db74;">" current="</span>,cpussplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,cpussplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,se +p<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=cores"</span>,<span style="color: #e6db74;">" current="</span>,coressplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,coressplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec +,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_hosts,"</span>,<span style="color: #e6db74;">"state=slots"</span>,<span style="color: #e6db74;">" current="</span>,slotssplit[<span style="color: #ae81ff;">0</span>],<span style="color: #e6db74;">"i,"</span>,<span style="color: #e6db74;">"peak="</span>,slotssplit[<span style="color: #ae81ff;">1</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec +,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print mbatchd query metrics</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"query=job"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">1</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"query=host"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">2</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"query=queue"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">3</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print mbatchd job metrics</span> +<span style="color: #75715e;">#</span> +print(<span style="color: 
#e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=submitreqs"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">4</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=submitted"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">5</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=dispatched"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">6</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=completed"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">7</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=sentremote"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">8</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"jobs=acceptremote"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">9</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">'</span> +<span style="color: #e6db74;">')</span> +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"sched=interval"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">10</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"sched=matchhost"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">11</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span> +) +print(<span style="color: 
#e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"sched=buckets"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">12</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"sched=reordered"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">13</span>][<span style="color: #e6db74;">'current'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span> +) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print mbatchd efficiency metrics. Here check if the efficiency metric indicated is "-". If so, </span> +<span style="color: #75715e;"># then assume a zero value. The trailing "%" sign on the metrics (percentages) is also stripped here. </span> +<span style="color: #75715e;">#</span> +slots <span style="color: #f92672;">=</span> (data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">14</span>][<span style="color: #e6db74;">'current'</span>]) +slots_percent <span style="color: #f92672;">=</span> slots +<span style="color: #66d9ef;">if</span> slots_percent <span style="color: #f92672;">==</span> <span style="color: #e6db74;">"-"</span>: + slots_percent <span style="color: #f92672;">=</span> <span style="color: #e6db74;">"0"</span> +<span style="color: #66d9ef;">elif</span> slots_percent <span style="color: #f92672;">!=</span> <span style="color: #e6db74;">"0"</span>: + <span style="color: #75715e;"># Strip % sign and decimal. This is to work around issue inserting float to InfluxDB</span> + <span style="color: #75715e;"># "type float, already exists as type integer dropped ..."</span> + slots_percent <span style="color: #f92672;">=</span> slots[:<span style="color: #f92672;">-</span><span style="color: #ae81ff;">4</span>] + +memory <span style="color: #f92672;">=</span> (data[<span style="color: #e6db74;">'record'</span>][<span style="color: #ae81ff;">15</span>][<span style="color: #e6db74;">'current'</span>]) +memory_percent <span style="color: #f92672;">=</span> memory +<span style="color: #66d9ef;">if</span> memory_percent <span style="color: #f92672;">==</span> <span style="color: #e6db74;">"-"</span>: + memory_percent <span style="color: #f92672;">=</span> <span style="color: #e6db74;">"0"</span> +<span style="color: #66d9ef;">elif</span> memory_percent <span style="color: #f92672;">!=</span> <span style="color: #e6db74;">"0"</span>: + <span style="color: #75715e;"># Strip % sign and decimal. 
This is to work around issue inserting float to InfluxDB</span> + <span style="color: #75715e;"># "type float, already exists as type integer dropped ..."</span> + memory_percent <span style="color: #f92672;">=</span> memory[:<span style="color: #f92672;">-</span><span style="color: #ae81ff;">4</span>] + +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"utilization=slots"</span>,<span style="color: #e6db74;">" value="</span>,slots_percent,<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"utilization=memory"</span>,<span style="color: #e6db74;">" value="</span>,memory_percent,<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print mbatchd file descriptor usage</span> +<span style="color: #75715e;">#</span> +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"fd=free"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'fd'</span>][<span style="color: #e6db74;">'free'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"fd=used"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'fd'</span>][<span style="color: #e6db74;">'used'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) +print(<span style="color: #e6db74;">"lsf_mbatchd,"</span>,<span style="color: #e6db74;">"fd=total"</span>,<span style="color: #e6db74;">" value="</span>,data[<span style="color: #e6db74;">'fd'</span>][<span style="color: #e6db74;">'total'</span>],<span style="color: #e6db74;">"i "</span>,time_nanosec,sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>) + +<span style="color: #75715e;">#</span> +<span style="color: #75715e;"># Print LSF queue status (njobs)</span> +<span style="color: #75715e;">#</span> +iterations <span style="color: #f92672;">=</span> data_queues[<span style="color: #e6db74;">"QUEUES"</span>] + +<span style="color: #66d9ef;">for</span> n <span style="color: #f92672;">in</span> range(iterations): + print(<span style="color: #e6db74;">"lsf_queues,"</span>,<span style="color: #e6db74;">"name="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'QUEUE_NAME'</span>], <span style="color: #e6db74;">" njobs="</span>, data_queues[<span style="color: #e6db74;">'RECOR</span> +DS<span style="color: #e6db74;">'][n]['</span>NJOBS<span style="color: #e6db74;">'],"i,",</span> + <span style="color: #e6db74;">"pend="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'PEND'</span>],<span style="color: #e6db74;">"i,"</span>, + <span style="color: #e6db74;">"run="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'RUN'</span>],<span style="color: #e6db74;">"i,"</span>, + <span style="color: #e6db74;">"susp="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span 
style="color: #e6db74;">'SUSP'</span>],<span style="color: #e6db74;">"i,"</span>,
+    <span style="color: #e6db74;">"rsv="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'RSV'</span>],<span style="color: #e6db74;">"i,"</span>,
+    <span style="color: #e6db74;">"ususp="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'USUSP'</span>],<span style="color: #e6db74;">"i,"</span>,
+    <span style="color: #e6db74;">"ssusp="</span>, data_queues[<span style="color: #e6db74;">'RECORDS'</span>][n][<span style="color: #e6db74;">'SSUSP'</span>],<span style="color: #e6db74;">"i "</span>,
+    time_nanosec, sep<span style="color: #f92672;">=</span><span style="color: #e6db74;">''</span>)
+
+exit()
+</code></pre></div>
+
+</details>
+
+<hr />
+
+<p><strong>Bringing it all together</strong></p>
+
+<p>For completeness, below are the details of the configuration of the environment. It should be noted that the simple test environment consists of a single server running IBM
+Spectrum LSF Suite for HPC and a separate server which runs the InfluxDB instance.</p>
+
+<hr />
+
+<table>
+<thead>
+<tr>
+<th style="text-align: left;">Hostname</th>
+<th>Component</th>
+<th>Version</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td style="text-align: left;"><em>kilenc</em></td>
+<td>OS (LSF mgmt server)</td>
+<td><em>CentOS Stream release 8 (ppc64le)</em></td>
+</tr>
+<tr>
+<td style="text-align: left;"><em>kilenc</em></td>
+<td>Spectrum LSF Suite for HPC</td>
+<td><em>v10.2.0.13</em></td>
+</tr>
+<tr>
+<td style="text-align: left;"><em>adatbazis</em></td>
+<td>OS (InfluxDB server)</td>
+<td><em>Fedora release 36 (aarch64)</em></td>
+</tr>
+<tr>
+<td style="text-align: left;"><em>adatbazis</em></td>
+<td>InfluxDB</td>
+<td><em>v1.8.10</em></td>
+</tr>
+<tr>
+<td style="text-align: left;"><em>kilenc</em></td>
+<td>Telegraf</td>
+<td><em>v1.24.3</em></td>
+</tr>
+<tr>
+<td style="text-align: left;"><em>kilenc</em></td>
+<td>Grafana</td>
+<td><em>v9.1.6</em></td>
+</tr>
+</tbody>
+</table>
+<hr />
+
+<p>The following steps assume that IBM Spectrum LSF Suite for HPC, InfluxDB and Telegraf have been installed.</p>
+
+<ol>
+<li>
+<p>Start InfluxDB on the host <em>adatbazis</em>.</p>
+
+</li>
+<li>
+<p>On the LSF management server <em>kilenc</em>, configure telegraf to connect to the InfluxDB instance on host <em>adatbazis</em>. Edit the configuration <em>/etc/telegraf/telegraf.conf</em> and specify
+the correct URL in the <em>outputs.influxdb</em> section as follows:</p>
+
+</li>
+</ol>
+<div class="highlight"><pre><code class="language-plaintext"># # Configuration for sending metrics to InfluxDB
+[[outputs.influxdb]]
+# ## The full HTTP or UDP URL for your InfluxDB instance.
+# ##
+# ## Multiple URLs can be specified for a single cluster, only ONE of the
+# ## urls will be written to each interval.
+# # urls = ["unix:///var/run/influxdb.sock"]
+# # urls = ["udp://127.0.0.1:8089"]
+# # urls = ["http://127.0.0.1:8086"]
+# Added gsamu Jan 04 2023
+urls = ["http://adatbazis:8086"]</code></pre></div>
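+
+<p>At this point, it can be worth a quick sanity check that the InfluxDB instance is reachable from <em>kilenc</em> before going further. This check is an addition to the original
+steps; with InfluxDB 1.x, the <em>/ping</em> endpoint should answer with an HTTP 204:</p>
+<div class="highlight"><pre><code class="language-plaintext">[root@kilenc ~]# curl -i http://adatbazis:8086/ping
+HTTP/1.1 204 No Content
+X-Influxdb-Version: 1.8.10</code></pre></div>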
+
+<ol start="3">
+<li>On the LSF management server <em>kilenc</em>, configure telegraf with the custom plugin script <em>lsf_telegraf_agent_0.9.py</em> to collect and log metrics from IBM Spectrum LSF Suite for HPC.
+Edit the configuration <em>/etc/telegraf/telegraf.conf</em> and specify the correct command path in the section <em>inputs.exec</em>. Additionally, set <em>data_format</em> equal to <em>influx</em>. Note that the
+script <em>lsf_telegraf_agent_0.9.py</em> was copied to the directory <em>/etc/telegraf/telegraf.d/scripts</em> with permissions octal 755 and owner set to user <em>telegraf</em>.
+<strong>Note:</strong> User <em>telegraf</em> was automatically created during the installation of telegraf.</li>
+</ol>
+<div class="highlight"><pre><code class="language-plaintext">
+# ## Gather LSF metrics
+[[inputs.exec]]
+  ## Commands array
+  commands = [ "/etc/telegraf/telegraf.d/scripts/lsf_telegraf_agent_0.9.py" ]
+  timeout = "30s"
+  interval = "30s"
+  data_format = "influx"
+  # ## End LSF metrics</code></pre></div>
+
+<ol start="4">
+<li>Telegraf provides the ability to collect metrics on processes. Here we&rsquo;ll use the telegraf <em>procstat</em> facility to monitor the LSF mbatchd and mbschd daemons, which are the key
+processes involved in handling query requests and making scheduling decisions for jobs in the environment, along with the management host lim. Edit the configuration
+<em>/etc/telegraf/telegraf.conf</em> and configure the three following <em>inputs.procstat</em> sections.</li>
+</ol>
+<div class="highlight"><pre><code class="language-plaintext"># ## Monitor CPU and memory utilization for LSF processes
+# ## mbatchd, mbschd, lim (manager)
+[[inputs.procstat]]
+exe = "lim"
+pattern = "lim"
+pid_finder = "pgrep"
+
+[[inputs.procstat]]
+exe = "mbschd"
+pattern = "mbschd"
+pid_finder = "pgrep"
+
+[[inputs.procstat]]
+exe = "mbatchd"
+pattern = "mbatchd"
+pid_finder = "pgrep"</code></pre></div>
+
+<ol start="5">
+<li>With the telegraf configuration complete, it&rsquo;s now time to test whether the configuration and the custom LSF agent are functioning as expected. Note that the following operation is performed
+on the LSF management candidate host <em>kilenc</em> and assumes that the LSF daemons are up and running. This is achieved by running the command
+<em>telegraf --config /etc/telegraf/telegraf.conf --test</em>. <strong>Note:</strong> Any errors in the configuration file <em>/etc/telegraf/telegraf.conf</em> will result in errors in the output.</li>
+</ol>
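+
+<p>Once the test output below looks clean, telegraf can be started as a service so that collection runs continuously. This assumes the systemd unit shipped with the telegraf
+package (not part of the captured session below):</p>
+<div class="highlight"><pre><code class="language-plaintext">[root@kilenc ~]# systemctl enable --now telegraf</code></pre></div>
+
+<hr />
+
+<details>
+ <strong>Output of <em>telegraf --config /etc/telegraf/telegraf.conf --test</em>. 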
Click to expand!

[root@kilenc telegraf]# pwd
/etc/telegraf
[root@kilenc telegraf]# telegraf --config /etc/telegraf/telegraf.conf --test
> mem,host=kilenc active=1938817024i,available=6820003840i,available_percent=20.653390597462806,buffered=4849664i,cached=6317735936i,commit_limit=33560395776i,committed_as=18635292672i,dirty=4128768i,free=2623799296i,high_free=0i,high_total=0i,huge_page_size=2097152i,huge_pages_free=0i,huge_pages_total=0i,inactive=13852016640i,low_free=0i,low_total=0i,mapped=1007353856i,page_tables=22478848i,shared=259063808i,slab=4946919424i,sreclaimable=902234112i,sunreclaim=4044685312i,swap_cached=3866624i,swap_free=16994729984i,swap_total=17049780224i,total=33021231104i,used=24074846208i,used_percent=72.90717336424115,vmalloc_chunk=0i,vmalloc_total=562949953421312i,vmalloc_used=0i,write_back=0i,write_back_tmp=0i 1674246976000000000
> kernel,host=kilenc boot_time=1673790850i,context_switches=1943864437i,entropy_avail=4037i,interrupts=1294179599i,processes_forked=4255316i 1674246976000000000
> swap,host=kilenc free=16994729984i,total=17049780224i,used=55050240i,used_percent=0.3228794698626609 1674246976000000000
> swap,host=kilenc in=172032i,out=851968i 1674246976000000000
> net,host=kilenc,interface=lo bytes_recv=90039931116i,bytes_sent=90039931116i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=17245997i,packets_sent=17245997i 1674246976000000000
> net,host=kilenc,interface=enP4p1s0f0 bytes_recv=0i,bytes_sent=0i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=0i,packets_sent=0i 1674246976000000000
> net,host=kilenc,interface=enP4p1s0f1 bytes_recv=11791041280i,bytes_sent=1701152001i,drop_in=0i,drop_out=0i,err_in=0i,err_out=0i,packets_recv=10322276i,packets_sent=4594948i 1674246976000000000
> net,host=kilenc,interface=all icmp_inaddrmaskreps=0i,icmp_inaddrmasks=0i,icmp_incsumerrors=0i,icmp_indestunreachs=8609i,icmp_inechoreps=20i,icmp_inechos=11i,icmp_inerrors=1084i,icmp_inmsgs=8640i,icmp_inparmprobs=0i,icmp_inredirects=0i,icmp_insrcquenchs=0i,icmp_intimeexcds=0i,icmp_intimestampreps=0i,icmp_intimestamps=0i,icmp_outaddrmaskreps=0i,icmp_outaddrmasks=0i,icmp_outdestunreachs=4805i,icmp_outechoreps=11i,icmp_outechos=94i,icmp_outerrors=0i,icmp_outmsgs=4910i,icmp_outparmprobs=0i,icmp_outredirects=0i,icmp_outsrcquenchs=0i,icmp_outtimeexcds=0i,icmp_outtimestampreps=0i,icmp_outtimestamps=0i,icmpmsg_intype0=20i,icmpmsg_intype3=8609i,icmpmsg_intype8=11i,icmpmsg_outtype0=11i,icmpmsg_outtype3=4805i,icmpmsg_outtype8=94i,ip_defaultttl=64i,ip_forwarding=1i,ip_forwdatagrams=0i,ip_fragcreates=62958i,ip_fragfails=0i,ip_fragoks=12611i,ip_inaddrerrors=1i,ip_indelivers=21324370i,ip_indiscards=0i,ip_inhdrerrors=0i,ip_inreceives=21324371i,ip_inunknownprotos=0i,ip_outdiscards=0i,ip_outnoroutes=30i,ip_outrequests=21248264i,ip_reasmfails=0i,ip_reasmoks=0i,ip_reasmreqds=0i,ip_reasmtimeout=0i,tcp_activeopens=763497i,tcp_attemptfails=96617i,tcp_currestab=118i,tcp_estabresets=1917i,tcp_incsumerrors=0i,tcp_inerrs=0i,tcp_insegs=19488475i,tcp_maxconn=-1i,tcp_outrsts=137188i,tcp_outsegs=20220038i,tcp_passiveopens=675805i,tcp_retranssegs=9827i,tcp_rtoalgorithm=1i,tcp_rtomax=120000i,tcp_rtomin=200i,udp_ignoredmulti=10509i,udp_incsumerrors=0i,udp_indatagrams=1816997i,udp_inerrors=0i,udp_memerrors=0i,udp_noports=264i,udp_outdatagrams=1506724i,udp_rcvbuferrors=0i,udp_sndbuferrors=0i,udplite_ignoredmulti=0i,udplite_incsumerrors=0i,udplite_indatagrams=0i,udplite_inerrors=0i,udplite_memerrors=0i,udplite_noports=0i,udplite_outdatagrams=0i,udplite_rcvbuferrors=0i,udplite_sndbuferrors=0i 1674246976000000000
> diskio,host=kilenc,name=dm-2 io_time=9739370i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=4015612416i,read_time=604060i,reads=40592i,weighted_io_time=60563370i,write_bytes=47025459712i,write_time=59959310i,writes=1079691i 1674246976000000000
> diskio,host=kilenc,name=sda1 io_time=1460i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=4849664i,read_time=1304i,reads=67i,weighted_io_time=1304i,write_bytes=0i,write_time=0i,writes=0i 1674246976000000000
> diskio,host=kilenc,name=sda3 io_time=45872430i,iops_in_progress=0i,merged_reads=623i,merged_writes=1061314i,read_bytes=16398521856i,read_time=3371612i,reads=139298i,weighted_io_time=311521720i,write_bytes=133715422208i,write_time=308150107i,writes=7031512i 1674246976000000000
> diskio,host=kilenc,name=dm-1 io_time=5780i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=5636096i,read_time=3030i,reads=81i,weighted_io_time=26500i,write_bytes=13631488i,write_time=23470i,writes=208i 1674246976000000000
> disk,device=dm-0,fstype=xfs,host=kilenc,mode=rw,path=/ free=9315028992i,inodes_free=18214222i,inodes_total=19822888i,inodes_used=1608666i,total=53660876800i,used=44345847808i,used_percent=82.64093032486566 1674246976000000000
> disk,device=sda2,fstype=ext4,host=kilenc,mode=rw,path=/boot free=309653504i,inodes_free=65264i,inodes_total=65536i,inodes_used=272i,total=1020702720i,used=640585728i,used_percent=67.41310045173972 1674246976000000000
> disk,device=dm-2,fstype=xfs,host=kilenc,mode=rw,path=/home free=856442515456i,inodes_free=452529686i,inodes_total=453312512i,inodes_used=782826i,total=927930712064i,used=71488196608i,used_percent=7.704044674735306 1674246976000000000
> disk,device=dm-2,fstype=xfs,host=kilenc,mode=rw,path=/home/opt/at13.0/lib free=856442515456i,inodes_free=452529686i,inodes_total=453312512i,inodes_used=782826i,total=927930712064i,used=71488196608i,used_percent=7.704044674735306 1674246976000000000
> disk,device=dm-2,fstype=xfs,host=kilenc,mode=rw,path=/home/opt/at13.0/lib64 free=856442515456i,inodes_free=452529686i,inodes_total=453312512i,inodes_used=782826i,total=927930712064i,used=71488196608i,used_percent=7.704044674735306 1674246976000000000
> disk,device=ST31000524AS/raktar,fstype=zfs,host=kilenc,mode=rw,path=/mnt/ST31000524AS free=210837438464i,inodes_free=411792117i,inodes_total=412304487i,inodes_used=512370i,total=965496143872i,used=754658705408i,used_percent=78.16278813725106 1674246976000000000
> diskio,host=kilenc,name=sda io_time=45899860i,iops_in_progress=0i,merged_reads=650i,merged_writes=1061332i,read_bytes=16495536128i,read_time=3440899i,reads=141325i,weighted_io_time=311596362i,write_bytes=133715696640i,write_time=308155462i,writes=7031531i 1674246976000000000
> disk,device=ST31000524AS,fstype=zfs,host=kilenc,mode=rw,path=/ST31000524AS free=210837438464i,inodes_free=411792117i,inodes_total=411792123i,inodes_used=6i,total=210837569536i,used=131072i,used_percent=0.00006216728844316324 1674246976000000000
> diskio,host=kilenc,name=sda2 io_time=18060i,iops_in_progress=0i,merged_reads=27i,merged_writes=18i,read_bytes=88372224i,read_time=31224i,reads=436i,weighted_io_time=36579i,write_bytes=274432i,write_time=5355i,writes=19i 1674246976000000000
> diskio,host=kilenc,name=dm-0 io_time=38788720i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=12341294080i,read_time=1143210i,reads=51814i,weighted_io_time=303329620i,write_bytes=86676331008i,write_time=302186410i,writes=6798400i 1674246976000000000
> diskio,host=kilenc,name=sdb io_time=668810i,iops_in_progress=0i,merged_reads=9i,merged_writes=58i,read_bytes=104550912i,read_time=746540i,reads=31054i,weighted_io_time=1445858i,write_bytes=10845920256i,write_time=699318i,writes=124780i 1674246976000000000
> diskio,host=kilenc,name=sdb1 io_time=341330i,iops_in_progress=0i,merged_reads=0i,merged_writes=58i,read_bytes=95562240i,read_time=383066i,reads=25026i,weighted_io_time=1082385i,write_bytes=10845920256i,write_time=699318i,writes=124780i 1674246976000000000
> diskio,host=kilenc,name=sdb9 io_time=190i,iops_in_progress=0i,merged_reads=0i,merged_writes=0i,read_bytes=4980736i,read_time=37i,reads=69i,weighted_io_time=37i,write_bytes=0i,write_time=0i,writes=0i 1674246976000000000
> system,host=kilenc load1=2.06,load15=2.12,load5=2.12,n_cpus=32i,n_users=0i 1674246976000000000
> system,host=kilenc uptime=456127i 1674246976000000000
> system,host=kilenc uptime_format="5 days, 6:42" 1674246976000000000
> processes,host=kilenc blocked=1i,dead=0i,idle=569i,paging=0i,parked=1i,running=0i,sleeping=412i,stopped=0i,total=1366i,total_threads=2683i,unknown=0i,zombies=0i 1674246976000000000
> lsf_servers,host=kilenc,status=total value=1i 1674246976000000000
> lsf_servers,host=kilenc,status=ok value=1i 1674246976000000000
> lsf_servers,host=kilenc,status=closed value=0i 1674246976000000000
> lsf_servers,host=kilenc,status=unreachable value=0i 1674246976000000000
> lsf_servers,host=kilenc,status=unavailable value=0i 1674246976000000000
> lsf_jobs,host=kilenc,state=total value=121776i 1674246976000000000
> lsf_jobs,host=kilenc,state=running value=32i 1674246976000000000
> lsf_jobs,host=kilenc,state=suspended value=0i 1674246976000000000
> lsf_jobs,host=kilenc,state=pending value=120771i 1674246976000000000
> lsf_jobs,host=kilenc,state=finished value=973i 1674246976000000000
> lsf_users,host=kilenc,state=numusers value=4i 1674246976000000000
> lsf_users,host=kilenc,state=numgroups value=1i 1674246976000000000
> lsf_users,host=kilenc,state=numactive value=1i 1674246976000000000
> lsf_hosts,host=kilenc,state=clients current=0i,peak=0i 1674246976000000000
> lsf_hosts,host=kilenc,state=servers current=1i,peak=1i 1674246976000000000
> lsf_hosts,host=kilenc,state=cpus current=2i,peak=2i 1674246976000000000
> lsf_hosts,host=kilenc,state=cores current=32i,peak=32i 1674246976000000000
> lsf_hosts,host=kilenc,state=slots current=32i,peak=32i 1674246976000000000
> lsf_mbatchd,host=kilenc,query=job value=0i 1674246976000000000
> lsf_mbatchd,host=kilenc,query=host value=0i 1674246976000000000
> lsf_mbatchd,host=kilenc,query=queue value=2i 1674246976000000000
> lsf_mbatchd,host=kilenc,jobs=submitreqs value=0i 1674246976000000000
> lsf_mbatchd,host=kilenc,jobs=submitted value=0i 1674246976000000000
> lsf_mbatchd,host=kilenc,jobs=dispatched value=19i 1674246976000000000
> lsf_mbatchd,host=kilenc,jobs=completed value=12i 1674246976000000000
> lsf_mbatchd,host=kilenc,jobs=sentremote value=0i 1674246976000000000
> lsf_mbatchd,host=kilenc,jobs=acceptremote value=0i 1674246976000000000
> lsf_mbatchd,host=kilenc,sched=interval value=1i 1674246976000000000
> lsf_mbatchd,host=kilenc,sched=matchhost value=5i 1674246976000000000
> lsf_mbatchd,host=kilenc,sched=buckets value=5i 1674246976000000000
> lsf_mbatchd,host=kilenc,sched=reordered value=7i 1674246976000000000
> lsf_mbatchd,host=kilenc,utilization=slots value=100i 1674246976000000000
> lsf_mbatchd,host=kilenc,utilization=memory value=0i 1674246976000000000
> lsf_mbatchd,fd=free,host=kilenc value=65509i 1674246976000000000
> lsf_mbatchd,fd=used,host=kilenc value=26i 1674246976000000000
> lsf_mbatchd,fd=total,host=kilenc value=65535i 1674246976000000000
> lsf_queues,host=kilenc,name=admin njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> lsf_queues,host=kilenc,name=owners njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> lsf_queues,host=kilenc,name=priority njobs=93951i,pend=93923i,rsv=0i,run=28i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> lsf_queues,host=kilenc,name=night njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> lsf_queues,host=kilenc,name=short njobs=2504i,pend=2504i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> lsf_queues,host=kilenc,name=dataq njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> lsf_queues,host=kilenc,name=normal njobs=1750i,pend=1750i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> lsf_queues,host=kilenc,name=interactive njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> lsf_queues,host=kilenc,name=sendq njobs=22598i,pend=22594i,rsv=0i,run=4i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> lsf_queues,host=kilenc,name=idle njobs=0i,pend=0i,rsv=0i,run=0i,ssusp=0i,susp=0i,ususp=0i 1674246976000000000
> cpu,cpu=cpu0,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu4,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu8,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu12,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu16,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=98.03921568448419,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=1.9607843137324836 1674246977000000000
> cpu,cpu=cpu20,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu24,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu28,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu32,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu36,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu40,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=98.03921568448419,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=1.9607843136879006,usage_user=0 1674246977000000000
> cpu,cpu=cpu44,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu48,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu52,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=0,usage_iowait=100,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu56,host=kilenc usage_guest=0,usage_guest_nice=0,usage_idle=100,usage_iowait=0,usage_irq=0,usage_nice=0,usage_softirq=0,usage_steal=0,usage_system=0,usage_user=0 1674246977000000000
> cpu,cpu=cpu60,host=kilenc usage_guest=
#ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu64,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">87.99999999906868</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">10.000000001155058</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.0000000002764864</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu68,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu72,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">86.27450980280263</span>,usage_iowait<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">11.764705882127403</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.9607843137324836</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu76,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu80,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">92.30769231113655</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.8461538464431086</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.84615384653056</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu84,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">94.11764706486585</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span 
style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">5.882352941197451</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu88,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu92,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">70.58823529344627</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">29.411764701983955</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu96,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">96.15384615040192</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span 
style="color: #f92672;">=</span><span style="color: #ae81ff;">3.8461538460125784</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu100,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">97.99999999813735</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.999999999998181</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu104,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">96.07843137993407</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.92156862782338</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu108,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">96.07843136896838</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.9607843136879006</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.9607843137324836</span> <span 
style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu112,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu116,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">95.91836734305988</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4.08163265313509</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu120,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">84.61538461280144</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.8461538460344413</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">11.53846153830009</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu124,host<span style="color: #f92672;">=</span>kilenc usage_guest<span 
style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">100</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> cpu,cpu<span style="color: #f92672;">=</span>cpu<span style="color: #f92672;">-</span>total,host<span style="color: #f92672;">=</span>kilenc usage_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">93.47826086554115</span>,usage_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3.1055900618243673</span>,usage_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_softirq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,usage_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.484472049468532</span>,usage_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.9316770186919254</span> <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> procstat,exe<span style="color: #f92672;">=</span>mbatchd,host<span style="color: #f92672;">=</span>kilenc,process_name<span style="color: #f92672;">=</span>mbatchd,user<span style="color: #f92672;">=</span>root child_major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,child_minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,cpu_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,cpu_time_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_soft_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.03</span>,cpu_time_user<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0.05</span>,cpu_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,created_at<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1674246974000000000</span>i,involuntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_data<span style="color: #f92672;">=</span><span style="color: #ae81ff;">834994176</span>i,memory_locked<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_rss<span style="color: #f92672;">=</span><span style="color: #ae81ff;">815595520</span>i,memory_stack<span style="color: #f92672;">=</span><span style="color: #ae81ff;">327680</span>i,memory_swap<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.469912528991699</span>,memory_vms<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1091108864</span>i,minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">726</span>i,nice_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20</span>i,num_fds<span style="color: #f92672;">=</span><span style="color: #ae81ff;">10</span>i,num_threads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i,pid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">62056</span>i,ppid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103699</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,read_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">27</span>i,realtime_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_cpu_time_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_cpu_time_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_locked_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_locked_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_rss_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_rss_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8388608</span>i,rlimit_memory_vms_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_vms_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_nice_priority_hard<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_nice_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_num_fds_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">262144</span>i,rlimit_num_fds_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65535</span>i,rlimit_realtime_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_realtime_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_signals_pending_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,rlimit_signals_pending_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,signals_pending<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,voluntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">5</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,write_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">16</span>i <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> procstat,exe<span style="color: #f92672;">=</span>mbschd,host<span style="color: #f92672;">=</span>kilenc,process_name<span style="color: #f92672;">=</span>mbschd,user<span style="color: #f92672;">=</span>lsfadmin child_major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,child_minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2457641</span>i,cpu_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">320</span>i,cpu_time_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.02</span>,cpu_time_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_soft_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8.4</span>,cpu_time_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">312.14</span>,cpu_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1.836645120693344</span>,created_at<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1674227581000000000</span>i,involuntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3553</span>i,major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,memory_data<span style="color: #f92672;">=</span><span style="color: #ae81ff;">228851712</span>i,memory_locked<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_rss<span style="color: #f92672;">=</span><span style="color: #ae81ff;">236847104</span>i,memory_stack<span style="color: #f92672;">=</span><span style="color: #ae81ff;">196608</span>i,memory_swap<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">0</span>i,memory_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0.717257022857666</span>,memory_vms<span style="color: #f92672;">=</span><span style="color: #ae81ff;">246808576</span>i,minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2137969</span>i,nice_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20</span>i,num_fds<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3</span>i,num_threads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,pid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103740</span>i,ppid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103699</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1552384</span>i,read_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">936861</span>i,realtime_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_cpu_time_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_cpu_time_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_locked_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_locked_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_rss_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_rss_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8388608</span>i,rlimit_memory_vms_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_vms_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_nice_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_nice_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_num_fds_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">262144</span>i,rlimit_num_fds_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65535</span>i,rlimit_realtime_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_realtime_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_signals_pending_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,rlimit_signals_pending_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,signals_pending<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">0</span>i,voluntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">43952</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,write_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">42311</span>i <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> procstat_lookup,exe<span style="color: #f92672;">=</span>mbschd,host<span style="color: #f92672;">=</span>kilenc,pid_finder<span style="color: #f92672;">=</span>pgrep,result<span style="color: #f92672;">=</span>success pid_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i,result_code<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,running<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1</span>i <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> procstat,exe<span style="color: #f92672;">=</span>mbatchd,host<span style="color: #f92672;">=</span>kilenc,process_name<span style="color: #f92672;">=</span>mbatchd,user<span style="color: #f92672;">=</span>root child_major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i,child_minor_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4476280</span>i,cpu_time<span style="color: #f92672;">=</span><span style="color: #ae81ff;">177</span>i,cpu_time_guest<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_guest_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_idle<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_iowait<span style="color: #f92672;">=</span><span style="color: #ae81ff;">6.68</span>,cpu_time_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_nice<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_soft_irq<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_steal<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,cpu_time_system<span style="color: #f92672;">=</span><span style="color: #ae81ff;">51.01</span>,cpu_time_user<span style="color: #f92672;">=</span><span style="color: #ae81ff;">126.42</span>,cpu_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>,created_at<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1674227573000000000</span>i,involuntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4993</span>i,major_faults<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3</span>i,memory_data<span style="color: #f92672;">=</span><span style="color: #ae81ff;">834994176</span>i,memory_locked<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_rss<span style="color: #f92672;">=</span><span style="color: #ae81ff;">827785216</span>i,memory_stack<span style="color: #f92672;">=</span><span style="color: #ae81ff;">327680</span>i,memory_swap<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,memory_usage<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2.5068273544311523</span>,memory_vms<span style="color: #f92672;">=</span><span style="color: #ae81ff;">1091108864</span>i,minor_faults<span style="color: 
#f92672;">=</span><span style="color: #ae81ff;">2406945</span>i,nice_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">20</span>i,num_fds<span style="color: #f92672;">=</span><span style="color: #ae81ff;">26</span>i,num_threads<span style="color: #f92672;">=</span><span style="color: #ae81ff;">3</span>i,pid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103699</span>i,ppid<span style="color: #f92672;">=</span><span style="color: #ae81ff;">4103684</span>i,read_bytes<span style="color: #f92672;">=</span><span style="color: #ae81ff;">21008384</span>i,read_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">364726</span>i,realtime_priority<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_cpu_time_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_cpu_time_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_file_locks_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_data_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_locked_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_locked_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">67108864</span>i,rlimit_memory_rss_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_rss_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_stack_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">8388608</span>i,rlimit_memory_vms_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_memory_vms_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">9223372036854775807</span>i,rlimit_nice_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_nice_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_num_fds_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">262144</span>i,rlimit_num_fds_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">65535</span>i,rlimit_realtime_priority_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_realtime_priority_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,rlimit_signals_pending_hard<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,rlimit_signals_pending_soft<span style="color: #f92672;">=</span><span style="color: #ae81ff;">118856</span>i,signals_pending<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,voluntary_context_switches<span style="color: #f92672;">=</span><span style="color: #ae81ff;">172583</span>i,write_bytes<span style="color: #f92672;">=</span><span style="color: 
#ae81ff;">1562181632</span>i,write_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">12164760</span>i <span style="color: #ae81ff;">1674246977000000000</span> +<span style="color: #f92672;">&gt;</span> procstat_lookup,exe<span style="color: #f92672;">=</span>mbatchd,host<span style="color: #f92672;">=</span>kilenc,pid_finder<span style="color: #f92672;">=</span>pgrep,result<span style="color: #f92672;">=</span>success pid_count<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i,result_code<span style="color: #f92672;">=</span><span style="color: #ae81ff;">0</span>i,running<span style="color: #f92672;">=</span><span style="color: #ae81ff;">2</span>i <span style="color: #ae81ff;">1674246977000000000</span> +</code></pre></div> + +</details> + +<hr /> + +<ol start="6"> +<li>Assuming there were no errors in the previous step with telegraf, proceed to start the telegraf process via systemd.</li> +</ol> +<div class="highlight"><pre><code class="language-plaintext">[root@kilenc telegraf]# systemctl start telegraf +[root@kilenc telegraf]# systemctl status telegraf +● telegraf.service - Telegraf + Loaded: loaded (/usr/lib/systemd/system/telegraf.service; enabled; vendor preset: disabled) + Active: active (running) since Thu 2023-01-19 14:13:51 EST; 1 day 1h ago + Docs: https://github.com/influxdata/telegraf + Main PID: 3225959 (telegraf) + Tasks: 35 (limit: 190169) + Memory: 192.6M + CGroup: /system.slice/telegraf.service + └─3225959 /usr/bin/telegraf -config /etc/telegraf/telegraf.conf -config-directory /etc/tele&gt; + +Jan 19 14:13:51 kilenc systemd[1]: Starting Telegraf... +Jan 19 14:13:51 kilenc systemd[1]: Started Telegraf.</code></pre></div> + +<ol start="7"> +<li>On the host running the database instance, <em>adatbazis</em>, perform queries to check whether the database <em>telegraf</em> exists, as well as checking if LSF related data is being logged. +This is confirmed in the output below.</li> +</ol> +<hr /> + +<details> + <strong>Output from InfluxDB queries. 
Click to expand!</strong> + <div class="highlight"><pre><code class="language-js">[<span style="color: #a6e22e;">root</span><span style="color: #960050; background-color: #1e0010;">@</span><span style="color: #a6e22e;">adatbazis</span> <span style="color: #a6e22e;">fedora</span>]<span style="color: #960050; background-color: #1e0010;">#</span> <span style="color: #a6e22e;">influx</span> +<span style="color: #a6e22e;">Connected</span> <span style="color: #a6e22e;">to</span> <span style="color: #a6e22e;">https</span><span style="color: #f92672;">:</span><span style="color: #75715e;">//localhost:8086 version 1.8.10 +</span><span style="color: #75715e;"></span><span style="color: #a6e22e;">InfluxDB</span> <span style="color: #a6e22e;">shell</span> <span style="color: #a6e22e;">version</span><span style="color: #f92672;">:</span> <span style="color: #ae81ff;">1.8</span>.<span style="color: #ae81ff;">10</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">auth</span> +<span style="color: #a6e22e;">username</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">influx</span> +<span style="color: #a6e22e;">password</span><span style="color: #f92672;">:</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">show</span> <span style="color: #a6e22e;">databases</span> +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">databases</span> +<span style="color: #a6e22e;">name</span> +<span style="color: #f92672;">----</span> +<span style="color: #ae81ff;">_</span><span style="color: #a6e22e;">internal</span> +<span style="color: #a6e22e;">telegraf</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">use</span> <span style="color: #a6e22e;">telegraf</span> +<span style="color: #a6e22e;">Using</span> <span style="color: #a6e22e;">database</span> <span style="color: #a6e22e;">telegraf</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">show</span> <span style="color: #a6e22e;">field</span> <span style="color: #a6e22e;">keys</span> +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">cpu</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">usage_guest</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_guest_nice</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_idle</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_iowait</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_irq</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_nice</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_softirq</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_steal</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_system</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">usage_user</span> <span style="color: #66d9ef;">float</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">disk</span> +<span style="color: #a6e22e;">fieldKey</span> <span 
style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">inodes_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">inodes_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">inodes_used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used_percent</span> <span style="color: #66d9ef;">float</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">diskio</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">io_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">iops_in_progress</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">merged_reads</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">merged_writes</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">read_bytes</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">read_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">reads</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">weighted_io_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">write_bytes</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">write_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">writes</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">kernel</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">boot_time</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">context_switches</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">entropy_avail</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">interrupts</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">processes_forked</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_hosts</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">current</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">peak</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: 
#a6e22e;">lsf_jobs</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">value</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_mbatchd</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">value</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_queues</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">njobs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">pend</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">rsv</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">run</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ssusp</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">susp</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ususp</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_servers</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">value</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_users</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">value</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">mem</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">active</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">available</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">available_percent</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">buffered</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">cached</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">commit_limit</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">committed_as</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">dirty</span> <span style="color: #a6e22e;">integer</span> +<span style="color: 
#a6e22e;">free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">high_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">high_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">huge_page_size</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">huge_pages_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">huge_pages_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">inactive</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">low_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">low_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">mapped</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">page_tables</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">shared</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">slab</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">sreclaimable</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">sunreclaim</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">swap_cached</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">swap_free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">swap_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used_percent</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">vmalloc_chunk</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">vmalloc_total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">vmalloc_used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">write_back</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">write_back_tmp</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">net</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">bytes_recv</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">bytes_sent</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">drop_in</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">drop_out</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">err_in</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">err_out</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inaddrmaskreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inaddrmasks</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_incsumerrors</span> <span 
style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_indestunreachs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inechoreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inechos</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inmsgs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inparmprobs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_inredirects</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_insrcquenchs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_intimeexcds</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_intimestampreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_intimestamps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outaddrmaskreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outaddrmasks</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outdestunreachs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outechoreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outechos</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outmsgs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outparmprobs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outredirects</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outsrcquenchs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outtimeexcds</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outtimestampreps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmp_outtimestamps</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_intype0</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_intype3</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_intype8</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_outtype0</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_outtype3</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">icmpmsg_outtype8</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_defaultttl</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_forwarding</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_forwdatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_fragcreates</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_fragfails</span> <span style="color: #a6e22e;">integer</span> +<span style="color: 
#a6e22e;">ip_fragoks</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_inaddrerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_indelivers</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_indiscards</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_inhdrerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_inreceives</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_inunknownprotos</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_outdiscards</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_outnoroutes</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_outrequests</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_reasmfails</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_reasmoks</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_reasmreqds</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ip_reasmtimeout</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">packets_recv</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">packets_sent</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_activeopens</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_attemptfails</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_currestab</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_estabresets</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_incsumerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_inerrs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_insegs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_maxconn</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_outrsts</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_outsegs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_passiveopens</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_retranssegs</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_rtoalgorithm</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_rtomax</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">tcp_rtomin</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_ignoredmulti</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_incsumerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_indatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_inerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_memerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_noports</span> <span style="color: 
#a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_outdatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_rcvbuferrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udp_sndbuferrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_ignoredmulti</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_incsumerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_indatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_inerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_memerrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_noports</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_outdatagrams</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_rcvbuferrors</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">udplite_sndbuferrors</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">processes</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">blocked</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">dead</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">idle</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">paging</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">parked</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">running</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">sleeping</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">stopped</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total_threads</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">unknown</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">zombies</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">procstat</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">child_major_faults</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">child_minor_faults</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">cpu_time_guest</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_guest_nice</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_idle</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_iowait</span> <span style="color: 
#66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_irq</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_nice</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_soft_irq</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_steal</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_system</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_time_user</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">cpu_usage</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">created_at</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">involuntary_context_switches</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">major_faults</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_data</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_locked</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_rss</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_stack</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_swap</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">memory_usage</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">memory_vms</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">minor_faults</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">num_threads</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">pid</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">ppid</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">voluntary_context_switches</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">procstat_lookup</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">pid_count</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">result_code</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">running</span> <span style="color: #a6e22e;">integer</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">swap</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">free</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #66d9ef;">in</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">out</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">total</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">used_percent</span> <span 
style="color: #66d9ef;">float</span> + +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">system</span> +<span style="color: #a6e22e;">fieldKey</span> <span style="color: #a6e22e;">fieldType</span> +<span style="color: #f92672;">--------</span> <span style="color: #f92672;">---------</span> +<span style="color: #a6e22e;">load1</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">load15</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">load5</span> <span style="color: #66d9ef;">float</span> +<span style="color: #a6e22e;">n_cpus</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">n_unique_users</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">n_users</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">uptime</span> <span style="color: #a6e22e;">integer</span> +<span style="color: #a6e22e;">uptime_format</span> <span style="color: #a6e22e;">string</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">select</span> <span style="color: #f92672;">*</span> <span style="color: #a6e22e;">from</span> <span style="color: #a6e22e;">metrics</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">SELECT</span> <span style="color: #f92672;">*</span> <span style="color: #a6e22e;">FROM</span> <span style="color: #e6db74;">"lsf_hosts"</span>; +<span style="color: #a6e22e;">name</span><span style="color: #f92672;">:</span> <span style="color: #a6e22e;">lsf_hosts</span> +<span style="color: #a6e22e;">time</span> <span style="color: #a6e22e;">current</span> <span style="color: #a6e22e;">host</span> <span style="color: #a6e22e;">peak</span> <span style="color: #a6e22e;">state</span> +<span style="color: #f92672;">----</span> <span style="color: #f92672;">-------</span> <span style="color: #f92672;">----</span> <span style="color: #f92672;">----</span> <span style="color: #f92672;">-----</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">clients</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">slots</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">cores</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">servers</span> +<span style="color: #ae81ff;">1674493170000000000</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">cpus</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">servers</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">2</span> <span style="color: 
#a6e22e;">kilenc</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">cpus</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">slots</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">clients</span> +<span style="color: #ae81ff;">1674493200000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">cores</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">clients</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">cores</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">cpus</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">servers</span> +<span style="color: #ae81ff;">1674493230000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">slots</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">1</span> <span style="color: #a6e22e;">servers</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">slots</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">0</span> <span style="color: #a6e22e;">clients</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">2</span> <span style="color: #a6e22e;">cpus</span> +<span style="color: #ae81ff;">1674493260000000000</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">kilenc</span> <span style="color: #ae81ff;">32</span> <span style="color: #a6e22e;">cores</span> +<span style="color: #f92672;">&gt;</span> <span style="color: #a6e22e;">quit</span> +</code></pre></div> + +</details> + +<hr /> + +<ol start="8"> +<li>With telegraf successfully logging data to the InfluxDB instance, it will now be possible to create a data source in Grafana in order to create a dashboard containing LSF metrics. +As noted at the outset, this article is not meant to be an extensive guide to the creation of dashoards in Grafana. 
+
+<ol start="8">
+<li>With telegraf successfully logging data to the InfluxDB instance, a data source can now be created in Grafana, which will in turn be used to build a dashboard of LSF metrics.
+As noted at the outset, this article is not meant to be an extensive guide to the creation of dashboards in Grafana.
+In the Grafana navigation select <em>Configuration</em> &gt; <em>Data sources</em>.</li>
+</ol>
+<figure><img src="https://www.gaborsamu.com/images/configure_datasource.png" />
+</figure>
+
+<ol start="9">
+<li>Select the <em>Add data source</em> button, followed by InfluxDB, which is listed under <em>Time series databases</em>. On the settings page specify the following values (the provisioning sketch shown earlier achieves the same result declaratively):</li>
+</ol>
+<hr />
+
+<table>
+<thead>
+<tr>
+<th>Variable</th>
+<th>Value</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>URL</td>
+<td><em>http://adatbazis:8086</em></td>
+</tr>
+<tr>
+<td>Database</td>
+<td><em>telegraf</em></td>
+</tr>
+<tr>
+<td>Basic auth</td>
+<td>(enable)</td>
+</tr>
+<tr>
+<td>User</td>
+<td>&lt;influxdb_username&gt;</td>
+</tr>
+<tr>
+<td>Password</td>
+<td>&lt;influxdb_password&gt;</td>
+</tr>
+</tbody>
+</table>
+<hr />
+
+<p>Next, click on <em>Save &amp; test</em>. If all variables and settings were properly specified, the message <em>datasource is working. 17 measurements found</em> will be displayed.</p>
+
+<figure><img src="https://www.gaborsamu.com/images/test_datasource.png" />
+</figure>
+
+<ol start="10">
+<li>With the datasource configured in Grafana, the final step is to create a dashboard. Creating a dashboard means creating panels, each of which displays data pulled from the configured data
+source using targeted queries. With a bit of effort, I was able to piece together the following dashboard, which includes both metrics from LSF and metrics from the Telegraf
+<em>inputs.procstat</em> input for the LSF processes <em>mbatchd</em>, <em>mbschd</em> and the management <em>lim</em>.</li>
+</ol>
+<figure><img src="https://www.gaborsamu.com/images/lsf_dashboard3.jpg" />
+</figure>
+
+<hr />
+
+<details>
+ <strong>Example dashboard definition (JSON). Click to expand!</strong>
+ <div class="highlight"><pre><code class="language-json">{ + <span style="color: #f92672;">"annotations"</span>: { + <span style="color: #f92672;">"list"</span>: [ + { + <span style="color: #f92672;">"builtIn"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"datasource"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"grafana"</span> + }, + <span style="color: #f92672;">"enable"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"iconColor"</span>: <span style="color: #e6db74;">"rgba(0, 211, 255, 1)"</span>, + <span style="color: #f92672;">"name"</span>: <span style="color: #e6db74;">"Annotations &amp; Alerts"</span>, + <span style="color: #f92672;">"target"</span>: { + <span style="color: #f92672;">"limit"</span>: <span style="color: #ae81ff;">100</span>, + <span style="color: #f92672;">"matchAny"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tags"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"dashboard"</span> + }, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"dashboard"</span> + } + ] + }, + <span style="color: #f92672;">"editable"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"fiscalYearStartMonth"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"graphTooltip"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"id"</span>: <span style="color: 
#ae81ff;">17</span>, + <span style="color: #f92672;">"links"</span>: [], + <span style="color: #f92672;">"liveNow"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"panels"</span>: [ + { + <span style="color: #f92672;">"collapsed"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">24</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">0</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">35</span>, + <span style="color: #f92672;">"panels"</span>: [], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Cluster aggregate current statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"row"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">"A view of the current status of the LSF servers in the cluster. Servers can be in one of four states: Ok, Unavailable, Closed and Unreachable. "</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + } + }, + <span style="color: #f92672;">"decimals"</span>: <span style="color: #ae81ff;">2</span>, + <span style="color: #f92672;">"mappings"</span>: [] + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">1</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">32</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"displayLabels"</span>: [ + <span style="color: #e6db74;">"name"</span>, + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"table"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"sortBy"</span>: <span style="color: #e6db74;">"Value"</span>, 
+ <span style="color: #f92672;">"sortDesc"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"values"</span>: [ + <span style="color: #e6db74;">"value"</span>, + <span style="color: #e6db74;">"percent"</span> + ] + }, + <span style="color: #f92672;">"pieType"</span>: <span style="color: #e6db74;">"donut"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"multi"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Ok"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"ok"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Closed"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: 
#f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"closed"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Unreachable"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"unreachable"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Unavailable"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"D"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"unavailable"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Current aggregate LSF server statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"piechart"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: 
#f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">1</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">43</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span 
style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"distinct"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"running"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Currently running"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"light-red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">1</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">45</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span 
style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"suspended"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Currently suspended"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + } + }, + <span style="color: #f92672;">"decimals"</span>: <span style="color: #ae81ff;">2</span>, + <span style="color: #f92672;">"mappings"</span>: [] + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: 
#ae81ff;">9</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">15</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">1</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">33</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"displayLabels"</span>: [ + <span style="color: #e6db74;">"name"</span>, + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"table"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"sortBy"</span>: <span style="color: #e6db74;">"Value"</span>, + <span style="color: #f92672;">"sortDesc"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"values"</span>: [ + <span style="color: #e6db74;">"value"</span>, + <span style="color: #e6db74;">"percent"</span> + ] + }, + <span style="color: #f92672;">"pieType"</span>: <span style="color: #e6db74;">"donut"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"multi"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Running"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span 
style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"running"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Pending"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"pending"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Suspended"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span 
style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"suspended"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Current aggregate LSF job statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"piechart"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">5</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">44</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: 
#f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"pending"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Currently pending "</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span 
style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">5</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">46</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: 
#e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"finished"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Finished (past hour)"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">"Spectrum LSF queue statistics. Here we show jobs in running, pending and suspended jobs. "</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">9</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">41</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"lcd"</span>, + <span style="color: #f92672;">"minVizHeight"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"minVizWidth"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"horizontal"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showUnfilled"</span>: <span style="color: #66d9ef;">true</span> + }, + <span 
style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Running"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_queues"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"run"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"name"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"=~"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"/^$Queue$/"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Pending"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_queues"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: 
#f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"pend"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"name"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"=~"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"/^$Queue$/"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Suspended"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_queues"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"susp"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"name"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"=~"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"/^$Queue$/"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Current queue statistics ($Queue)"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"bargauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: 
#f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">9</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">53</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: 
#f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"servers"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">9</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">54</span>, + <span style="color: 
#f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cpus"</span> + } + ] + } + ], + <span 
style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"CPUs"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"stepBefore"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"log"</span>: <span style="color: #ae81ff;">2</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"log"</span> + }, + <span style="color: #f92672;">"showPoints"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + 
<span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">15</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">9</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">42</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"bottom"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Running"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span 
style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"running"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Pending"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"pending"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Suspended"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: 
#e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"suspended"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Aggregate LSF job statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"light-red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">13</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">55</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span 
style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cores"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Cores"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: 
#f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">13</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">56</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: 
#e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"slots"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Slots"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"collapsed"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">24</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">17</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">37</span>, + <span style="color: #f92672;">"panels"</span>: [], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF scheduler statistics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"row"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span 
style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"graph"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"linear"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"linear"</span> + }, + <span style="color: #f92672;">"showPoints"</span>: <span style="color: #e6db74;">"never"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"short"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">18</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">20</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"graph"</span>: {}, + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: 
#66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"7.5.15"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"CPU utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"cpu_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Memory utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span 
style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"memory_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Number of threads"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"num_threads"</span> + ], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"File descriptors"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_mbatchd"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"D"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"fd"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"used"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + } + ], + <span style="color: 
#f92672;">"title"</span>: <span style="color: #e6db74;">"LSF mbatchd process metrics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"graph"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"linear"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"linear"</span> + }, + <span style="color: #f92672;">"showPoints"</span>: <span style="color: #e6db74;">"never"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: 
#66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"short"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">18</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">57</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"graph"</span>: {}, + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"7.5.15"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"CPU utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"cpu_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: 
#f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"lim"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Memory utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"memory_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"lim"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Number of threads"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: 
#f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"num_threads"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"lim"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF management lim process metrics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span style="color: 
#ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"graph"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"linear"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"linear"</span> + }, + <span style="color: #f92672;">"showPoints"</span>: <span style="color: #e6db74;">"never"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"short"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">26</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">27</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"graph"</span>: {}, + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: 
#66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"7.5.15"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Job buckets"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_mbatchd"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"sched"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"buckets"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Matching host criteria"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: 
#f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_mbatchd"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"sched"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"matchhost"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Scheduling interval (seconds)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_mbatchd"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"sched"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"interval"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF scheduler metrics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"palette-classic"</span> + }, + <span style="color: #f92672;">"custom"</span>: { + <span style="color: #f92672;">"axisCenteredZero"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"axisColorMode"</span>: <span style="color: #e6db74;">"text"</span>, + <span style="color: #f92672;">"axisLabel"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"axisPlacement"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"barAlignment"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"drawStyle"</span>: <span style="color: #e6db74;">"line"</span>, + <span style="color: #f92672;">"fillOpacity"</span>: <span style="color: #ae81ff;">10</span>, + <span style="color: #f92672;">"gradientMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"hideFrom"</span>: { + <span style="color: #f92672;">"graph"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"legend"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"tooltip"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"viz"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"lineInterpolation"</span>: <span style="color: #e6db74;">"linear"</span>, + <span style="color: #f92672;">"lineWidth"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"pointSize"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"scaleDistribution"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"linear"</span> + }, + <span style="color: 
#f92672;">"showPoints"</span>: <span style="color: #e6db74;">"never"</span>, + <span style="color: #f92672;">"spanNulls"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"stacking"</span>: { + <span style="color: #f92672;">"group"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"thresholdsStyle"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"off"</span> + } + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + }, + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #ae81ff;">80</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"short"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">8</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">26</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">58</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"graph"</span>: {}, + <span style="color: #f92672;">"legend"</span>: { + <span style="color: #f92672;">"calcs"</span>: [], + <span style="color: #f92672;">"displayMode"</span>: <span style="color: #e6db74;">"list"</span>, + <span style="color: #f92672;">"placement"</span>: <span style="color: #e6db74;">"right"</span>, + <span style="color: #f92672;">"showLegend"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"tooltip"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"single"</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #e6db74;">"none"</span> + } + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"7.5.15"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"CPU utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span 
style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"cpu_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbschd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Memory utilization (%)"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"B"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"memory_usage"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span 
style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + }, + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Number of threads"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"procstat"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"C"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"num_threads"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"exe"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"mbatchd"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF mbschd process metrics"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"timeseries"</span> + }, + { + <span style="color: 
#f92672;">"collapsed"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">24</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">34</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">39</span>, + <span style="color: #f92672;">"panels"</span>: [], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Additional metrics (scratch)"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"row"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">2</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span 
style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"distinct"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"running"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Running"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"y"</span>: <span 
style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">5</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"pending"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Pending"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: 
#e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">6</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">6</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: 
#f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"suspended"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Suspended"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">7</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: 
<span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_jobs"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"default"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"finished"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Finished"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: 
#f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">15</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Ok"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: 
#f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"ok"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Ok"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">15</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">16</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Closed"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span 
style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"closed"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Closed"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">18</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">17</span>, + <span style="color: 
#f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Unreachable"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"unreachable"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Unreachable"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">21</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">35</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">18</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Unavailable"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span 
style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_servers"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"value"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"mean"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"status"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"unavailable"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Unavailable"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">21</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: 
#f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Clients"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"clients"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Clients"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span 
style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">22</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: 
#e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"servers"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">6</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">23</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: 
#f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cpus"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span 
style="color: #e6db74;">"CPUs"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">24</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Cores"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: 
#e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cores"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Cores"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"absolute"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + } + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span 
style="color: #ae81ff;">39</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">25</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"colorMode"</span>: <span style="color: #e6db74;">"value"</span>, + <span style="color: #f92672;">"graphMode"</span>: <span style="color: #e6db74;">"none"</span>, + <span style="color: #f92672;">"justifyMode"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"text"</span>: {}, + <span style="color: #f92672;">"textMode"</span>: <span style="color: #e6db74;">"auto"</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"alias"</span>: <span style="color: #e6db74;">"Slots"</span>, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: 
#e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"slots"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Slots"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"stat"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"green"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">43</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">52</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"servers"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Servers"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"yellow"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: 
#66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">6</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">43</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">51</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ 
+ { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cpus"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"CPUs"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: #f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"light-red"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">9</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">43</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">50</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: 
#f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"cores"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Cores"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + }, + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"description"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"fieldConfig"</span>: { + <span style="color: #f92672;">"defaults"</span>: { + <span style="color: #f92672;">"color"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"thresholds"</span> + }, + <span style="color: 
#f92672;">"mappings"</span>: [], + <span style="color: #f92672;">"min"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"thresholds"</span>: { + <span style="color: #f92672;">"mode"</span>: <span style="color: #e6db74;">"percentage"</span>, + <span style="color: #f92672;">"steps"</span>: [ + { + <span style="color: #f92672;">"color"</span>: <span style="color: #e6db74;">"blue"</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #66d9ef;">null</span> + } + ] + }, + <span style="color: #f92672;">"unit"</span>: <span style="color: #e6db74;">"none"</span> + }, + <span style="color: #f92672;">"overrides"</span>: [] + }, + <span style="color: #f92672;">"gridPos"</span>: { + <span style="color: #f92672;">"h"</span>: <span style="color: #ae81ff;">4</span>, + <span style="color: #f92672;">"w"</span>: <span style="color: #ae81ff;">3</span>, + <span style="color: #f92672;">"x"</span>: <span style="color: #ae81ff;">12</span>, + <span style="color: #f92672;">"y"</span>: <span style="color: #ae81ff;">43</span> + }, + <span style="color: #f92672;">"id"</span>: <span style="color: #ae81ff;">49</span>, + <span style="color: #f92672;">"options"</span>: { + <span style="color: #f92672;">"orientation"</span>: <span style="color: #e6db74;">"auto"</span>, + <span style="color: #f92672;">"reduceOptions"</span>: { + <span style="color: #f92672;">"calcs"</span>: [ + <span style="color: #e6db74;">"lastNotNull"</span> + ], + <span style="color: #f92672;">"fields"</span>: <span style="color: #e6db74;">"/^lsf_hosts\\.last$/"</span>, + <span style="color: #f92672;">"values"</span>: <span style="color: #66d9ef;">false</span> + }, + <span style="color: #f92672;">"showThresholdLabels"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"showThresholdMarkers"</span>: <span style="color: #66d9ef;">true</span> + }, + <span style="color: #f92672;">"pluginVersion"</span>: <span style="color: #e6db74;">"9.1.6"</span>, + <span style="color: #f92672;">"targets"</span>: [ + { + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"eNfWCy5Vk"</span> + }, + <span style="color: #f92672;">"groupBy"</span>: [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"$__interval"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"time"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"null"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"fill"</span> + } + ], + <span style="color: #f92672;">"hide"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"measurement"</span>: <span style="color: #e6db74;">"lsf_hosts"</span>, + <span style="color: #f92672;">"orderByTime"</span>: <span style="color: #e6db74;">"ASC"</span>, + <span style="color: #f92672;">"policy"</span>: <span style="color: #e6db74;">"autogen"</span>, + <span style="color: #f92672;">"refId"</span>: <span style="color: #e6db74;">"A"</span>, + <span style="color: #f92672;">"resultFormat"</span>: <span style="color: #e6db74;">"time_series"</span>, + <span style="color: #f92672;">"select"</span>: [ + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"current"</span> + ], + <span style="color: 
#f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + }, + { + <span style="color: #f92672;">"params"</span>: [], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"last"</span> + } + ], + [ + { + <span style="color: #f92672;">"params"</span>: [ + <span style="color: #e6db74;">"peak"</span> + ], + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"field"</span> + } + ] + ], + <span style="color: #f92672;">"tags"</span>: [ + { + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"host"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"kilenc"</span> + }, + { + <span style="color: #f92672;">"condition"</span>: <span style="color: #e6db74;">"AND"</span>, + <span style="color: #f92672;">"key"</span>: <span style="color: #e6db74;">"state"</span>, + <span style="color: #f92672;">"operator"</span>: <span style="color: #e6db74;">"="</span>, + <span style="color: #f92672;">"value"</span>: <span style="color: #e6db74;">"slots"</span> + } + ] + } + ], + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"Slots"</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"gauge"</span> + } + ], + <span style="color: #f92672;">"refresh"</span>: <span style="color: #e6db74;">"30s"</span>, + <span style="color: #f92672;">"schemaVersion"</span>: <span style="color: #ae81ff;">37</span>, + <span style="color: #f92672;">"style"</span>: <span style="color: #e6db74;">"dark"</span>, + <span style="color: #f92672;">"tags"</span>: [], + <span style="color: #f92672;">"templating"</span>: { + <span style="color: #f92672;">"list"</span>: [ + { + <span style="color: #f92672;">"current"</span>: { + <span style="color: #f92672;">"selected"</span>: <span style="color: #66d9ef;">true</span>, + <span style="color: #f92672;">"text"</span>: [ + <span style="color: #e6db74;">"priority"</span> + ], + <span style="color: #f92672;">"value"</span>: [ + <span style="color: #e6db74;">"priority"</span> + ] + }, + <span style="color: #f92672;">"datasource"</span>: { + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"influxdb"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"oSnSlVc4k"</span> + }, + <span style="color: #f92672;">"definition"</span>: <span style="color: #e6db74;">"show tag values from \"lsf_queues\" with key=\"name\""</span>, + <span style="color: #f92672;">"hide"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: #f92672;">"includeAll"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"multi"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"name"</span>: <span style="color: #e6db74;">"Queue"</span>, + <span style="color: #f92672;">"options"</span>: [], + <span style="color: #f92672;">"query"</span>: <span style="color: #e6db74;">"show tag values from \"lsf_queues\" with key=\"name\""</span>, + <span style="color: #f92672;">"refresh"</span>: <span style="color: #ae81ff;">1</span>, + <span style="color: #f92672;">"regex"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"skipUrlSync"</span>: <span style="color: #66d9ef;">false</span>, + <span style="color: #f92672;">"sort"</span>: <span style="color: #ae81ff;">0</span>, + <span style="color: 
#f92672;">"tagValuesQuery"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"tagsQuery"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"type"</span>: <span style="color: #e6db74;">"query"</span>, + <span style="color: #f92672;">"useTags"</span>: <span style="color: #66d9ef;">false</span> + } + ] + }, + <span style="color: #f92672;">"time"</span>: { + <span style="color: #f92672;">"from"</span>: <span style="color: #e6db74;">"now-1h"</span>, + <span style="color: #f92672;">"to"</span>: <span style="color: #e6db74;">"now"</span> + }, + <span style="color: #f92672;">"timepicker"</span>: {}, + <span style="color: #f92672;">"timezone"</span>: <span style="color: #e6db74;">""</span>, + <span style="color: #f92672;">"title"</span>: <span style="color: #e6db74;">"LSF cluster status"</span>, + <span style="color: #f92672;">"uid"</span>: <span style="color: #e6db74;">"ORojp8cVz"</span>, + <span style="color: #f92672;">"version"</span>: <span style="color: #ae81ff;">160</span>, + <span style="color: #f92672;">"weekStart"</span>: <span style="color: #e6db74;">""</span> +} +</code></pre></div> + +</details> + +<hr /> + +<p>As you can see, with a short plugin script to collect information from LSF, it&rsquo;s possible to monitor your LSF cluster using the TIG stack. It&rsquo;s important to note that there are powerful +monitoring and reporting tools available from IBM as add-ons to LSF; IBM Spectrum LSF RTM and IBM Spectrum LSF Explorer. You can find more details about the add-on capabilities for LSF +<a href="https://www.ibm.com/products/hpc-workload-management/resources">here</a>.</p>Ramblings of a supercomputing enthusiast.Much like dashboards in automobiles, dashboards in the context of HPC infrastructure are crucial to get an understanding of what&rsquo;s happening under the hood of your HPC cluster - at a glance. During my IT career, I&rsquo;ve used a myriad of monitoring solutions ranging from SNMP and Ganglia, to the ELK (Elasticsearch, Logstash, Kibana) stack. For example, I&rsquo;ve recently written an overview on how it is possible to visualize IBM Spectrum LSF (LSF) data in Grafana. LSF is an HPC job scheduler which brings to the table three decades of experience in workload and resource management.Adam’s weekly (-ish) update, 2022-12-202022-12-20T18:14:52-07:002022-12-20T18:14:52-07:00https://hpc.social/personal-blog/2022/adam-s-weekly-ish-update-2022-12-20<h2>What&#8217;s new</h2> + +<p>The past few weeks have been on the intense side at work, so I completely lost track of the blog and haven&#8217;t had a chance to write much in that time. However, I&#8217;m now on a holiday break, and finally have time to sit down at a keyboard to write more than code and Slack messages.</p> + +<p><span id="more-289"></span></p> + +<p>One of the highlights of the past few weeks was a trip to San Jose, and the NVIDIA headquarters. I changed teams at work back in July, transferring from a group that was closely integrated with product management, to a more straightforward engineering team which <a href="https://blogs.nvidia.com/blog/2020/08/14/making-selene-pandemic-ai/">designs and builds new high-performance computing systems</a>. </p> + +<p>This was the first chance I&#8217;ve had to meet up with other members of my new team in person, and it was a really wonderful experience to be in the same physical space as folks who were previously just images on my screen. 
I love working remotely, but it&#8217;s also great to be able to stand in front of a white board with someone and brainstorm, or get coffee and just have a chat with a coworker outside of a video call with an agenda.</p> + +<p>(Plus, we were all careful and managed to avoid catching COVID from each other! Which was a win on its own.)</p> + +<p>Now, for the next two weeks I&#8217;m off work, and planning to take some time to relax and spend time on projects that are harder to focus on during busy work weeks. Expect (maybe) less about computers in my blog and social feeds, and more about D&amp;D, baking, and tasty cocktails.</p> + +<h2>What I&#8217;m reading, watching, and listening to</h2> + +<p>I&#8217;ve been a bit too scattered to focus on actual books the past few weeks, but I did find time for a few interesting articles and podcasts. In particular,</p> + +<ul> +<li><a href="https://acoup.blog/2022/12/02/collections-why-roman-egypt-was-such-a-strange-province/">&#8220;Why Roman Egypt was such a strange province&#8221;</a>, from Bret Devereaux: As usual from Devereaux, an accessible but extremely detailed discussion of why so much of what we know about the Roman empire is from Egyptian records, but why that also might not be representative of the broader empire.</li> + + + +<li><a href="https://willgallego.com/2022/12/18/emoji-as-incident-resolution-tools/">&#8220;Emoji as incident resolution tools&#8221;</a>, from Will Gallego: A fun discussion of how using emoji as part of a team&#8217;s communication can add nuance and shared understanding during incident management, along with a discussion of the disadvantages and costs associated with the practice.</li> + + + +<li><a href="https://www.mikulskibartosz.name/modern-software-architecture-in-2022/">&#8220;What does modern software architecture look like in 2022?&#8221;</a>, from Bartosz Mikulski: A nice article which discusses how service-oriented software architecture can often include an explicit expectation of change. For example, the architecture might include notes on an ongoing deprecation of a library, or might signpost the need to factor a new microservice out when overall system load gets high enough.</li> + + + +<li><a href="https://www.bradyheywood.com.au/podcasts/">The Brady Heywood podcast</a>: Found via the <a href="https://oxide.computer/podcasts/oxide-and-friends/1137359">Oxide and Friends podcast</a>, the Brady Heywood podcast is a series on engineering disasters and their consequences from a forensic engineering firm. It&#8217;s mostly not being updated any more (with the podcasters moving on to a separate series on complexity science), but it has a deep back catalog of good episodes, and includes thoughtful discussions of human factors, safety engineering, and how organizational pressures become manifest in engineering artifacts.</li> +</ul> + +<h2>Recent recipes</h2> + +<ul> +<li><a href="https://smittenkitchen.com/2016/12/homemade-irish-cream/">Smitten Kitchen&#8217;s Homemade Irish Cream</a>: This is a recipe I make every year, and I often give away small bottles of it as holiday gifts. It&#8217;s really ridiculously tasty, much better than Baileys or similar, and good either on its own or in hot chocolate.</li> + + + +<li><a href="https://smittenkitchen.com/2014/12/fairytale-of-new-york/">Smitten Kitchen&#8217;s Fairytale of New York</a>: This is a really tasty whiskey cocktail, and the star of the show is a &#8220;winter warmth syrup&#8221; that substitutes in for simple syrup. 
The syrup is simply very tasty, and turns what&#8217;s effectively an Old Fashioned variant into a lovely holiday cocktail.</li> + + + +<li>Sparkling gingerbread from <a href="http://www.apt2bbakingco.com/snacking-cakes">Yossy Arefi&#8217;s Snacking Cakes</a>: This recipe takes a little more prep than most of Arefi&#8217;s &#8220;snacking cakes&#8221;, as it includes ginger three ways (ground, fresh, and crystallized), but it&#8217;s worth the few minutes of extra work.</li> +</ul> + +<h2>Pet photos</h2> + +<figure class="wp-block-image size-large is-resized"><img alt="A white calico cat and a gray tabby cat lounging on a large brown pet bed in front of a gas fireplace." class="wp-image-295" height="512" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_7207-768x1024.jpeg" width="384" /><figcaption class="wp-element-caption">I&#8217;m pretty sure these two want me to turn the fireplace on.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="A gray tabby cat lounges on a dog bed, while a golden doodle lays on the floor nearby and looks forlornly at the bed." class="wp-image-294" height="512" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_1725-1024x1024.jpeg" width="512" /><figcaption class="wp-element-caption">Just Percy bullying the dog by stealing his bed.</figcaption></figure> Thinking Out Loud. What&#8217;s new. Visualizing Spectrum LSF data with Grafana (2022-12-13T00:06:51-07:00) https://hpc.social/personal-blog/2022/visualizing-spectrum-lsf-data-with-grafana <p><strong>Overview</strong></p> + +<p>System monitoring is a fundamental part of IT best practices. High performance computing (HPC) environments are no exception to this. At the high-end, HPC clusters can consist of +thousands of servers, processing millions of jobs per day. HPC admins need ways to monitor the overall cluster to determine system status and availability through to the efficiency +of workloads. Servers today produce a wide array of metrics which can be monitored, for example, to check for various conditions. Additionally, workload schedulers also produce a wealth +of data about jobs. Having a single dashboard to show this type of detail can be of great benefit.</p> + +<p><a href="https://www.ibm.com/products/hpc-workload-management">IBM Spectrum LSF Suites</a> provide a complete solution for HPC workload management. This includes reporting capabilities out of the box. Spectrum LSF Suite features an integrated web +interface for job management and reporting. The reporting capabilities include a number of reports out of the box, with the ability to customize and add new reports. The reporting +capability in Spectrum LSF Suite and IBM Spectrum LSF Explorer is underpinned by Elasticsearch, which is used to store, index and query data. With LSF data in Elasticsearch, it’s +also possible to configure LSF command-line interface (CLI) tools to query information from Elasticsearch rather than flat files – for greater performance. This is controlled via +the <strong>LSF_QUERY_ES_FUNCTIONS</strong> parameter of Spectrum LSF. More details about the <strong>LSF_QUERY_ES_FUNCTIONS</strong> parameter can be found in the LSF documentation <a href="https://www.ibm.com/docs/en/spectrum-lsf/10.1.0?topic=lsfconf-lsf-query-es-functions">here</a>.</p> + +<p>(1) Here is a look at the indices that are created by LSF in Elasticsearch.
Note that the status shows as yellow because I only have a single Elasticsearch node.</p> + +<div class="highlight"><pre><code class="language-plaintext"># curl -XGET localhost:9200/_cat/indices +yellow open lsf_events-202205 tejh7jsMSwSeQUJzYM7cww 5 1 1137 0 808.1kb 808.1kb +yellow open lsf_jobs_pendingreason-202204 4wi7Ta8uQPSXlFBqPh4kOQ 5 1 90531 0 8.6mb 8.6mb +yellow open lsf_events-202204 tWYvW_w8TVyU1deRFOEoZg 5 1 116957 32691 59.1mb 59.1mb +yellow open lsf_jobs_active-202212 Q0pStQxvTgaeL7R-f02XWA 5 1 210052 0 50.6mb 50.6mb +yellow open lsf_jobs_pendingreason-202206 ENWIwfGrSqCHvi53aUQXJQ 5 1 44991 0 4.5mb 4.5mb +yellow open host_booleanres_latest RE8thZCgTGeMBGodeMfXEQ 5 1 5 0 23.3kb 23.3kb +yellow open lsf_jobs_pendingreason-202205 yo0iZH_4TvOqq6kQgBluvA 5 1 111 0 181.4kb 181.4kb +yellow open lsf_jobs_pend-202212 9ViIS3nDRFewrqtILEbKTQ 5 1 707 0 446.9kb 446.9kb +yellow open lsf_hostconf_latest 9N1Y8ML4TiyaamCPEDRQog 5 1 2 0 10.6kb 10.6kb +yellow open lsf_events-202209 rtKQ8F4bSleHl8EbAQez8A 5 1 8200 955 4.4mb 4.4mb +yellow open lsf_events-202206 UUKPWfN7SZ-dzVs5NAkjUg 5 1 79503 23452 36.8mb 36.8mb +yellow open lsf_hostmetrics-202209 7FUNFCWPQtuGyx5jTJLb1A 5 1 4701 0 2.2mb 2.2mb +yellow open lsf_hostmetrics-202208 52xef_3hQWK-jVuJqyUpHA 5 1 3823 0 1.9mb 1.9mb +yellow open lsf_hostmetrics-202207 IqZYhU0RQNGIFWSRH-Ym8Q 5 1 6316 0 2.9mb 2.9mb +yellow open lsf_job_acct-202209 h1ZgCSB8RwCBxwIUUzDHEQ 5 1 2050 438 1.9mb 1.9mb +yellow open lsf_jobs_active-202209 iBfnf07CTcS7Gb6TxwomRA 5 1 2658 0 1mb 1mb +yellow open lsf_hostmetrics-202206 0PXSYBOgTA2Qa_zzaafUPg 5 1 4301 0 2.1mb 2.1mb +yellow open model xSqB_T_VSByOzYavEcEVyQ 1 1 55 0 257kb 257kb +yellow open lsf_job_acct-202206 C639GnzBSjCEVczfh5u23g 5 1 16719 353 8.9mb 8.9mb +yellow open lsf_jobs_active-202204 8gN_ENkQRTSfnmxrtMcOlA 5 1 33286 0 9.8mb 9.8mb +yellow open lsf_job_acct-202205 LOxmhm_8RxaCuTd7YWYbLw 5 1 274 0 439.4kb 439.4kb +yellow open lsf_jobs_active-202205 61u2RlXgR_SXagmZfrmttQ 5 1 1880 0 1.1mb 1.1mb +yellow open lsf_jobs_pend-202209 eTgqPp9nQOScNiwyUWXmHA 5 1 9 0 106.2kb 106.2kb +yellow open lsf_job_acct-202204 dDDegS6RQSWtWN99eklexg 5 1 28902 2177 17.4mb 17.4mb +yellow open lsf_jobs_active-202206 8ivkjWSNR1Sh_BxWACP0ZA 5 1 16921 0 4.6mb 4.6mb +yellow open lsf_current_status 92KE3V4YSJ-RtRp_kepxYg 5 1 115450 0 9mb 9mb +yellow open lsf_hostmetrics-202210 vbuK2wW3RRmXuY07tDPUNQ 5 1 785 0 942.1kb 942.1kb +yellow open lsf_jobs_pend-202206 OhSwn-b0SiSj8mCW5tcNIA 5 1 22 0 244.6kb 244.6kb +yellow open lsf_jobs_pend-202205 OfBtWklETYK9cRx000aNPw 5 1 1 0 12.7kb 12.7kb +yellow open lsf_events-202212 WUC5KJWmS-2WIN8XCQpSuw 5 1 712399 74728 337mb 337mb +yellow open lsf_jobs_pend-202204 OhUsXqohSciZTPZlTryMyA 5 1 50 0 275.3kb 275.3kb +yellow open resource_attributes_latest R9bk_WIPTU62dVg3O1LDBA 5 1 5 0 24.4kb 24.4kb +yellow open lsf_jobs_pendingreason-202212 55iwDC5mRI-eRbzQLwWP6Q 5 1 3314828 0 288.7mb 288.7mb +yellow open pa-lite-log o8-jaNoGTsSVcjJW5Ufs0w 5 1 1549 0 547.2kb 547.2kb +yellow open lsf_job_acct-202212 4HXvAD02Sxq0tgp2fS2cfQ 5 1 161502 0 73.6mb 73.6mb +yellow open lsf_hostmetrics-202212 Tki6OJ41R363u9Tx02N4zw 5 1 2548 0 1.7mb 1.7mb +yellow open lsf_jobs_pendingreason-202209 D3TOZY2ORiK9PppGVt10Fg 5 1 2511 0 381.4kb 381.4kb</code></pre></div> + +<p>(2) With the LSF data stored in Elasticsearch, the next step is to connect to the Grafana server. Here we point our browser to the Grafana server on the default port: <em>http://lsf_manager:3000</em> and login to Grafana. 
This step assumes an account has already been set up on Grafana. Here we are using the default admin account.</p> + +<p>(3) In Grafana, navigate to <strong>Configuration</strong> -&gt; <strong>Data sources</strong>. It’s here that it will be possible to add an Elasticsearch data source.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_3.png" /> +</figure> + +<p>(4) Next, click the <strong>Add data source</strong> button.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_4.png" /> +</figure> + +<p>(5) In the list of data sources, filter by name for <em>Elasticsearch</em> and click the <strong>Select</strong> button on the Elasticsearch entry.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_5.png" /> +</figure> + +<p>(6) When configuring the data source, it’s necessary to specify an index name. This is where the list of indices in Elasticsearch that we generated earlier will come in handy. For this example, we wish to display the total number of pending jobs in the Spectrum LSF cluster over time. This data is stored in the <em>lsf_jobs_pend*</em> indices in Elasticsearch. To configure the data source appropriately, we specify the following values:</p> + +<ul> +<li>Name: “LSF pending jobs”</li> +<li>URL: http://localhost:9200</li> +<li>Index name: “lsf_jobs_pend*”</li> +<li>Time field name: “time_stamp”</li> +<li>Version: 7.0+</li> +</ul> + +<p>Note that the URL needs to point to the Elasticsearch server. In this case, both the Elasticsearch server and Grafana server are running on the same host.</p> + +<p>Next, click on the <strong>Save &amp; Test</strong> button. It should return the message <em>Index OK. Time field name OK.</em></p> + +<p>Assuming that no errors were found, click on the <strong>Back</strong> button.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_6.png" /> +</figure> + +<p>(7) Now you should see <em>LSF pending jobs</em> listed as a Data Source.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_7.png" /> +</figure> + +<p>(8) With the data source configured, we’re now ready to configure a dashboard to display the LSF pending job information. Navigate to <strong>Create</strong> -&gt; <strong>Dashboard</strong>.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_8.png" /> +</figure> + +<p>(9) Click on <strong>Add an empty panel</strong>. This is used to create a new panel where the LSF pending job information will be plotted.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_9.png" /> +</figure> + +<p>(10) In the panel editor, specify the following options:</p> + +<ul> +<li>Panel title: “LSF pending jobs”</li> +<li>Specify the data source “LSF pending jobs” which was created previously</li> +<li>Specify a suitable time range (2 days)</li> +<li>Line width (5 points)</li> +</ul> +<p>You should immediately see in the panel editor the plot of the hourly pending jobs. Click on the <strong>Apply</strong> button to save the changes.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_10.png" /> +</figure> + +<p>(11) After clicking Apply, you will be returned to the Dashboard screen. The Dashboard should now display the new LSF pending jobs panel that was created above. This Dashboard could also include panels for system metrics collected by Prometheus, for example.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_11.png" /> +</figure> + +<p>(12) Next, click on the diskette icon in the upper right to save the Dashboard with the LSF pending jobs panel.
We’ll name it <em>Spectrum LSF cluster status</em>.</p> + +<figure><img src="https://www.gaborsamu.com/images/grafana_12.png" /> +</figure> + +<p>Additional panels can be added to the <em>Spectrum LSF cluster status</em> dashboard based on the data logged by Spectrum LSF to Elasticsearch.</p> + +<p>That concludes the simple example of plotting Spectrum LSF cluster data from Elasticsearch in Grafana. As mentioned, the IBM Spectrum LSF Suites integrated web interface also provides reporting capabilities, with several built-in reports provided out of the box. Below, we’ve included a screenshot of the <em>pending job analysis</em> report included with Spectrum LSF Suites.</p> + +<figure><img src="https://www.gaborsamu.com/images/lsf_pending.png" /> +</figure> + +<p><strong>Summary</strong></p> + +<p>Spectrum LSF provides many hooks and integration points enabling administrators to change things ranging from scheduling behavior and the output of query commands through to job information being logged to Elasticsearch. Spectrum LSF is highly customizable by organizations to suit specific needs and requirements. We’ve demonstrated this using Grafana to visualize data from the LSF scheduler in a simple example. Following the above example, administrators can combine existing HPC cluster system level reporting in Grafana with job information from Spectrum LSF for a better overall view and understanding of the infrastructure.</p> Ramblings of a supercomputing enthusiast. Overview. Adam’s weekly update, 2022-12-04 (2022-12-05T05:49:35-07:00) https://hpc.social/personal-blog/2022/adam-s-weekly-update-2022-12-04 <h2>What&#8217;s new</h2> + +<p>This week was really intense from a work perspective. Not &#8220;bad intense&#8221;, but the kind of week where every day was spent with such a level of focus that at 5 PM or so I found myself staring off into space and forgetting words. I think I got some good things accomplished, but my brain also felt like mush by the time the weekend came.</p> + +<p><span id="more-268"></span></p> + +<p>This week I&#8217;m traveling to San Jose for work (I just checked into my hotel a little while ago!), so I fully expect this week to also be eaten by work. So I don&#8217;t promise anything terribly interesting for next week&#8217;s post&#8230;</p> + +<p>However, I did take advantage of a Sunday in San Jose to visit the <a href="https://computerhistory.org/">Computer History Museum</a> in Mountain View! I try to visit the museum every few years, and while a lot of the exhibits are the same, enough things change that I always get something new from the visit. Also, I&#8217;ve been doing a lot of reading about hardware development and the history thereof lately, so it was interesting to examine the museum through that new lens.</p> + +<p>I may write more about my visit later this week &#8212; it definitely sparked some thoughts &#8212; but in the meantime, here are a few photos I took while wandering around the museum.</p> + +<figure class="wp-block-image size-large is-resized"><img alt="A mechanical computer built mostly of brass, with various numerical dials. A small placard labels this as a replica of the Babbage Difference Engine No. 1 Demonstration Piece."
class="wp-image-282" height="800" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6894-768x1024.jpg" width="600" /><figcaption class="wp-element-caption">The Babbage Difference Engine, and other mechanical computers, have always fascinated me.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="The Cray-1, a round computer with its own built-in seating attached." class="wp-image-283" height="446" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6965-1024x768.jpg" width="595" /><figcaption class="wp-element-caption">Can&#8217;t visit the museum without visiting the Cray-1.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="The Connection Machine 1, a large black cube divided in eight sections." class="wp-image-284" height="768" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6973-768x1024.jpg" width="576" /><figcaption class="wp-element-caption">I would have loved to have seen a CM-1 in operation, with its red LEDs showing the operation of its many single-bit CPUs.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="The front panel of an Altair 8800 computer, with an array of LEDs and switches controlling the state of individual bits." class="wp-image-285" height="449" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_7037-1024x768.jpg" width="598" /><figcaption class="wp-element-caption">Having recently read Charles Petzold&#8217;s &#8220;Code&#8221;, I was struck by how closely the front panel of the Altair 8800 resembles the fictional front panel of the computer that Petzold constructs from logic gates up.</figcaption></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="A Dell PowerEdge R710 lays on a white plastic table, top cover off, surrounded by instructions on how to disassemble it." class="wp-image-286" height="467" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_7073-1024x768.jpg" width="623" /><figcaption class="wp-element-caption">The CHM Learning Lab now includes a back room with a couple of Dell PowerEdge R710 servers, complete with instructions for how to disassemble and reassemble them. Anyone who wants can wander in and take them apart. It was great fun watching a 5-year-old kid pulling components out of one of these&#8230; As well as feeling a little weird, as I think I&#8217;ve run these in production!</figcaption></figure> + +<h2>What I&#8217;m reading</h2> + +<p>I don&#8217;t have a ton to share this week &#8212; honestly, the whole week feels like a blur &#8212; but here are two books that I recommend.</p> + +<ul> +<li><a href="https://www.aliettedebodard.com/bibliography/novels/the-universe-of-xuya/the-red-scholars-wake/">The Red Scholar&#8217;s Wake, by Aliette de Bodard</a>: As the blurb says, &#8220;Lesbian space pirates!&#8221; Also, a really wonderful novella about building a new relationship amidst grief, power differentials, politics, and space battles. I think I basically recommend everything that de Bodard writes, but especially this. And it basically stands alone! 
So you can read this first, without going back to the other stories in the same world.</li> + + + +<li><a href="https://www.harpercollins.com/products/dealers-of-lightning-michael-a-hiltzik?variant=40824247779362">Dealers of Lightning: XEROX PARC and the Dawn of the Computer Age, by Michael Hiltzik</a>: I&#8217;ve just started this, but it&#8217;s already a really interesting snapshot of a key period in the development of the personal computer.</li> +</ul> + +<h2>Recent recipes</h2> + +<ul> +<li><a href="https://smittenkitchen.com/2019/12/unfussy-sugar-cookies/">Smitten Kitchen&#8217;s Unfussy Sugar Cookies</a>: These cookies did, indeed, prove to be both tasty and easy to make. If you just want some easy cookies to snack on, I absolutely recommend this recipe.</li> +</ul> + +<h2>Pet photos</h2> + +<figure class="wp-block-image size-large is-resized"><img alt="Phyrne the calico cat stares down into the camera from a stairway" class="wp-image-279" height="414" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6881-768x1024.jpg" width="310" /></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="Close-up on the face of Percy the gray tabby cat" class="wp-image-280" height="420" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6879-768x1024.jpg" width="314" /></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="Benny the golden doodle curled up on a dog bed" class="wp-image-281" height="238" src="https://thinking.ajdecon.org/wp-content/uploads/2022/12/IMG_6876-1024x768.jpg" width="317" /></figure> Thinking Out Loud. What&#8217;s new. An Initial Look at Deep Learning IO Performance (2022-11-28T00:00:00-07:00) https://hpc.social/personal-blog/2022/an-initial-look-at-deep-learning-io-performance <h2 id="abstract">Abstract</h2> + +<p>This blog post describes an investigation of IO behavior of TensorFlow and PyTorch during resnet50 training running on Lambda Labs’ 8x V100 GPU instances. Both ephemeral local NVMe storage and network attached persistent storage were tested. The local NVMe storage was fast enough to achieve a throughput rate required to hit synthetic test targets. The network attached persistent storage may not be able to fully saturate 8 V100 GPUs during training, though it can achieve nearly the same level of performance as the local storage so long as TFRecords are utilized. Further, there are specific behaviors and bottlenecks in TensorFlow and PyTorch that can reduce training performance when using real data from ImageNet.</p> + +<h2 id="acknowledgements">Acknowledgements</h2> + +<p>Thank you to Michael Balaban at Lambda Labs for providing access to their GPU cloud for this testing. Thank you to Chuan Li for the creation of his TensorFlow benchmarking tools. Thank you also to Andrej Karpathy, Toby Boyd, Yanan Cao, Sanjoy Das, Thomas Joerg, and Justin Lebar for their excellent blog posts on deep learning and XLA performance that helped inform this article. I hope that this post will be useful for others as your work and writing was useful for me.</p> + +<h2 id="introduction">Introduction</h2> + +<blockquote> + <p><em>…just because you can formulate your problem as RL doesn’t mean you should.
If you insist on using the technology without understanding how it works you are likely to fail.</em></p> + + + <p>        Andrej Karpathy, <a href="https://karpathy.github.io/2019/04/25/recipe/">A Recipe for Training Neural Networks</a>, 2019</p> + +</blockquote> + +<p>That was the phrase that stuck in my head when I first started this project. What project you may ask? I want to understand how deep learning experiments utilize fast storage devices. Not just any experiments either: <em>real</em> ones, preferably big. That’s how I happened upon Andrej Karpathy’s blog. He is the former Sr. Director of AI at Tesla and knows a thing or two about training big neural networks. I’ve spent the last decade working on Ceph and have worked on distributed systems and distributed storage for nearly 2 decades at this point. But training neural nets? The closest I’ve come was back in the early 2000s when I tried to build a tool to predict video game framerates. I scraped benchmark numbers from review websites and built M5 decision trees based on hardware and video card settings. It sort of worked, but was terribly overtrained on a small (~4000 sample) dataset. Training with petabytes of data to teach an AI how to responsibly drive a car? I can already feel a bit of imposter syndrome setting in.</p> + +<p>Thankfully my goal is comparatively modest. I don’t need to build a cutting edge classifier or explore the intricacies of manually implementing back-propagation. I simply want to understand the IO patterns that are involved when training big datasets with fast GPUs so I can help researchers speed up their work. Up until now, my ability to do this was fairly limited. At the day job I’ve had access to a small group of nodes with extremely modest GPUs. I set up runs with MLPerf but the datasets (WMT G-E and CoCo) easily fit into memory. Other than a short burst of read traffic at the very beginning of training there was very little IO. Recently I had the opportunity to meet Michael Balaban, Co-Founder of <a href="https://lambdalabs.com/">Lambda Labs</a>. I told him what I wanted to do and he gave me access to Lambda’s GPU cloud and beta persistent storage to give it a try. I was able to grab one of Lambda’s 8x Tesla V100 instances (These things are incredibly popular so it’s best to grab one early in the morning!). Not all of Lambda’s instance types currently have access to the persistent storage but the V100 instances in the Texas zone do. Once secured, I got to work.</p> + +<h2 id="tensorflow---synthetic">TensorFlow - Synthetic</h2> + +<p>Before even attempting to run tests with real data, I realized I needed a baseline to start with. Luckily, Chuan Li, Lambda’s Chief Scientific Officer, wrote a tool for running TensorFlow benchmarks and made it available on github <a href="https://github.com/lambdal/lambda-tensorflow-benchmark">here</a>. One of the advantages of Lambda’s cloud is that they’ve already bundled up many popular tools for running deep-learning workloads into one package called <a href="https://lambdalabs.com/lambda-stack-deep-learning-software">Lambda Stack</a> which comes pre-installed when you start an instance. This made it fast to get started, though I did run into one issue. Lambda Stack comes standard with TensorFlow 2, but Chuan Li’s tool relies on a TensorFlow benchmark submodule that is designed to work with TensorFlow 1. Luckily, the parent repository was unofficially updated to work with Tensorflow 2 (with a warning that it is no longer being maintained). 
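</p>
+
+<p>Concretely, the workaround amounts to checking out the submodule&#8217;s master branch. Here is a minimal sketch (the repository URL and the &#8220;benchmarks&#8221; submodule directory come from the description above; the exact command sequence is an assumption):</p>
+
+<pre><code># Approximate reconstruction of the submodule fix described here
+git clone https://github.com/lambdal/lambda-tensorflow-benchmark.git
+cd lambda-tensorflow-benchmark
+git submodule update --init
+cd benchmarks
+git checkout master
+</code></pre>
+
+<p>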
A quick “git checkout master” in the “benchmarks” submodule directory got everything working. Chuan Li’s tool makes it simple to run tests with several preconfigured templates already included. I chose the fp16 resnet50 configuration as it should be fast at processing images and is fairly standard.</p> + +<pre><code>TF_XLA_FLAGS=--tf_xla_auto_jit=2 ./batch_benchmark.sh X X 1 100 2 config/config_resnet50_replicated_fp16_train_syn +</code></pre> + +<p>Using the invocation provided in the benchmark README.md file, I was able to quickly run benchmarks with synthetic data on up to 8 V100 GPUs in the node. At one point I got stuck, hitting what appeared at first to be an unexplainable 25% performance loss. I reran the tests multiple times and even monitored GPU clockspeeds/temperatures in nvidia-smi with no luck. Ultimately I discovered my error. In the slow cases, I had inadvertently left out the “TF_XLA_FLAGS=--tf_xla_auto_jit=2” environment variable. It turns out that setting this allows TensorFlow to compile and execute functions with XLA (Accelerated Linear Algebra) support, which is a pretty big win for these tests.</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Tensorflow_-_ResNet50_Synthetic_Training_fp16.svg" /></p> + +<p>At this point I decided that I needed to understand how Chuan Li’s tool works. It turns out that he is using the same base tf_cnn_benchmarks.py benchmark code that companies like Nvidia and Dell also use for benchmarking their GPU solutions. I spent some time running it directly with Dell’s settings from their deep learning overview <a href="https://infohub.delltechnologies.com/l/high-speed-object-storage-for-deep-learning/overview-3284">here</a>. Unfortunately those tests had mixed results, even after various tweaks. While researching the XLA issues I mentioned earlier, however, I made an even better <a href="https://blog.tensorflow.org/2018/11/pushing-limits-of-gpu-performance-with-xla.html">discovery</a> on the TensorFlow website. I found an excellent blog post with performance data written by some of the core Tensorflow developers. It’s now 4 years old, but still appears to be quite valid. The tuning options used were both simpler and resulted in higher performance versus other configurations that I’ve come across.</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Tensorflow_-_ResNet50_Synthetic_Training_fp16_blog_compare.svg" /></p> + +<p>Training with synthetic data in Lambda’s cloud resulted in similar performance to what the Tensorflow developers reported. In fact, using their own settings yielded slightly faster results when running on Lambda’s 8xV100 instance! It was incredibly encouraging to me that even in Lambda’s cloud environment with virtual machine instances I could achieve performance that was as fast or faster than what the Tensorflow developers were reporting.</p> + +<h1 id="choosing-a-real-data-set">Choosing a Real Data Set</h1> + +<blockquote> + <p><em>The first step to training a neural net is to not touch any neural net code at all and instead begin by thoroughly inspecting your data.</em></p> + + + <p>        Andrej Karpathy, <a href="https://karpathy.github.io/2019/04/25/recipe/">A Recipe for Training Neural Networks</a>, 2019</p> + +</blockquote> + +<p>Having convinced myself that I had Tensorflow operating reasonably efficiently in synthetic tests, it was time to start thinking about what dataset to use for “real” training. The largest and most obvious choice is ImageNet.
ImageNet is composed of over 1.2 million categorized images that form a roughly 160GB training dataset. It is also the largest dataset I could find that was publicly accessible. Downloading it isn’t so easy however. The only version that I could access is the ImageNet Object Localization Challenge dataset hosted on <a href="https://www.kaggle.com/c/imagenet-object-localization-challenge">kaggle</a>.</p> + +<p>After finally figuring out how to download the data, it was time to follow Andrej’s advice and try to learn something about it. While ImageNet is curated and annotated, it has many images of different sizes, dimensions, and pixel counts. Images also come from many sources with different levels of quality. Through the power of stack-exchange I was able to find a bash one-liner script to generate a histogram of image sizes:</p> + +<pre><code>find . -type f -print0 | xargs -0 ls -l | awk '{size[int(log($5)/log(2))]++}END{for (i in size) printf("%10d %3d\n", 2^i, size[i])}' | sort -n +</code></pre> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/ImageNet_-_Image_Distribution_by_Approximate_Size.svg" /></p> + +<p>Roughly 80% of the images are in the 64KB or 128KB size bins. Almost all of the remaining images are smaller. That gives us a pretty good idea of what kind of IOs to expect during classification. Or at least…it does for frameworks that read those images directly. In Tensorflow’s case, there’s an alternative format called TFRecord. TFRecords are basically collections of image data sequentially laid out in much larger files. Instead of iterating over thousands or millions of individual image files, TFRecords allow Tensorflow to instead stream fewer, larger files that each house multiple images. It’s a one time cost to pre-process the data so Tensorflow has less work to do during training. After I downloaded the ImageNet data I took a shot at converting the ImageNet LOC data into TensorFlow records. Luckily, the TensorFlow tpu github repository already has a <a href="https://github.com/tensorflow/tpu/blob/master/tools/datasets/README.md">tool</a> that can do this. I had to manipulate the dataset slightly, but ultimately this process worked (at least for the training data):</p> + +<pre><code>pip install gcloud google-cloud-storage +pip install protobuf==3.20.1 + +mkdir ~/data/ImageNetFoo +ln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/train ~/data/ImageNetFoo/train +ln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/val ~/data/ImageNetFoo/val +ln -s ~/data/ImageNet/ILSVRC/Data/CLS-LOC/test ~/data/ImageNetFoo/test +ln -s ~/data/ImageNet/LOC_synset_mapping.txt ~/data/ImageNetFoo/synset_labels.txt +python imagenet_to_gcs.py --raw_data_dir=/home/ubuntu/data/ImageNetFoo --local_scratch_dir=/home/ubuntu/ExaltedOrbs/ImageNet/tf_records --nogcs_upload +</code></pre> + +<p>Perhaps I should say that this worked so long as the original dataset was located on the local NVMe drive. The persistent storage didn’t fare as well. Attempting to decompress ImageNet on the persistent storage resulted in blowing past the max number of open files allowed with errors like:</p> + +<pre><code>OSError: [Errno 24] Too many open files. +</code></pre> + +<p>Unfortunately this couldn’t be fixed on the instance. It appeared to be passed through from the host and the persistent storage was completely unusable until the instance was rebooted. Recently I spoke to one of Lambda’s engineers and they are working on a fix. (It may already be implemented by the time you read this!) 
<p>I also want to note that the persistent storage is still in beta, so issues like this are not entirely unexpected. Having said that, before hitting the error, extracting ImageNet was significantly slower on the persistent storage than on the local NVMe storage. It’s probably best to extract ImageNet locally and then write the large TFRecords to the persistent storage during the conversion process. Luckily, extracting ImageNet to local storage was fine, and storing the original archive and the resulting TFRecords on the persistent storage worked perfectly well.</p> + +<h2 id="fio---baseline-io-results">FIO - Baseline IO Results</h2> + +<p>Next, I turned my attention to running baseline tests on Lambda’s local and persistent storage using fio. Fio is a highly configurable and well-respected benchmark in the storage community and perfect for generating baseline results. I decided to use a dataset size that is roughly similar to ImageNet (200GB), the libaio engine in fio with direct IO, and an appropriately high IO depth to let the NVMe drives stretch their legs a bit.</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Lambda_Labs_8xv100_Storage.svg" /></p> + +<p>Throughput with the local NVMe drive(s) is surprisingly good. The persistent storage is slower, but still might be fast enough at a little over 1GB/s for large reads. The 16K random IOPS results were less impressive in both cases. I chose 16K so that I could quickly compare to tests I ran in my Ceph QEMU/KVM performance blog post <a href="https://ceph.io/en/news/blog/2022/qemu-kvm-tuning/">here</a>. Without getting into the details, I suspect there’s still some room for improved IOPS with Lambda’s setup. Luckily though, converting into TFRecords should make TensorFlow throughput-bound instead of latency-bound. What about PyTorch or other tools that want to read images directly though? Fio gives us the ability to simulate it by using its ‘bssplit’ feature. We can take the size ranges and percentiles generated when examining ImageNet and give fio a similar distribution:</p> + +<pre><code>fio --ioengine=libaio --direct=1 --bssplit=2K/1:4K/2:8K/4:16K/8:32K/13:64K/38:128K/33:256K/1 --iodepth=128 --rw=randread --norandommap --size=200G --numjobs=1 --runtime=300 --time_based --name=foo +</code></pre> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Lambda_Labs_8xV100_Storage_Reads_Second_Bssplit.svg" /></p> + +<p>This isn’t exactly right as we are not reading data spread across millions of files, but it should provide something of an upper bound on what to expect. It looks like the persistent storage can do approximately 10K reads/second at a throughput rate of around 750MB/s. The local storage is about 3-4 times faster. Local storage should be fast enough to support the kind of images/second throughput rates we want to hit in TensorFlow on 8 V100 GPUs, but the jury is still out for the persistent storage.</p> + +<h2 id="tensorflow---imagenet">TensorFlow - ImageNet</h2> + +<p>Running benchmarks with real data rather than synthetic data is fairly straightforward in TensorFlow.
You simply append the --data_dir and --data_name flags to the CLI invocation to let it know where the TFRecords are located:</p> + +<pre><code>sync; echo 3 | sudo tee /proc/sys/vm/drop_caches +python ./tf_cnn_benchmarks.py --batch_size=256 --num_batches=100 --model=resnet50 --optimizer=momentum --variable_update=replicated --all_reduce_spec=nccl --use_fp16=True --nodistortions --gradient_repacking=2 --compute_lr_on_cpu=True --single_l2_loss_op=True --xla_compile=True --num_gpus=8 --loss_type_to_report=base_loss --data_dir=/home/ubuntu/ImageNet-TF/train --data_name=imagenet +</code></pre> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Tensorflow_-_ResNet50_Real_Training_First_Attempt_fp16.svg" /></p> + +<p>Ouch. Much lower performance with the ImageNet data vs synthetic! This is especially unfortunate given that 4 years ago the TensorFlow developers reported much better results. I spent some time reading and experimenting with different settings. Ultimately the one setting that made a substantial difference was “datasets_num_private_threads”. In the TensorFlow benchmark source code, this setting is described as: “[The] number of threads for a private threadpool created for all datasets computation.” I’ll go into more detail about what these threads are doing in a bit. For now, let’s see how increasing the number of threads affects the results:</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Tensorflow_-_ResNet50_ImageNet_Training_fp16_private_threads.svg" /></p> + +<p>Increasing the number of private threads has a dramatic effect on performance, though I was unable to fully match the performance achieved in the synthetic tests on either the local or persistent storage. The local storage fared better at high thread counts, gradually topping out at around 8600 images/second. At high private thread counts the persistent storage topped out between 7000-8000 images/second with a higher degree of variability between runs. I suspect that in this case the persistent storage has likely hit its (per instance) limit.</p> + +<p>In addition to having a dramatic effect on performance, changing the private thread count also had a large effect on the CPU consumption of the TensorFlow process. CPU usage increases almost linearly with additional private threads up to around 30 cores. What exactly are these private threads doing? To answer that question, I utilized two tools that I often deploy when diagnosing CPU usage in Ceph. When testing with a lower number of private threads, I used Linux’s perf tool to look at where cycles are being consumed when the private threads are fully saturated. At higher levels of private threads, I used my wallclock profiler <a href="https://github.com/markhpc/uwpmp">uwpmp</a> to look at how private threads spend their time when increasing the thread count no longer improves performance.</p> + +<p>In the first case with perf, we can get a good view of the work that these private threads are doing:</p> + +<pre><code>--77.31%--tensorflow::ThreadPoolDevice::Compute + | + |--51.19%--0x7f511a00c7d8 + | | + | --51.18%--tensorflow::jpeg::Uncompress + |--14.48%--tensorflow::ResizeBilinearOp&lt;Eigen::ThreadPoolDevice, unsigned char&gt;::Compute + |--5.47%--tensorflow::CastOpBase::Compute + |--2.66%--tensorflow::ReverseV2Op&lt;Eigen::ThreadPoolDevice, unsigned char, int&gt;::Compute +</code></pre> +<p>The majority of the cycles are consumed in JPEG decompression and resize operations, along with a smattering of other work.</p>
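<p>For reference, a profile like the one above can be captured by attaching perf to the running training process. This is only a sketch (the exact invocation I used isn’t reproduced here, and the pgrep pattern is illustrative):</p> + +<pre><code># sample on-CPU call stacks from the benchmark process for 30 seconds +sudo perf record -g -p $(pgrep -f tf_cnn_benchmarks | head -1) -- sleep 30 +# summarize which call chains consumed the most cycles +sudo perf report --stdio +</code></pre>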
<p>What happens if we look at a case with a higher private thread count, but at wallclock time instead of cycles? I ended up having some trouble getting the profiler to work properly and consistently getting clean callgraphs, but I was able to get at least one run in that revealed some interesting information. First, I saw time spent in the same functions that perf told us we were spending cycles in:</p> + +<pre><code>+ 100.00% Eigen::ThreadPoolTempl&lt;tensorflow::thread::EigenEnvironment&gt;::WorkerLoop(int) + + 99.90% ??? + |+ 97.30% ??? + ||+ 92.40% ??? + |||+ 77.10% _PyEval_EvalFrameDefault + ||||+ 47.20% ??? + |||||+ 38.10% tensorflow::jpeg::Uncompress(void const*, int, tensorflow::jpeg::UncompressFlags const&amp;, long*, std::function&lt;unsigned char* (int, int, int)&gt;) + ||||+ 12.20% tensorflow::ResizeBilinearOp&lt;Eigen::ThreadPoolDevice, unsigned char&gt;::Compute(tensorflow::OpKernelContext*) + ||||+ 4.40% tensorflow::CastOpBase::Compute(tensorflow::OpKernelContext*) + ||||+ 1.70% tensorflow::ReverseV2Op&lt;Eigen::ThreadPoolDevice, unsigned char, int&gt;::Compute(tensorflow::OpKernelContext*) +</code></pre> + +<p>But the wallclock profile also exposed that there may be contention in multiple areas in the private threads around some of the nsync synchronization primitives being used:</p> + +<pre><code> ||||||| | + 4.50% nsync::nsync_mu_semaphore_p(nsync::nsync_semaphore_s_*) + ||||||| | + 4.50% syscall + +</code></pre> + +<p>This almost always appeared nested deep inside:</p> + +<pre><code>tensorflow::BFCAllocator::AllocateRaw(unsigned long, unsigned long, tensorflow::AllocationAttributes const&amp;) +</code></pre> + +<p>Sadly, I was missing a number of debug symbols and don’t 100% trust the wallclock trace. For now I’ll just say that the private threads are doing a significant amount of work decompressing and manipulating the image data to keep the GPUs fed. I suspect that with newer and faster GPUs the image retrieval pipeline could become an even bigger issue when training with real image data. The mystery for me is how the TensorFlow developers achieved such good results 4 years ago without using dedicated private threads at all. Perhaps they had a significantly faster JPEG decompression mechanism that I am unaware of?</p> + +<h2 id="pytorch---imagenet">PyTorch - ImageNet</h2> + +<p>After running TensorFlow, I also ran some benchmarks in PyTorch using Nvidia’s “DeepLearningExamples” GitHub <a href="https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/Classification/ConvNets/resnet50v1.5">repo</a>.
First, I installed the prereqs and set up the repository:</p> + +<pre><code>pip install 'git+https://github.com/NVIDIA/dllogger' +pip install --extra-index-url https://developer.download.nvidia.com/compute/redist --upgrade nvidia-dali-cuda110 +git clone https://github.com/NVIDIA/DeepLearningExamples +</code></pre> + +<p>Then I prepared ImageNet for use in PyTorch:</p> + +<pre><code>cd ~/data/ImageNet/ILSVRC/Data/CLS-LOC/val +wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash +</code></pre> + +<p>And finally ran a test:</p> + +<pre><code>cd DeepLearningExamples/PyTorch/Classification/ConvNets +sync; echo 3 | sudo tee /proc/sys/vm/drop_caches +python ./multiproc.py --nproc_per_node 1 ./main.py --arch resnet50 --label-smoothing 0.1 --run-epoch 1 --amp --static-loss-scale 256 --workspace /home/ubuntu/data/ImageNet-Scratch /home/ubuntu/data/ImageNet-Orig/ILSVRC/Data/CLS-LOC/ +</code></pre> + +<p>There are a couple of differences here versus the TensorFlow tests. First, I’m using the raw ImageNet archive instead of a preprocessed TFRecord dataset, so the read behavior is different. Because I was unable to extract or copy the raw ImageNet archive onto the persistent storage, I’m also only testing the local NVMe drive. Finally, I didn’t see any specific examples for running with fp16 in Nvidia’s documentation, so I’m using AMP (automatic mixed precision), which may be slightly slower.</p> + +<p><img alt="" src="https://markhpc.github.io/images/2022-11-28-Lambda/Pytorch_-_ResNet50v15_ImageNet_Training_AMP.svg" /></p> + +<p>Given the number of differences, it’s tough to draw direct comparisons with TensorFlow. AMP is one difference, but it’s quite possible that there are tuning options that could improve performance here that I don’t know about. I did notice that PyTorch, like TensorFlow, is using quite a bit of CPU to keep the GPUs working. I suspect that there are ways to tweak the IO pipeline that could improve performance. For now though, let’s compare the IO patterns on the local NVMe drive during the TensorFlow and PyTorch runs. I was hoping to be able to use blktrace to do this, but unfortunately was unable to get any data from the virtual devices in the instance. I was able to collect more general statistics using collectl, however.</p>
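<p>A sketch of the kind of collectl invocation that produces the disk detail shown below (the exact flags I used aren’t recorded here, so treat this as illustrative):</p> + +<pre><code># print per-device disk detail (KBytes, IOs, size, wait, queue length, +# service time) once per second with timestamps +collectl -sD -oT -i1 +</code></pre>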
<h5 id="disk-read-statistics-during-pytorch-8-gpu-run">Disk Read Statistics During PyTorch 8 GPU run:</h5> + +<table> + <thead> + <tr> + <th>Time</th> + <th>Name</th> + <th>KBytes</th> + <th>Merged</th> + <th>IOs</th> + <th>Size</th> + <th>Wait</th> + <th>QLen</th> + <th>SvcTim</th> + </tr> + </thead> + <tbody> + <tr> + <td>00:29:18</td> + <td>vda</td> + <td>761136</td> + <td>0</td> + <td>6746</td> + <td>113</td> + <td>58</td> + <td>431</td> + <td>0</td> + </tr> + <tr> + <td>00:29:19</td> + <td>vda</td> + <td>752172</td> + <td>0</td> + <td>6648</td> + <td>113</td> + <td>112</td> + <td>810</td> + <td>0</td> + </tr> + <tr> + <td>00:29:20</td> + <td>vda</td> + <td>747824</td> + <td>0</td> + <td>6595</td> + <td>113</td> + <td>84</td> + <td>604</td> + <td>0</td> + </tr> + <tr> + <td>00:29:21</td> + <td>vda</td> + <td>735964</td> + <td>0</td> + <td>6583</td> + <td>112</td> + <td>73</td> + <td>551</td> + <td>0</td> + </tr> + <tr> + <td>00:29:22</td> + <td>vda</td> + <td>695636</td> + <td>0</td> + <td>6237</td> + <td>112</td> + <td>102</td> + <td>760</td> + <td>0</td> + </tr> + </tbody> +</table> + +<h5 id="disk-read-statistics-during-tensorflow-8-gpu-run">Disk Read Statistics During TensorFlow 8 GPU run:</h5> + +<table> + <thead> + <tr> + <th>Time</th> + <th>Name</th> + <th>KBytes</th> + <th>Merged</th> + <th>IOs</th> + <th>Size</th> + <th>Wait</th> + <th>QLen</th> + <th>SvcTim</th> + </tr> + </thead> + <tbody> + <tr> + <td>00:38:45</td> + <td>vda</td> + <td>1081324</td> + <td>0</td> + <td>8440</td> + <td>128</td> + <td>0</td> + <td>7</td> + <td>0</td> + </tr> + <tr> + <td>00:38:46</td> + <td>vda</td> + <td>927512</td> + <td>0</td> + <td>7241</td> + <td>128</td> + <td>0</td> + <td>7</td> + <td>0</td> + </tr> + <tr> + <td>00:38:47</td> + <td>vda</td> + <td>913512</td> + <td>0</td> + <td>7130</td> + <td>128</td> + <td>0</td> + <td>7</td> + <td>0</td> + </tr> + <tr> + <td>00:38:48</td> + <td>vda</td> + <td>1047444</td> + <td>0</td> + <td>8186</td> + <td>128</td> + <td>0</td> + <td>6</td> + <td>0</td> + </tr> + <tr> + <td>00:38:49</td> + <td>vda</td> + <td>968776</td> + <td>0</td> + <td>7560</td> + <td>128</td> + <td>0</td> + <td>6</td> + <td>0</td> + </tr> + </tbody> +</table> + +<p><br /> +When just looking at the IO sizes, both runs appear similar, but that doesn’t tell the whole story. It is likely that TensorFlow is doing much larger reads that are broken up into contiguous 128KB chunks by the block layer based on the underlying device’s max_sectors_kb setting. The tells here are the very low queue length and wait times for the TensorFlow run versus the PyTorch run. In both cases the device service times are low (0), but in the PyTorch case IOs are still backing up in the device queue.</p> + +<p>Interestingly, it appears that it may be possible to use Nvidia’s DALI (Data Loading Library) package to <a href="https://docs.nvidia.com/deeplearning/dali/archives/dali_170/user-guide/docs/examples/frameworks/pytorch/pytorch-various-readers.html">read TFRecords into PyTorch</a>. I didn’t have time to attempt it, but potentially that could have a big effect on IO behavior and performance as well.</p> + +<h2 id="conclusion">Conclusion</h2> + +<p>As I’ve been writing this post, I realize just how complicated it is to understand the performance characteristics of training neural networks. Even as we talk about metrics like images/second, the options that are used (batch size, for instance) can also affect convergence.
It’s very difficult to come up with a common methodology that is always better than the others. I wonder if another metric, like time to reach a desired level of convergence, would be better in the end. Having said that, I’m glad I did this exercise, as I learned some valuable things:</p> + +<ol> + <li> + <p>Pre-processing data into a format like TFRecords on fast local storage is a big win from an IO perspective. It lets storage systems that have slow metadata performance succeed so long as they have enough sequential read throughput to keep the machine learning framework busy. This is a big win for many distributed file systems that may have substandard metadata performance (and even the good ones may still benefit).</p> + + </li> + <li> + <p>To train on a dataset like ImageNet, you need somewhere around 1-1.3GB/s of raw disk throughput to keep 8 V100 GPUs busy when training in fp16. (That figure is consistent with the results above: roughly 8,000-9,000 images/second at an average image size in the 128KB neighborhood works out to a bit over 1GB/s.) For AMP or fp32 the requirements are likely lower since the GPUs can’t work quite as fast. With modern GPUs that are faster than the V100, the disk throughput requirements could be significantly higher.</p> + + </li> + <li> + <p>Lambda’s local NVMe storage is likely fast enough to saturate 8 GPUs, even newer ones, so long as the rest of the IO path can keep up. The persistent storage appears to become a bottleneck with sufficient GPUs and TensorFlow private threads, though it can still function fairly well so long as TFRecords are used. A concern going forward is how to ensure that the data pipelines in TensorFlow and PyTorch are fast enough to keep the GPUs fed. The TensorFlow benchmark required a large number of private threads and showed potential evidence of contention at high thread counts. PyTorch did not appear to natively support TFRecords, but Nvidia DALI or other third-party code might help improve the IO path.</p> + + </li> + <li> + <p>If it’s necessary to train directly with images rather than TFRecords, it may not make sense to host them on shared file systems. It appears that TensorFlow and possibly PyTorch give users the ability to specify separate training data and work directories. If all operations against the training data are reads, it may be better to host datasets on read-only block device snapshots. For instance with Ceph, perhaps you could create a read/write RBD volume where you put a certain dataset, take a snapshot, and then map that snapshot as read-only on multiple instances that all need access to the same image set. (A rough sketch of this idea follows the list.)</p> + + </li> + <li> + <p>Even with a training set as large as ImageNet, Lambda’s instances have so much memory that eventually the entire dataset becomes cached. It was necessary to sync and drop caches before each test and keep tests short enough that they didn’t re-read the same data from buffer cache. I was able to watch as long-running tests eventually stopped performing reads and got faster as time went on. This could make apples-to-apples comparison between different storage vendors difficult if not carefully controlled.</p> + + </li> + <li> + <p>I’m almost certainly missing additional tweaks that can help speed up both TensorFlow and PyTorch. This post shouldn’t be seen as the be-all/end-all for how to achieve high performance with these frameworks, but I hope it may at least help showcase some of the areas that are valuable to investigate when trying to train with real data and achieve high performance.</p> + + </li> +</ol>
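<p>To make point 4 concrete, here’s a minimal sketch of the read-only snapshot idea using Ceph’s rbd CLI. The pool and image names are hypothetical, and I haven’t tested this workflow end to end:</p> + +<pre><code># create a volume, map it once, and copy the dataset into it +rbd create datasets/imagenet --size 200G +# ... map it, fill it with the dataset, unmap ... +# freeze the contents as a protected snapshot +rbd snap create datasets/imagenet@v1 +rbd snap protect datasets/imagenet@v1 +# map the snapshot read-only on each instance that needs the same image set +sudo rbd map datasets/imagenet@v1 --read-only +</code></pre>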
<p>This wraps up my initial work looking at Deep Learning IO behavior. I hope that next time I can come armed with a bit more knowledge about the internals of how PyTorch and TensorFlow work, focus a bit more on the quality of the training, find even larger datasets to work with, and maybe actually accomplish something useful rather than just play with ImageNet.</p> + +<p>Thanks for reading!</p>Mark Nelson's BlogAbstractAdam’s weekly update, 2022-11-272022-11-27T15:28:16-07:002022-11-27T15:28:16-07:00https://hpc.social/personal-blog/2022/adam-s-weekly-update-2022-11-27<h2>What&#8217;s new</h2> + +<p>The first thing that&#8217;s new is&#8230; this post! I&#8217;m going to try to do at least a weekly post on the blog now, just a general update and some links. This will <em>hopefully</em> help me get back into the habit of writing on the blog regularly, and maybe inspire me to write a bit more in general.</p> + +<p><span id="more-264"></span></p> + +<p>I was off work this week for the Thanksgiving holiday, and traveled to Michigan to visit my parents and my brother&#8217;s family. My mom has been struggling with some pretty major health issues this year, so it was really wonderful and reassuring to get to spend some time with her and my dad. I also finally got to meet my brother&#8217;s three-year-old son, who was born <em>right</em> before the pandemic started, and who I hadn&#8217;t managed to meet up until now.</p> + +<p>On the tech-related front, I used this week to take a break from Twitter (mostly), and to be honest&#8230; it was kinda refreshing! I had developed a pretty bad Twitter habit this year, doomscrolling for more time than I like to admit. While I really like Twitter and I&#8217;ve had some nice career boosts from it, it was also a time sink that was not entirely healthy.</p> + +<p>Admittedly, that time was somewhat replaced by playing around on the <a href="https://calico.social/ajdecon">Fediverse / Mastodon</a>. But with the lack of algorithmic suggestions, quote tweets, and other means of virality, that network so far feels a lot quieter and less time-consuming than Twitter. Tim Bray has a <a href="https://www.tbray.org/ongoing/When/202x/2022/11/26/Bye-Twitter">good post</a> up which discusses some of the advantages and pitfalls of federated social media, and I can highly recommend reading that. I&#8217;m still a bit skeptical that it will be a practical &#8220;Twitter replacement&#8221; for most people, but so far I&#8217;m finding it pleasant.</p> + +<h2>What I&#8217;m reading</h2> + +<ul> +<li><strong>Nonfiction book: </strong><a href="https://bookshop.org/p/books/code-the-hidden-language-of-computer-hardware-and-software-charles-petzold/18465738">Code, Second Edition, by Charles Petzold</a>. This book walks through the process of building a working computer, starting with ideas like Morse code, then working up from logic gates on up. This is technically a re-read, as I read the first edition&#8230; 10+ years ago? But I&#8217;m getting a lot more out of it this time around, and really enjoying it.</li> + + + +<li><strong>Fiction book: </strong><a href="https://bookshop.org/p/books/the-spare-man-mary-robinette-kowal/18834426">The Spare Man, by Mary Robinette Kowal</a>. A cozy murder mystery on a luxury cruise to Mars. I&#8217;m only a few chapters in, but already greatly enjoying myself.</li> + + + +<li><a href="https://ferd.ca/hiding-theory-in-practice.html">&#8220;Hiding theory in practice&#8221;, by Fred Hebert</a>.
I&#8217;ve been reading a lot about safety engineering and its application to computing lately, but that community can sometimes get off into the weeds about points of theory that don&#8217;t have consensus in the broader computing community. This post has a good discussion of how to use the theory of safety engineering to guide decisions, without requiring that everyone working with you be handed a reading list.</li> + + + +<li><a href="https://cohost.org/mononcqc/post/385225-paper-repentance-as">&#8220;Paper: Repentance as Rebuke: Betrayal and Moral Injury in Safety Engineering&#8221;, also by Fred Hebert</a>. A discussion of <a href="https://link.springer.com/article/10.1007/s11948-022-00412-2">a paper by Dekker <em>et al</em></a> which looks at the aftermath of the 737 MAX air disasters, and the public repentance of some of the engineers who were involved. Go read the post, it&#8217;s great. And I&#8217;m planning to read the original paper this week.</li> + + + +<li><a href="https://chipsandcheese.com/2022/11/15/cannon-lake-intels-forgotten-generation/">&#8220;Cannon Lake: Intel&#8217;s Forgotten Generation&#8221;, from <em>Chips and Cheese</em></a>. Really I&#8217;ve been reading a bunch of the technical posts from <em>Chips and Cheese</em> lately, and they&#8217;re doing pretty good analyses of recent hardware. They&#8217;ve definitely earned that spot in my RSS reader.</li> + + + +<li><a href="https://glennklockwood.blogspot.com/2022/11/sc22-recap.html">Glenn K Lockwood&#8217;s &#8220;SC&#8217;22 Recap&#8221;</a>. I was sad to miss Supercomputing this year, though enough folks have come down with COVID that I don&#8217;t really regret the decision. But Glenn wrote up a really interesting recap post, with an interesting new viewpoint now that he&#8217;s working at Microsoft Azure. Among other things, he included a whole section titled <em>The underwhelming</em>, with the opening line &#8220;The biggest deal appears to be that exascale is here, and it turns out that it&#8217;s not that big of a deal.&#8221;</li> +</ul> + +<h2>Recent recipes</h2> + +<p>Because it was Thanksgiving, I did a lot of cooking this week! I&#8217;m not going to list everything I made, but a few of my favorites were:</p> + +<ul> +<li><a href="https://www.delish.com/cooking/recipe-ideas/a23340027/cheesy-garlic-butter-rolls-recipe/">Cheesy Garlic Butter Rolls from Delish</a>: Nothing special, but really tasty.</li> + + + +<li><a href="https://smittenkitchen.com/2019/11/challah-stuffing/">Challah Stuffing from Smitten Kitchen</a>: This recipe was a huge winner, with most of the family coming back for seconds, and then having more the next day for leftovers. It was really good, and is probably what I&#8217;ll make if I ever do stuffing again.</li> + + + +<li><a href="https://smittenkitchen.com/2008/09/best-challah-egg-bread/">Best Challah from Smitten Kitchen</a>: I baked the bread that went into the stuffing, and it was really tasty on its own! This recipe makes two loaves, and I only needed one for the stuffing. 
So I also made french toast with it, which worked really nicely.</li> +</ul> + +<h2>Pet photos</h2> + +<p>Gotta have those pet photos.</p> + +<figure class="wp-block-image size-large is-resized"><img alt="A blond golden doodle in a red harness and a blue bandanna lays on sandy dirt and looks into the camera" class="wp-image-271" height="233" src="https://thinking.ajdecon.org/wp-content/uploads/2022/11/IMG_6863-1024x768.jpeg" width="311" /></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="A white calico cat sits on a blanket and washes her front paw" class="wp-image-272" height="410" src="https://thinking.ajdecon.org/wp-content/uploads/2022/11/69075713241__19379770-6B0C-4780-8DD0-30C62A033C88-768x1024.jpeg" width="308" /></figure> + +<figure class="wp-block-image size-large is-resized"><img alt="A gray-brown tabby cat wearing a green collar sitting on a wall, looking vaguely toward the camera" class="wp-image-273" height="405" src="https://thinking.ajdecon.org/wp-content/uploads/2022/11/69073206299__DB9CA33B-0EB5-4681-96DA-8368554B6B8A-768x1024.jpeg" width="304" /></figure>Thinking Out LoudWhat&#8217;s newSC’22 Recap2022-11-24T02:00:00-07:002022-11-24T02:00:00-07:00https://hpc.social/personal-blog/2022/sc-22-recap<p>The biggest annual conference in HPC, the <a href="https://sc22.supercomputing.org">SC conference</a>, was recently held in Dallas, Texas in its second hybrid incarnation since being all-remote for the pandemic. This year attracted over 11,000 attendees, which is much closer to the pre-pandemic high of 14,000 than last year's 7,000, and judging from the crushed conference rooms and busy expo floor, it looks like SC is not that much worse for wear.</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>This year's conference was quite different for me since I attended for the first time as a vendor, not a researcher or practitioner, and I spent most of my days behind closed doors talking to customers. As a result, I didn't get to attend any of the keynotes, BOFs, or panels to which I wasn't invited, so I'm not really qualified to give an erudite summary of the conference or expo this year.</p> +<p>So instead, I'm just writing down what I remember in the order that I remember it, and not necessarily in a coherent narrative form. I'm sure I missed a lot (for example, mixed precision seemed big this year, and I heard Jack Dongarra gave a fantastic Turing Award talk), so I encourage others to write their own recaps and share with the community!<span></span></p> +<p></p> +<h2 style="text-align: left;">High-level themes</h2> +<p>I actually started writing an SC'21 recap last year which I never posted, and re-reading the intro was funny--you'd think nothing has changed in the last year.</p> +<h3 style="text-align: left;">The underwhelming</h3> +<p>The biggest deal appears to be that exascale is here, and it turns out that it's not that big of a deal. China let the air out of the tires by debuting their exascale systems at SC'21, and not only did they thumb their nose at Top500 by not submitting, they debuted by winning a Gordon Bell prize instead. The first US exascale system, Frontier, was debuted at ISC this year, leaving its showing at SC a bit deflated too.
<a href="https://www.hpcwire.com/2022/11/17/2022-gordon-bell-prize-goes-to-plasma-accelerator-research/">Frontier was featured in the Gordon Bell prize-winning paper</a> this year, but that work required the use of four Top-10 systems, not just Frontier, painting the reality that one giant computer rarely stands on its own when it comes to advancing science.</p> +<p>This isn't to say that deploying exascale systems isn't a noteworthy feat and worth commendation, but I felt like the hype over the last five years treated the achievement like an end state instead of a milestone. And now that we've passed the milestone, the community is grasping to figure out what comes next. So what <i>is</i> next?</p> +<p><b>Quantum</b> had a strong and growing presence at SC, as it has for the last few years. But the conclusion of the panel "<a href="https://www.hpcwire.com/2022/11/19/quantum-are-we-there-or-close-yet-no-says-the-panel/">Quantum Computing: A Future for HPC Acceleration</a>" was that no, it's not close to being ready.</p> +<p><b>Disaggregation and composability</b> was another theme with growing momentum. And like quantum, there was a panel asking the same question: "<a href="https://www.hpcwire.com/off-the-wire/informal-poll-of-sc22-attendees-suggests-a-bright-future-for-composability/">Does HPC need composability now?</a>" The answer, again, was no, not yet. More on that below.</p> +<p>What about <b>RISC-V</b>? Surely that will revolutionize the field. As it turns out, the answer there is also that <a href="https://www.hpcwire.com/2022/11/18/risc-v-is-far-from-being-an-alternative-to-x86-and-arm-in-hpc/">RISC-V is not ready to do anything useful for HPC yet</a>.</p> +<p>The list goes on of technologies and trends that people are trying to boost now that exascale is "solved." The reality, I think, is that "exascale" will take years to actually mature since it appears to have a ton of technical debt that accumulated during the race to be first. US Exascale rests on the shoulders of AMD and Intel, two companies whose software stacks have not caught up to the market leader, so there will be a lot of thrashing around as development practices and optimization settle out around these systems.</p> +<p>Struggling with code porting is not very exciting to computer science Ph.D.s, so I expect future SCs to mirror this one and bifurcate into two distinct tracks: those struggling to identify the next big thing in the research space, and those struggling to use the systems that were rushed to deployment.</p> +<h3 style="text-align: left;">The unexpected</h3> +<p>My SC experience was very biased since I didn't get out much, but two related themes kept popping up across different meetings and the sessions I did attend.</p> +<p><b>Power efficiency is serious business now</b>. It used to seem like people talked about the need for energy-efficient HPC in an abstract sense while continuing to jam more power into every rack without changing their approach to system design, facilities, and deployment models. That has hit a hard wall with energy prices soaring in Europe, though. The financial impacts of power-inefficient supercomputing have gone from a one-time capex cost to an ongoing opex cost that is putting many HPC facilities on an unsustainable cost trajectory. Even sites that aren't doing new deployments are facing sudden, sharp increases in their costs, and nobody has good answers about how they will keep the lights on.</p> +<p><b>Cloud HPC is confusing</b>. 
With only <a href="https://www.nextplatform.com/2022/11/08/hpc-follows-the-enterprise-into-the-cloud/">15% of total HPC dollars winding up in the cloud</a>, it's little surprise that most HPC folks are only peripherally aware of what HPC in the cloud really means. Worse yet, a subset of those folks are actively hostile towards the idea of running HPC workloads in the cloud. I spoke with my colleagues from all three major cloud service providers as well as my colleagues in DOE, NSF, and education throughout the week, and everyone painted this same general picture.</p> +<p>There seems to be a mismatch between the expectations of on-prem HPC folks and cloud HPC folks. For example, I was asked why Windows doesn't support OpenMP very well, and after a bit of digging, I realized that the question really wasn't about using OpenMP on Windows as much as it was about using OpenMP in the cloud. There was a latent assumption that "HPC in Microsoft's cloud" must mean "HPC on Windows" which, for the record, is false--I don't even know how to use Windows anymore. Similarly, people decried the performance impacts of sharing HPC nodes with others in the cloud (they are not shared), overheads of virtualizing InfiniBand or GPUs (everyone uses PCIe passthrough or SR-IOV for HPC nodes), and other misconceptions.</p> +<p>This isn't to say that cloud people aren't confused too; I heard stories about conversations that went sideways because cloud folks (not from my employer, thankfully!) didn’t realize that the requirements of a traditional gov/edu HPC facility couldn’t be neatly wrapped up into a single workload with a single solution, contrary to the case across many commercial AI shops. And both sides are struggling to find models for partnership and engagement that mirror the traditional relationship between places like a DOE or NSF facility and a company like Cray. HPC departments are used to buying supercomputers and parallel file systems, while cloud providers sell computing and storage as a <i>service</i>. The distinction may seem trivial at the surface, but there's a large divide that becomes evident once both sides start trying to drill into the details of what a partnership would look like.</p> +<h2 style="text-align: left;">Parallel I/O in Practice Tutorial</h2> +<p>This was my fifth year contributing to the Parallel I/O in Practice Tutorial with my colleagues at Argonne and Google, and it was our first time doing it in-person since 2019. It felt really good to be back in front of people to opine about the perils of POSIX and the greatness of the <a href="https://www.mcs.anl.gov/research/projects/darshan/">Darshan I/O profiling tool</a>, and this year I retired the material I used to present on burst buffers (since DataWarp and Infinite Memory Engine have lost relevance in HPC) and the <a href="https://www.nersc.gov/tokio/">TOKIO holistic I/O analysis framework</a> (since it is no longer funded/maintained). In their stead, I presented material on <a href="https://wiki.lustre.org/Lustre_User_Group_2022">benchmarking with IOR and mdtest that I debuted at LUG 2022 this year</a>.</p> +<p>I haven't gotten feedback yet on whether this change was a net positive one, but I think it went over well. Benchmarking I/O is really challenging if you don't understand how things like page cache really work in distributed systems, and walking through some benchmark examples concretizes a lot of abstract parallel file system concepts like locking and striping.</p>
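<p>For a flavor of what those examples look like, here's a minimal IOR run of the sort we walk through (illustrative only, not the exact commands from the tutorial):</p> +<pre><code># file-per-process write and read of 4 GiB per rank in 1 MiB transfers; +# -e fsyncs after the write phase, and -C shifts ranks on read-back so each +# client reads data that another client wrote, defeating the page cache +mpirun -np 8 ior -a POSIX -t 1m -b 4g -F -e -C +</code></pre> +<p>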
And since benchmarking is a rabbit hole of arbitrary complexity, ending the tutorial with advanced benchmarking topics turned out to be a nice way to add buffer to the end of an eight-hour stretch of carefully timed presentations. It's very easy to skip over the nuances of analyzing mdtest outputs if attendees have a lot of questions about more important things at the end of the day.</p> +<p>The most surprising observation of the tutorial is how many attendees aren't using MPI anymore. We got a lot of questions last year about task-oriented I/O, and this year had some great questions about trying to understand or tune the I/O performed by Python-based analytics frameworks. We decided to add support for <a href="https://www.mcs.anl.gov/research/projects/darshan/2019/12/11/new-experimental-version-of-darshan-available-for-instrumenting-non-mpi-applications/">Darshan to profile non-MPI applications back in 2019</a> which is now paying dividends by ensuring it is a relevant tool for these new analytics and AI workloads, and we'll probably have to give more attention to optimizing these workloads' I/O in the future.</p> +<h2 style="text-align: left;">DAOS User Group</h2> +<p>Monday morning was cold and rainy--a perfect day to attend the <a href="https://daosio.atlassian.net/wiki/spaces/DC/pages/11248861216/DUG22">2022 DAOS User Group</a> which was held off-site at the Fairmont Hotel.</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>Whether you particularly care about DAOS or not, the cross-community HPC I/O brain trust is guaranteed to be in attendance, and this year did not disappoint. In addition to the expected stakeholders from Intel and DOE, representatives from all three big CSPs were in attendance. Google Cloud, Seagate, and HPE/Cray were all on the agenda, painting a diversifying landscape of large HPC companies investing time into DAOS and the strength and willingness of the DAOS team to partner with all comers.</p> +<h3 style="text-align: left;">Life after Optane</h3> +<p>The question that opened up the meeting, of course, was "what is the future of DAOS since Intel cancelled Optane?" Kelsey Prantis had the official statement (I'll replace the grainy photo once the DUG slides are online...):</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>The high-level project answer is that DAOS isn't going anywhere. Aurora, by virtue of still having Optane DIMMs, will not be affected, and DAOS will maintain support for Optane until Intel drops its last Optane DIMMs (Crow Pass for Sapphire Rapids) from support life sometime towards the end of this decade.</p> +<p>For new customers who aren't going to use Optane, the answer is "<a href="https://daosio.atlassian.net/issues/?jql=labels%20%3D%20%22md_on_ssd%22">Metadata on NVMe</a>," a development being codeveloped by Intel, HPE, and Google to implement a write-ahead log (WAL) and allow DAOS to use volatile DRAM instead of Optane. It will work like a file system journal in that a compact representation of writes will be committed to NVMe immediately after landing in DRAM, and then DAOS will asynchronously write back the properly serialized representation of that transaction after it is acknowledged. 
Johann Lombardi had a helpful cartoon that showed how this WAL will fit into DAOS:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>A key benefit of DAOS's implementation of this WAL is that it will be able to still service incoming writes while flushing old writes; although I don't fully grasp how this works, it is something enabled by the sophisticated I/O scheduler already implemented in DAOS.</p> +<p>The complete implementation isn't expected to be released until Spring 2024, but it appears to touch only a few components of DAOS and doesn't affect anything above the VOS layer of the DAOS server.</p> +<p>There was also mention of developing operability with new <a href="https://news.samsung.com/global/samsung-electronics-unveils-far-reaching-next-generation-memory-solutions-at-flash-memory-summit-2022">CXL-attached memory-semantic SSDs</a> to keep the persistent memory capability of DAOS alive beyond Optane. I'm not sure if this would offer a performance benefit over the metadata-on-NVMe feature; early results show that metadata-on-NVMe actually delivers higher IOPS than Optane since the synchronous write path is much simpler without having to account for memory persistence. That said, I didn't really follow the full extent of options on the table for how DAOS metadata may work across different types of memory though.</p> +<h3 style="text-align: left;">DAOS in the flesh at Argonne</h3> +<p>Kevin Harms presented an update on Aurora's massive 220 PB DAOS installation and laid out its configuration. There are 1,024 DAOS servers based on the Intel Coyote Pass server design, each sporting</p> +<p></p> +<ul style="text-align: left;"><li>2x Intel Xeon 5320 (Ice Lake) sockets</li><li>2x DAOS engines (one per socket)</li><li>16x 32GB DDR4 DIMMs</li><li>16x 512GB Optane DIMMs (Persistent Memory 200)</li><li>16x 15.36 TB Samsung PM1733 NVMe SSDs</li><li>2x 200 Gb/s Slingshot NICs</li></ul> +<p>The total configuration is quoted at 220 PB usable, but Kevin pointed out that this assumes that every object is erasure coded at 16+2. Unlike virtually every other storage system out there, though, users can choose the data protection for their individual objects when they create them, meaning this 220 PB capacity is an upper limit to what users can do. Users with very hot, read-only objects may choose to replicate instead of erasure code, while others who are capacity-constrained may choose to erasure code everything at 16+2 at the cost of latency and IOPS. This flexibility is really powerful for users since they can tailor their object layout ("<a href="https://www.intel.com/content/www/us/en/developer/articles/technical/understanding-data-redundancy-and-sharding-in-daos.html">object class</a>" in DAOS parlance) to match the needs of their workload.</p> +<p>Argonne will be slicing up this DAOS system by giving each scientific project its own DAOS pool, and each pool will be assigned to only 80% of the available DAOS servers by default. This seems like a nice way of providing most of the storage system performance to every user, but offering more freedom to work around bad hardware, bad users, and other performance problems that plague file systems like Lustre that distribute everything across every single server equally.</p> +<p>Finally, I noticed that Aurora will be using Samsung SSDs, not the Intel (now Solidigm) QLC NAND that appeared in all the DAOS slides floating around two years ago. 
I'm not sure what happened there, but the move from Solidigm QLC to Samsung TLC couldn't have been cheap.</p> +<h3 style="text-align: left;">New features and contributions</h3> +<p>DAOS is starting to pick up some truly valuable features that are being developed and contributed by third parties. Of note, croit has contributed a feature which allows DAOS to serve up NVMe over Fabrics targets, and Seagate contributed an S3 gateway for DAOS. Along with the DFS file system interface, DAOS now offers the trifecta of standard object, block, and file services just like Ceph. Unlike Ceph though, performance on DAOS is a first-class citizen. While croit made it clear that the NVMeoF support still has a ways to go to improve the way it does thread pooling and provides resilience, they showed 1.4 million IOPS from a single storage client using TCP over Ethernet with minimal client-side overhead.</p> +<p>Intel is also developing multitenant support for DFUSE, allowing a single compute node to share a DAOS mount and let permissions be enforced through UID/GID just like a regular file system. Before this update, the FUSE-based nature of DAOS allowed any unprivileged user to mount their container (good), but only one FUSE agent could be alive on a single node at a time (not good) which prevented multiple users sharing a node from both mounting their own containers.</p> +<p>DAOS also has some longer-term enhancements that I thought were interesting:</p> +<p></p> +<ul style="text-align: left;"><li>expanding the range of POSIX calls supported by DAOS's intercept library to include metadata calls and memory-mapped I/O using <a href="https://docs.kernel.org/admin-guide/mm/userfaultfd.html">userfaultfd</a></li><li>implementing collaborative caching - essentially reimplementing the Linux kernel page cache in userspace so that multiple processes can share cached DAOS pages</li><li>supporting a computational storage paradigm by enabling offload of <a href="https://github.com/rlane/ubpf">userspace eBPF scripts</a> to DAOS servers</li></ul> +<h3 style="text-align: left;">DAOS in a larger data center ecosystem</h3> +<p>Dean Hildebrand from Google Cloud then gave an overview of Google's efforts in bringing DAOS into the cloud. He had some nice performance graphs and I'll link the full presentation here once it's uploaded (it's worth a watch), but the part I found the most insightful was how they are trying to decide where a technology like DAOS fits in the larger cloud storage ecosystem. He outlined two different ways DAOS could work in GCP:</p> +<p></p> +<ol style="text-align: left;"><li><b>Caching</b>: Google Cloud Storage (GCS) is the point of truth and DAOS is a cache</li><li><b>Tiering</b>: DAOS is a point of truth, and GCS is an archive</li></ol> +<p></p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>He said they were leaning towards the caching model where data only lives ephemerally in DAOS, and personally, I think this is the right move since DAOS in the cloud is not resilient without Optane. 
However, this choice reflects a much larger tension in cloud storage for HPC:</p> +<p></p> +<ol style="text-align: left;"><li>The centerpiece of every cloud's data story is a scalable, low-cost, low-performance object store which is analogous to what on-prem HPC would call campaign, community, or project storage.</li><li>HPC demands higher performance than what these object stores can generally deliver though.</li></ol> +<div>To bridge the gap between these two truths, auxiliary services must bolt on to the object layer and provide higher performance, at a higher cost, for the duration of I/O-intensive HPC jobs. Some choose to provide true tiering from object into a resilient layer of flash (like <a href="https://aws.amazon.com/fsx/lustre/">FSx Lustre</a> and <a href="https://docs.weka.io/overview/data-storage">Weka</a> do), while others project the contents of the object through a high-performance caching layer (like <a href="https://azure.microsoft.com/en-us/products/hpc-cache/#overview">HPC Cache</a> and <a href="https://aws.amazon.com/blogs/aws/amazon-file-cache-a-high-performance-cache-on-aws-for-your-on-premises-file-systems/">File Cache</a>) and are never meant to persistently hold data.</div> +<p></p> +<p>This isn't rocket science, but I never thought deeply about the two models since campaign/community/project storage in on-prem HPC is usually fast enough to avoid needing caches or fine-grained tiering capabilities.</p> +<p>John Bent also had a thought-provoking presentation about how Seagate's now-"deprioritized" CORTX object store, which once <a href="https://blog.seagate.com/enterprises/seagate-and-sage-project-innovate-to-boost-hpc-and-big-data-community/">competed with DAOS as Mero</a>, contains ideas that can complement DAOS:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p>Whereas DAOS delivers high performance using NVMe, CORTX delivers great economics using HDDs, and their strengths are complementary to each other. While I don't fully grasp how a tiered (or caching!) system comprised of DAOS and CORTX could be implemented, John rightly pointed out that the same level of space efficiency can deliver higher data protection if multi-level erasure coding is used to stripe across durable block storage. His specific example was erasure coding at 8+1 across servers and 10+1 within servers to deliver both high efficiency and high durability. This could map to something like running DAOS atop something like CORVAULT, but I don't think all the necessary pieces are in place to realize such a harmonious coexistence yet.</p> +<p>Of course, completely tossing Reed-Solomon for something more sophisticated (like VAST does with its locally decodable 150+4 scheme) obviates the need for multilevel erasure entirely. But DAOS has not gone down that route yet.</p> +<p>And as with every talk John gives, there were lots of other interesting nuggets scattered throughout his presentation. Two of my favorites were:</p> +<p></p> +<ul style="text-align: left;"><li>A slide that pointed out that, when you buy something like Ceph as an appliance, you may be spending only 25% of the total cost on storage media and the rest is infrastructure, service, and support. This struck me as a bit on the low end, but some enterprisey NAS and midrange parallel file system appliances can go this low. 
Spending 60% to 90% on media is a lot nicer for the buyer (and companies like Seagate) if you can buy at scale or eschew the white-glove support, and John suggested that it's up to companies like Seagate to fix the software issues that require customers to pay for white-glove support in the first place.  After all, the less someone spends on support and licenses, the more they can spend on Seagate hard drives.</li><li>John's final slide pointed out that object stores were originally designed to get around the limitations of POSIX file systems, but as they've evolved over the last decade, they're starting to look a lot like file systems anyway since they require strong consistency, hierarchical namespaces, and familiar file semantics. Has all the work put into developing super-fast object stores like DAOS over the last ten years really just brought us back full circle to parallel file systems?  Companies like VAST and Weka have shown that <a href="https://www.nextplatform.com/2017/09/11/whats-bad-posix-io/">maybe POSIX isn't as bad as the research community (myself included!) have claimed it to be</a>; it was really just low-performance implementations that nobody wanted.</li></ul> +<div>Once John's talk is uploaded to the DUG 2022 website, I'll link it here.  Like Dean Hildebrand's talk, it is well worth watching (but for wildly different reasons!)</div> +<p></p> +<p></p> +<p></p> +<h2 style="text-align: left;">PDSW 2022</h2> +<p>I had to duck out of the DAOS User Group early to run (through the rain) to the 7th International Parallel Data Systems Workshop (PDSW 2022) on Monday afternoon.</p> +<p></p> +<div class="separator" style="clear: both; text-align: center;"></div> +<p><br />Much to everyone’s surprise, PDSW was only given a half day this year and everything felt a little compressed as a result. The organizers kept the work-in-progress (WIP) sessions, which can often be an interesting peek into what students are pursuing, but little A/V problems and the unforgiving schedule probably did a disservice to the up-and-comers who use the WIP track to lay the groundwork for future full-length papers. Hopefully SC’23 restores PDSW to its original full-day status.</p> +<h3 style="text-align: left;">Splinters keynote from Arif Merchant at Google</h3> +<p>The keynote presentation was given by Arif Merchant from Google about Splinters, the framework that Google Cloud uses to sample I/Os in a scalable way. The challenge they face is that it's impossible to trace and store every single I/O that hits Google's storage servers (D servers), but having an understanding of I/O patterns is essential for characterizing workload I/O behavior and planning for future infrastructure. In fact, this problem is so important that Google isn't the only cloud that's solved it!</p> +<p>A lot of what Arif talked about is very similar to how Azure does its I/O tracing under the hood. I suppose it should not be a surprise that there are only so many ways to solve the challenge of sampling individual IOPS in a way that fairly represents the aggregate workload of a huge distributed storage system. One really smart thing Splinters does that I liked was sample along two different dimensions: not only do they evenly sample across all IOPS at a fixed rate (the obvious thing), but they also sample across files at a fixed rate.
In this latter case of per-file sampling, they take a tiny fraction of files and capture every I/O for that file to get a complete picture of how individual files are being accessed.</p> +<p>This file sampling fills the huge gap that exists when randomly sampling IOPS alone. Because different I/Os have different "costs" (for example, reading a 1 MiB file using a single 1 MiB read op or 256x 4 KiB read ops are functionally equivalent to an application), randomly sampling ops introduces systematic biases that can be difficult to back out after the data has been sampled, subsampled, aggregated, and reduced. Splinters' approach lets you see the workload from two different angles (and biases) and answer a much larger range of questions about what's really happening across thousands of storage servers.</p> +<p>That said, it was interesting to hear Arif describe how Splinters evolved out of a different internal Google project but wound up outliving it. Splinters is also similar to, but slightly different from, their <a href="https://research.google/pubs/pub36356/">Dapper</a> infrastructure, which also does scalable distributed system tracing. And he alluded to <a href="https://research.google/pubs/pub41344/">F1</a>, a scalable SQL database that is similar to (but not the same as) the SQL-like query interface that Splinters uses. I got the impression that new technologies come and go pretty quickly at Google, and there's a large appetite for creating new software systems outright rather than shoehorning an existing system into solving a new problem. I can't say one way is better than the other; I was just surprised at the contrast with my own experiences.</p> +<h3 style="text-align: left;">Practical papers</h3> +<p>PDSW had a healthy combination of both very-researchy papers and applied research papers this year. I could only stick around for the applied papers, and two left an impression.</p> +<p>In the first, <a href="https://jeanlucabez.io">Jean Luca Bez</a> presented <a href="https://github.com/hpc-io/drishti">Drishti</a>, a tool that lives downstream of the Darshan I/O profiling library and finally does what the Darshan community has danced around for years--turning a Darshan log into an actionable set of recommendations on how to improve I/O performance. It does this by cataloguing a bunch of heuristics and using Darshan's new Python integrations to pore through a log and identify known-problematic I/O patterns. Like Jean Luca's <a href="https://dxt-explorer.readthedocs.io/en/latest/">DXT Explorer tool</a>, Drishti has a slick user interface and greatly extends the usability and insights that can be pulled out of a Darshan log file. It probably won't win a Turing Award, but this sort of work is probably going to benefit scores of HPC end-users by making Darshan (and troubleshooting I/O problems) much more accessible to mere mortals for years to come.</p> +<p>Adrian Jackson also presented a very tidy <a href="https://arxiv.org/abs/2211.09162">apples-to-apples comparison of DAOS and Lustre on the same hardware</a> using both a systems-level benchmark and an application-inspired, object-oriented data model benchmark.
The specific bake-off of a new curiosity (DAOS) and the decades-old incumbent (Lustre) is probably interesting to storage nerds, but I think the real novelty of the work is in its exploration of some uncomfortable realities that the HPC I/O community will have to face in the coming years:</p> +<p></p> +<ul style="text-align: left;"><li>Does "slow memory" (nonvolatile Optane or CXL-attached memory SSDs) give actual benefit to existing file systems (like Lustre), or is rethinking the entire storage stack (like DAOS did) really necessary to unlock the performance of new hardware?</li><li>Do applications need to rethink their approach to I/O to make use of post-POSIX storage systems like DAOS, or is performing I/O as you would on a file system (Lustre) on a post-POSIX storage system (DAOS) good enough?</li></ul> +<p>My take from the work is that, for simple I/O patterns like checkpoint/restart, you can get pretty far by just treating something like DAOS the same as you would a parallel file system:</p> +<div class="separator" style="clear: both; text-align: center;"></div> +<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Figure from Manubens et al, "<a href="https://arxiv.org/abs/2211.09162">Performance Comparison of DAOS and Lustre for Object Data Storage Approaches</a>."</span></b></div> +<p>But if you want your data at rest to have the same data model as how it's handled within the application, you really ought to use a storage system that supports data models that are more expressive than a stream of bytes (which is what POSIX files are).</p> +<p>The authors didn't do a perfect job of giving Lustre its fair shake since they chose to use (abuse) directories and files to represent their application's data model on-disk instead of developing an object-file model that file systems like Lustre handle a little better. But let's be real--HPC is full of applications that do the exact same thing and represent datasets on-disk using complex hierarchies of directories and files simply because that's the easiest way to map the application's representation of data into the standard file system model. In that sense, storage systems that represent rich data models in a high-performance way should be really valuable to naive applications that map in-memory data structures directly to files and directories.</p> +<p>Going back to John Bent's closing slide from his DAOS User Group talk, though, does any of this even matter since all answers lead back to parallel file systems? Maybe there's something to be learned about adding better back-door APIs that support more diverse data models than what POSIX file interfaces give us.</p> +<h2 style="text-align: left;">The SC22 Expo</h2> +<p>The expo is my favorite part of SC because it's when I get to talk to people one-on-one and learn about corners of the HPC industry that I would've never otherwise sought out. Much to my dismay, though, I had very little time to walk the floor this year--so little that I didn't get any swag. 
+<p>Adrian Jackson also presented a very tidy <a href="https://arxiv.org/abs/2211.09162">apples-to-apples comparison of DAOS and Lustre on the same hardware</a> using both a systems-level benchmark and an application-inspired, object-oriented data model benchmark. The specific bake-off of a new curiosity (DAOS) and the decades-old incumbent (Lustre) is probably interesting to storage nerds, but I think the real novelty of the work is in its exploration of some uncomfortable realities that the HPC I/O community will have to face in the coming years:</p>
+<ul style="text-align: left;"><li>Does "slow memory" (nonvolatile Optane or CXL-attached memory SSDs) give actual benefit to existing file systems (like Lustre), or is rethinking the entire storage stack (like DAOS did) really necessary to unlock the performance of new hardware?</li><li>Do applications need to rethink their approach to I/O to make use of post-POSIX storage systems like DAOS, or is performing I/O as you would on a file system (Lustre) against a post-POSIX storage system (DAOS) good enough?</li></ul>
+<p>My take from the work is that, for simple I/O patterns like checkpoint/restart, you can get pretty far by just treating something like DAOS the same as you would a parallel file system:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<div class="separator" style="clear: both; text-align: center;"><b><span style="font-size: x-small;">Figure from Manubens et al, "<a href="https://arxiv.org/abs/2211.09162">Performance Comparison of DAOS and Lustre for Object Data Storage Approaches</a>."</span></b></div>
+<p>But if you want your data at rest to have the same data model it has inside the application, you really ought to use a storage system that supports data models more expressive than a stream of bytes (which is all that POSIX files are).</p>
+<p>The authors didn't give Lustre an entirely fair shake, since they chose to use (abuse) directories and files to represent their application's data model on-disk instead of developing an object-file model that file systems like Lustre handle a little better. But let's be real--HPC is full of applications that do the exact same thing and represent datasets on-disk using complex hierarchies of directories and files simply because that's the easiest way to map the application's representation of data into the standard file system model. In that sense, storage systems that represent rich data models in a high-performance way should be really valuable to naive applications that map in-memory data structures directly to files and directories.</p>
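+<p>The contrast between those two on-disk representations is easy to sketch. In the snippet below, <code>store.put()</code> is a hypothetical stand-in for a real object API (DAOS's actual libdaos/pydaos interfaces look different); it shows the same in-memory structure shredded into a directory tree versus written as structured keys:</p>
+<pre>
from pathlib import Path

# The common POSIX pattern: shred an in-memory structure into a directory
# tree with one file per leaf, because files are just streams of bytes.
def save_as_tree(root, dataset):
    for var, steps in dataset.items():  # e.g. {"temperature": {0: b"...", 1: b"..."}}
        for step, payload in steps.items():
            path = Path(root) / var / "step{:06d}.bin".format(step)
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_bytes(payload)

# The same logical layout on an object store collapses the hierarchy into
# structured keys, with no directory metadata to create or traverse.
def save_as_objects(store, dataset):
    for var, steps in dataset.items():
        for step, payload in steps.items():
            store.put(key=(var, step), value=payload)  # hypothetical API
</pre>
+<p>Every <code>mkdir()</code> in the first version is a synchronous metadata operation against the file system; the second version has none, which is a big part of why richer data models can be served at higher performance.</p>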
+<p>Going back to John Bent's closing slide from his DAOS User Group talk, though, does any of this even matter if all answers lead back to parallel file systems? Maybe there's something to be learned about adding better back-door APIs that support more diverse data models than what POSIX file interfaces give us.</p>
+<h2 style="text-align: left;">The SC22 Expo</h2>
+<p>The expo is my favorite part of SC because it's when I get to talk to people one-on-one and learn about corners of the HPC industry that I would've never otherwise sought out. Much to my dismay, though, I had very little time to walk the floor this year--so little that I didn't get any swag. If you want to read up on what interesting technology was being showcased, I strongly recommend <a href="https://www.servethehome.com/?s=sc22">all the great content that Patrick Kennedy and his team at STH created covering the expo</a>.</p>
+<p>That said, I did notice some curious trends about the show floor overall.</p>
+<p>The NVIDIA booth was notably absent this year (though they shared booth space with partners), and many of the usual top vendors had a significantly smaller presence on the expo floor. Just for fun, I compiled the top ten(ish) vendors by booth size:</p>
+<ol style="text-align: left;"><li>Weka.io (3,200 sqft)</li><li>VAST Data, Department of Energy, Penguin Computing, HPE, and Microsoft (2,500 sqft)</li><li>AWS (2,000 sqft)</li><li>Google and TACC (1,600 sqft)</li><li>Supermicro, AMD, Intel, Dell, NASA, and Indiana University (1,500 sqft)</li></ol>
+<p>I think it's amazing to see all-flash storage companies at the top of the list alongside all of the Big 3 cloud service providers. I may be reading too much into this, but it may mean that the money behind SC is shifting towards companies playing in the cloud-based AI space instead of traditional big iron for simulation. Or perhaps it's a sign that most of the traditional HPC players took a hard look at the return they get on a big booth, given the current economic climate, and pulled back this year.</p>
+<p>I did chat with a couple of colleagues who completely opted out of a booth this year (for reference, <a href="https://hallerickson.ungerboeck.com/prod/app85.cshtml?AppCode=VFP&amp;OrgCode=34&amp;EvtID=5025&amp;CC=SC22SM">SC'21</a> had 10% fewer exhibitor booths than <a href="https://hallerickson.ungerboeck.com/prod/app85.cshtml?AppCode=VFP&amp;OrgCode=34&amp;EvtID=5020&amp;CC=SC19">SC'19</a>), and the reasoning was consistent: they found more value in having staff meet with customers privately or attend the technical sessions and engage with people organically. Combined with a bit of bad taste left over from SC's <a href="https://sc21.supercomputing.org/exhibits/exhibit-at-sc/">high cost of hosting pandemic-era "digital booths"</a> despite low return (did anyone visit digital booths at SC'20 or SC'21?), I can see why some vendors may have chosen to skip the expo this year.</p>
+<p>Whatever the reasons may be, I was a bit sad to see such a small presence from some of my favorites like IBM, Fujitsu, Atos, and NEC. Hopefully the SC Exhibits Committee (and the economy!) can find ways to bring back the pre-pandemic glory of the show floor.</p>
+<p>The expo wasn't all doom and gloom though! Even though I couldn't make my complete rounds this year, there were a couple of highlights for me.</p>
+<h3 style="text-align: left;">VAST's masterful marketing</h3>
+<p>Perhaps the splashiest vendor at SC was VAST Data, who had a brilliant marketing presence. First was the giant Vastronaut mascot that was the centerpiece of their booth:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>A <a href="https://twitter.com/search?q=sc22%20vast&amp;f=live">quick search of Twitter</a> shows just how many people seized the opportunity to take a selfie at their booth. I would love to know how they transported that thing to and from the conference, but whatever the cost, I'll bet it was worth it.</p>
+<p>At the Grand Opening Gala on Monday, they also gave out delightfully tacky light-up cowboy hats that everyone seemed to be wearing:</p>
+<blockquote class="twitter-tweet"><p dir="ltr" lang="en">We were there! <a href="https://twitter.com/hashtag/sc22?src=hash&amp;ref_src=twsrc%5Etfw">#sc22</a> <a href="https://twitter.com/hashtag/sc2022?src=hash&amp;ref_src=twsrc%5Etfw">#sc2022</a> <a href="https://twitter.com/VAST_Data?ref_src=twsrc%5Etfw">@VAST_Data</a> <a href="https://t.co/fWhuSgBfpL">pic.twitter.com/fWhuSgBfpL</a></p>
+— ntnu-hpc (@ntnuhpc) <a href="https://twitter.com/ntnuhpc/status/1592330266932301829?ref_src=twsrc%5Etfw">November 15, 2022</a></blockquote>
+<p>The subtle genius of this was that not only did people wear them during the gala and the <a href="https://beowulfbash.com">Flop Gun-themed Beowulf Bash 2022 party</a> later that night, but they had to wear them on their plane rides home since they were so inconveniently bulky. Case in point: my wife (who doesn't work in tech) sent me this text message to confirm that she was waiting for me at the right luggage carousel at San Francisco Airport:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>I wonder how many innocent bystanders, traveling home for Thanksgiving on Thursday or Friday, saw the shiny cowboy hats at airports around the country and wondered what VAST was.</p>
+<p>The icing on the cake was VAST's CEO, Renen Hallak, parading around in an unmissable Chuck McGill-style space suit all week, clearly not taking himself too seriously and painting VAST as a work hard/play hard kind of company. Now, do flashy space suits and blinking cowboy hats alone mean VAST has a great product? I can't say<sup>**</sup>. But marketing is an art that I appreciate, and VAST hit some great notes this year.</p>
+<p style="font-size: xx-small;"><sup>**</sup> (Seriously, I'm not sure I wouldn't get in trouble for opining about another company here.)</p>
+<h3 style="text-align: left;">The Microsoft hardware bar</h3>
+<p>The only booth where I spent any appreciable time this year was my own employer's. I personally love booth duty and accosting strangers on the show floor, especially if there's something interesting at the booth to jumpstart a conversation. When I worked at SDSC it was a <a href="https://www.sdsc.edu/News%20Items/PR111213_meteor.html">Raspberry Pi cluster</a>, and at the Microsoft booth this year it was the "hardware bar."</p>
+<p>In addition to the customary booth presentations with giveaways, swag desk, seating area, and a fun caricature artist, the physical servers that underpin the HPC nodes in Azure were on display. <a href="https://www.opencompute.org/wiki/Server/ProjectOlympus">Microsoft contributes its hardware platform designs to the Open Compute Project</a>, so the physical hardware that runs in Azure data centers isn't entirely mysterious. Still, every cloud has its hardware secrets, so I was surprised to see these servers laid bare.</p>
+<p>The newest HPC node type (dubbed <a href="https://learn.microsoft.com/en-us/azure/virtual-machines/hbv4-series">HBv4</a>) on display was a node powered by AMD's Genoa processors, announced just a few days earlier:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>This wasn't a display model, either; it had real DDR5 DRAM, a real NDR InfiniBand HCA, real PCIe Gen5, and a real big OCP mezzanine card with real big aluminum heat sinks and a big Microsoft sticker on top. A couple of visitors commented on the way the heat piping for those Genoa CPUs was done, which I guess is unusual; rather than having a giant copper block on top of each socket, heat pipes connect the socket to massive aluminum heat sinks that sit closer to the chassis inlets. In retrospect it makes sense; Genoa has a whopping twelve DDR5 DIMMs per socket, which leaves little extra room for heat sinks, and these 88+ core sockets have a staggering thermal design power.</p>
+<p>Another exotic piece of hardware on display was an "ND MI200 v4" server:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>It's logically similar to Azure's "<a href="https://learn.microsoft.com/en-us/azure/virtual-machines/nda100-v4-series">ND A100 v4</a>" server platform with two CPU sockets, eight SXM4 GPU sockets, eight 200G HDR InfiniBand HCAs, and a bunch of M.2 NVMes. But this specific server has eight MI200 GPUs on a common OAM baseboard and uses Infinity Fabric for GPU-to-GPU communication. I've never seen an OAM-socketed anything in real life before, much less eight of them on a baseboard, so I thought this was pretty great to see in the flesh.</p>
+<p>The ND A100 v4 platform was also on display and looked very similar-but-different with its eight A100 GPUs and HGX baseboard:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>And unlike the MI200 variant, the general public can run on these nodes.</p>
+<p>I'm not sure what more I'm allowed to say, but my colleague Karl made a nice, <a href="https://twitter.com/KarlPodesta/status/1593627537330126851?s=20&amp;t=uthjeb7YYmTZWRVWaF4XUA">quick video that runs through the entire Microsoft booth</a> that's worth a watch, and more details can be had by contacting me or your favorite Microsoft account team privately.</p>
+<p>Of course, the hardware bar was just a way to lure people into the booth so I could achieve my real goal: meeting new folks. As I wrote before, one of my biggest realizations at SC this year is how generally confused people are about what HPC in the cloud really means--both people who come from traditional on-prem HPC and people who come from traditional enterprisey cloud. I found myself surprising many of the people with whom I spoke on the show floor with factoids that I have taken for granted. For example,</p>
+<ul style="text-align: left;"><li>Linux is the most common OS on these HPC node types. While you probably(?) can run Windows if you want on this stuff, I think only a few niche markets do this.</li><li>The usage model for an HPC cluster in the cloud can be the same as on-prem. You can have login nodes, Slurm, home directories, parallel file systems, and all that. Jobs don't have to be containerized or turned into a VM image.</li><li>The InfiniBand coming out of these nodes is real InfiniBand with real OFED that supports real mpich/mvapich/OpenMPI. It's the same stuff as in on-prem supercomputers. And nodes are assembled into <a href="https://learn.microsoft.com/en-us/azure/virtual-machines/sizes-hpc">full-bisection fat tree InfiniBand</a> clusters just like normal.</li><li>There's no noisy neighbor problem on compute nodes because HPC node types aren't shared between users. When you run a VM on an HPC node, you get the whole thing, just like on large supercomputers.</li><li>There's no horrible loss of performance due to running in a VM. Virtualization extensions, PCIe passthrough, and SR-IOV bypass the hypervisor for most things. Inside your VM, you see real Zen cores and real Mellanox HCAs, not virtualized devices (see the sketch after this list).</li></ul>
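+<p>That last point is easy to check for yourself. Here's a minimal sketch, assuming a Linux VM: with PCIe passthrough or SR-IOV, the HCA appears in sysfs exactly as it would on bare metal, whereas a paravirtualized NIC would not show up under the InfiniBand device class at all.</p>
+<pre>
from pathlib import Path

def list_infiniband_devices():
    """Enumerate the InfiniBand devices the kernel sees; with PCIe
    passthrough or SR-IOV, the real HCA (e.g., an mlx5 device) shows up
    here just as it does on an on-prem cluster."""
    root = Path("/sys/class/infiniband")
    if not root.exists():
        return []
    devices = []
    for dev in sorted(root.iterdir()):
        fw = dev / "fw_ver"
        version = fw.read_text().strip() if fw.exists() else "unknown"
        devices.append((dev.name, version))
    return devices

print(list_infiniband_devices())  # e.g., [("mlx5_0", "...")] -- illustrative output
</pre>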
+<p>My takeaway impression is that a lot of traditional HPC folks looked at the cloud five or ten years ago, had a sour experience, and haven't paid attention since. In those last five years, though, AI has changed the game. Massive demand for the latest CPUs and accelerators, funded by live-fast-die-young venture capital, has given cloud vendors tremendous financial incentive to catch up to on-prem levels of performance efficiency for AI workloads. And it just so happens that infrastructure that's good for AI is also good for traditional modeling and simulation.</p>
+<h2 style="text-align: left;">SCinet!</h2>
+<p>One of the unexpected highlights of my SC this year arose from a chance encounter with a former coworker from NERSC, <a href="https://www.nersc.gov/about/nersc-staff/networking-security/ronal-kumar/">Ron Kumar</a>, who gave me a whirlwind tour of SCinet.</p>
+<p>I have to confess great ignorance around SCinet in general; I always saw it as a weird technological proof of concept that the strange networking people at work would go off and build in the weeks leading up to the actual conference. I knew they did some impressive wide-area transfer demos (like the <a href="https://scinet.supercomputing.org/community/documents/43/sc17-Kettimuthu-transferring_1petabyte_per_day.pdf">petabyte-in-a-day demo at SC'16</a>), but I didn't really get the significance.</p>
+<p>So what is SCinet? It's this yellow bundle of cables dangling from the ceiling.</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>The yellow cables are 144-core fiber trunks that bring over a terabit per second of bandwidth into the convention center from the Internet via national research backbones like ESnet and Internet2, and they distribute many terabits per second of capacity throughout the SC conference venue. For comparison, most HPC centers in the US have at best a tenth of SCinet's wide-area bandwidth, since 400G infrastructure is still rolling out.</p>
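+<p>Some quick back-of-the-envelope arithmetic, using only the figures quoted above, puts those numbers in perspective:</p>
+<pre>
SECONDS_PER_DAY = 86_400
PETABYTE_BITS = 1e15 * 8  # one decimal petabyte expressed in bits

# The SC'16 petabyte-in-a-day demo implies this sustained transfer rate:
rate_gbps = PETABYTE_BITS / SECONDS_PER_DAY / 1e9
print("1 PB/day is about %.0f Gb/s sustained" % rate_gbps)  # ~93 Gb/s

# At a terabit per second of WAN capacity, the same petabyte moves in:
hours = PETABYTE_BITS / 1e12 / 3600
print("1 PB at 1 Tb/s takes about %.1f hours" % hours)      # ~2.2 hours
</pre>
+<p>In other words, the demo that impressed me back at SC'16 would consume only about a tenth of the wide-area bandwidth SCinet now terminates on the show floor.</p>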
+<p>Most attendees may be familiar with the row of expensive-looking networking racks behind a glass wall towards the back of the expo, which is where those yellow cables dangling from the ceiling terminate. Here's a photo from inside that glass wall:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>What I didn't realize is that if you go around to the back of the giant walled area behind this glass display, there's a security checkpoint that gates entry into a massive network operations center (NOC) full of laptops, spools of fiber, meeting rooms, and busily working teams in charge of all the lower layers of the networking stack.</p>
+<p>The process to get into the NOC involves an escort and being tagged in with a tamper-proof wristband, and I learned on the tour that there's millions upon millions of dollars' worth of high-end networking equipment in the racks shown above. If you look closely, you can see a security camera at the end of the aisle that speaks to this; that camera was one of many.</p>
+<p>Behind the pretty public-facing side of the SCinet racks is a mess of fiber and cables:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>I guess if you have to tear all this down after just a few weeks, there's no point in investing days in dressing it all up nicely! I particularly enjoyed the fiber panels in the third rack that appear to be affixed to the rack post with shoe laces.</p>
+<p>This year, SCinet also did a neat proof of concept in which three 400G routers from three vendors (Juniper, Arista, and Cisco?) all talked the same protocol to handle what I assume was the core routing for everything in the convention center:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>I wish I remembered exactly what was going on here, but I know enough about networking to know that, despite there being standard protocols for coordinating between networking gear, each vendor's implementation is rarely easy to make interoperate with the others'. If anyone out there knows the details of this achievement, please let me know so I can explain it a little better!</p>
+<p>In addition to networking nerd-level demonstrations, SCinet also serves up all the wifi across the convention center. That is why there were tripods with access points scattered around, and why astute attendees may have noticed janky networking equipment that looked like this:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>Again, I get it: for a network infrastructure that's only going to last a week, I don't think it's a good use of anyone's time or money to nicely dress all the networking.</p>
+<p>One last factoid I didn't know until this year was that exhibitors can request 100 Gb/s network drops into their individual booths for demos (or for downloading the latest version of a PowerPoint presentation <i>really fast</i>).
The end result of supporting both a vast wifi network and 100G fiber drops across the show floor is that there was a <u>lot</u> of fiber going into the single row of SCinet equipment:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>Finally, when I <a href="https://twitter.com/glennklockwood/status/1592725187015114752?s=61&amp;t=1c4Kbx75SpTJhCruzuy0Ng">posted some of these photos online</a> during the conference, my colleague Bilel was kind enough to post a slide from the SC22 opening presentation that had the speeds and feeds of what I had toured:</p>
+<blockquote class="twitter-tweet"><p dir="ltr" lang="en">Candy Culhane shared Scinet facts <a href="https://twitter.com/hashtag/SC22?src=hash&amp;ref_src=twsrc%5Etfw">#SC22</a> <a href="https://twitter.com/hashtag/HPC?src=hash&amp;ref_src=twsrc%5Etfw">#HPC</a><br /><br />5.01 Tb/s of WAN capacity<br />$70M in HW &amp; SW, &amp; services provided by 29 SCinet contrib.<br />175 volunteers from 80 vol. organiz.<br />&gt; 450 wireless deployed<br />29 network research exhibition proposals<br />11.7 miles of fiber <br />2384 fiber patch <a href="https://t.co/JtPhjVHZJd">https://t.co/JtPhjVHZJd</a> <a href="https://t.co/kwGl5Ydqp5">pic.twitter.com/kwGl5Ydqp5</a></p>
+— Bilel Hadri (@mnoukhiya) <a href="https://twitter.com/mnoukhiya/status/1592737463617089536?ref_src=twsrc%5Etfw">November 16, 2022</a></blockquote>
+<p>If you know anyone involved with SCinet, I highly recommend seeing if you can get a tour at the next SC. Even as a relative networking novice, I walked away with a much greater appreciation for the annual achievement of building SCinet. And who knows? Once I get bored of this whole storage thing, maybe I'll try getting into high-performance networking.</p>
+<h2 style="text-align: left;">Composability panel</h2>
+<p>This year I was invited to participate in a panel titled "Smackdown! Does HPC Need Composability Now?" moderated by Addison Snell and Dan Olds from <a href="https://www.intersect360.com">Intersect360 Research</a>. This panel was...different. Unlike the traditional SC panel where panelists take turns presenting slides and saying erudite things, this panel had two teams of panelists, and my team only had one slide to present:</p>
+<div class="separator" style="clear: both; text-align: center;"></div>
+<p>The ground rules included "personal attacks are allowed," and needless to say, the panel was about equal parts entertainment and technical discourse. That's not a bad thing, though.</p>
+<p>Addison and Dan did a phenomenal job of pulling their respective teams together and leading discussion in a format that brought forward the key pros and cons of composability in HPC while poking fun at the thinly veiled, ego-driven personalities that often make up these sorts of panels. Rather than politely dancing around issues like sacrificing memory bandwidth by putting accelerators at the far end of a PCIe bus, or gaining higher utilization by letting users mix and match CPUs, NICs, and GPUs, we panelists were free to shoot straight (or perhaps a bit hyperbolically) and call each other out on our hidden agendas.</p>
+<p>I hope it goes without saying that all of us panelists were in on the format and don't actually think people on the other side are dumb.
By wrapping technical arguments in snarky comments, we could keep the level of discussion accessible to a wide audience, drive home the key points from both sides, and ensure that we weren't losing audience members who don't care about the PhD-level details as much as they want to hear what their peers are thinking about this exciting new space. I got some feedback afterwards that I didn't seem to hold back, so if anyone did take anything I said seriously, I am very sorry!</p>
+<p>On a technical level, what was the outcome?</p>
+<p>It turns out that, after both sides argued their case, <a href="https://www.hpcwire.com/off-the-wire/informal-poll-of-sc22-attendees-suggests-a-bright-future-for-composability/">there was about a 60/40 split between people who felt composability wasn't required yet and those who felt it was</a>. Even among panelists, many of us were a lot less convinced of our respective positions than we let on during the panel itself. I got a chuckle when I realized that I wasn't the only one who, when invited to be on the panel, asked "what side do you want me to argue?" I honestly could have gone either way because the dust has not yet settled. <a href="https://www.tacc.utexas.edu/about/directory/dan-stanzione">Dan Stanzione, director of TACC</a>, gave the truest answer to the question of "will composability help HPC" up front--"<a href="https://twitter.com/HPC_Guru/status/1592604467698241537?s=20&amp;t=tn3WQBUY9M0MWSfqx1XLKA">it depends</a>." Maybe composability is a genuine growth opportunity, or maybe it will keep getting a lukewarm reception.</p>
+<p>Either way, composable technologies are hitting the market regardless of whether you think they'll be useful or not. <a href="https://www.nextplatform.com/2022/11/10/amd-genoa-epyc-server-cpus-take-the-heavyweight-title/">AMD Genoa supports CXL 1.1 with extensions for memory pooling</a>, <a href="https://news.samsung.com/global/samsung-electronics-unveils-far-reaching-next-generation-memory-solutions-at-flash-memory-summit-2022">Samsung has memory-semantic SSDs</a>, and everyone and their mother is working on photonics to get higher bandwidths and lower latencies over longer distances. This makes it easier for people to dip their toes in the water and see if composability makes sense for them, and I think that's what a lot of people will wind up doing in the coming years.</p>
+<h2 style="text-align: left;">Customer meetings</h2>
+<p>Unlike in years past, my SC experience this year was dominated by customer meetings. I've been on the customer side of the table plenty of times, but I was surprised to find that it was actually more fun to be on the vendor side for a change. I'm part salesman at heart, so I found it personally gratifying to end a meeting with people nodding along rather than scratching their heads. I learned as a customer that it's very easy for vendors to go way off the rails and waste everyone's time, so I was grateful to have avoided the awkward confusion that punctuates those kinds of meetings.</p>
+<p>I also went into the week worrying that I'd be sitting in the same room, hearing the same pitch and the same jokes, and answering the same questions all week. Thankfully, I work with some great field, business, and product teams who set up interesting conversations rather than rote recitations of boring roadmap slides.
Approaching the same topics from different angles also helped me figure out how all the pieces of what I'm working on fit together into a complete picture; there weren't nearly as many opportunities to do this in the DOE world, since the end-users of the HPC systems on which I worked weren't told anything until all the design decisions had already been made.</p>
+<h2 style="text-align: left;">A few personal notes</h2>
+<p>This SC was significant to me on a variety of levels; it was the first time I'd gotten on an airplane since February 2020, the first time I'd traveled since starting a new job at a new company, and the first time I'd met any of my new coworkers outside of the structure of a Teams call. During the pandemic I realized that getting out into the world and talking to people from all corners of HPC was my favorite part of my job. Not being able to go to events like SC and maintain that sense of community involvement dramatically impacted my level of professional satisfaction over the last two years, so I'm glad I was finally able to go this year.</p>
+<p>Though customer meetings were a lot more fun than I expected them to be, I still felt bummed that I could spend so little time walking the expo, talking to folks, and attending all the BOFs normally on my <a href="https://sc22.supercomputing.org/presentation/?id=bof124&amp;sess=sess331">must</a>-<a href="https://sc22.supercomputing.org/presentation/?id=bof112&amp;sess=sess307">attend</a> <a href="https://sc22.supercomputing.org/presentation/?id=bof110&amp;sess=sess369">list</a>. Compounding this was my personal choice not to dine indoors, which meant missing out on almost all other chances to catch up with old friends and colleagues. I also decided to leave SC a day earlier than I usually do to reduce my risk of getting sick, which didn't help either. There's never enough time at SC, but this year was particularly pressed.</p>
+<p>I say all this not to complain, but to say how much I appreciated the people who went out of their way to come accost me during the precious few hours I actually had on the exhibit floor. Some I hadn't seen since SC'19, and some I'd never actually met in person since we only started working together mid-pandemic. The conference is busy for everyone, so giving me a slice of your time was very meaningful. That sense of community membership is why I go to SC, it's why I still work in this business, and it's why I try to contribute whatever I can to whoever wants it, whether that's a student, engineer, salesperson, or marketer.</p>
I'm sure I missed a lot (for example, mixed precision seemed big this year, and I heard Jack Dongarra gave a fantastic Turing Award talk) so I encourage others to write their own recaps and share with the community! High-level themes I actually started writing an SC'21 recap last year which I never posted, and re-reading the intro was funny--you'd think nothing has changed in the last year. The underwhelming The biggest deal appears to be that exascale is here, and it turns out that it's not that big of a deal. China let the air out of the tires by debuting their exascale systems at SC'21, and not only did they thumb their nose at Top500 by not submitting, they debuted by winning a Gordon Bell prize instead. The first US exascale system, Frontier, was debuted at ISC this year leaving its showing at SC a bit deflated too. Frontier was featured in the Gordon Bell prize-winning paper this year, but that work required the use of four Top-10 systems, not just Frontier, painting the reality that one giant computer rarely stands on its own when it comes to advancing science. This isn't to say that deploying exascale systems isn't a noteworthy feat and worth commendation, but I felt like the hype over the last five years treated the achievement like an end state instead of a milestone. And now that we've passed the milestone, the community is grasping to figure out what comes next. So what is next? Quantum had a strong and growing presence at SC, as it has for the last few years. But the conclusion of the panel "Quantum Computing: A Future for HPC Acceleration" was that no, it's not close to being ready. Disaggregation and composability was another theme with growing momentum. And like quantum, there was a panel asking the same question: "Does HPC need composability now?" The answer, again, was no, not yet. More on that below. What about RISC-V? Surely that will revolutionize the field. As it turns out, the answer there is also that RISC-V is not ready to do anything useful for HPC yet. The list goes on of technologies and trends that people are trying to boost now that exascale is "solved." The reality, I think, is that "exascale" will take years to actually mature since it appears to have a ton of technical debt that accumulated during the race to be first. US Exascale rests on the shoulders of AMD and Intel, two companies whose software stacks have not caught up to the market leader, so there will be a lot of thrashing around as development practices and optimization settle out around these systems. Struggling with code porting is not very exciting to computer science Ph.D.s, so I expect future SCs to mirror this one and bifurcate into two distinct tracks: those struggling to identify the next big thing in the research space, and those struggling to use the systems that were rushed to deployment. The unexpected My SC experience was very biased since I didn't get out much, but two related themes kept popping up across different meetings and the sessions I did attend. Power efficiency is serious business now. It used to seem like people talked about the need for energy-efficient HPC in an abstract sense while continuing to jam more power into every rack without changing their approach to system design, facilities, and deployment models. That has hit a hard wall with energy prices soaring in Europe, though. The financial impacts of power-inefficient supercomputing have gone from a one-time capex cost to an ongoing opex cost that is putting many HPC facilities on an unsustainable cost trajectory. 
Even sites that aren't doing new deployments are facing sudden, sharp increases in their costs, and nobody has good answers about how they will keep the lights on. Cloud HPC is confusing. With only 15% of total HPC dollars winding up in the cloud, it's little surprise that most HPC folks are only peripherally aware of what HPC in the cloud really means. Worse yet, a subset of those folks are actively hostile towards the idea of running HPC workloads in the cloud. I spoke with my colleagues from all three major cloud service providers as well as my colleagues in DOE, NSF, and education throughout the week, and everyone painted this same general picture. There seems to be a mismatch between the expectations of on-prem HPC folks and cloud HPC folks. For example, I was asked why Windows doesn't support OpenMP very well, and after a bit of digging, I realized that the question really wasn't about using OpenMP on Windows as much as it was about using OpenMP in the cloud. There was a latent assumption that "HPC in Microsoft's cloud" must mean "HPC on Windows" which, for the record, is false--I don't even know how to use Windows anymore. Similarly, people decried the performance impacts of sharing HPC nodes with others in the cloud (they are not shared), overheads of virtualizing InfiniBand or GPUs (everyone uses PCIe passthrough or SR-IOV for HPC nodes), and other misconceptions. This isn't to say that cloud people aren't confused too; I heard stories about conversations that went sideways because a cloud folks (not from my employer, thankfully!) didn’t realize that the requirements of a traditional gov/edu HPC facility couldn’t be neatly wrapped up into a single workload with a single solution, contrary to the case across many commercial AI shops. And both sides are struggling to find models for partnership and engagement that mirror the traditional relationship between places like a DOE or NSF facility and a company like Cray. HPC departments are used to buying supercomputers and parallel file systems, while cloud providers sell computing and storage as a service. The distinction may seem trivial at the surface, but there's a large divide that becomes evident once both sides start trying to drill into the details of what a partnership would look like. Parallel I/O in Practice Tutorial This was my fifth year contributing to the Parallel I/O in Practice Tutorial with my colleagues at Argonne and Google, and it was our first time doing it in-person since 2019. It felt really good to be back in front of people to opine about the perils of POSIX and the greatness of the Darshan I/O profiling tool, and this year I retired out the material I used to present on burst buffers (since DataWarp and Infinite Memory Engine have lost relevance in HPC) and the TOKIO holistic I/O analysis framework (since it is no longer funded/maintained). In their stead, I presented material on benchmarking with IOR and mdtest I debuted at LUG 2022 this year. I haven't gotten feedback yet on whether this change was a net positive one, but I think it went over well. Benchmarking I/O is really challenging if you don't understand how things like page cache really work in distributed systems, and walking through some benchmark examples concretizes a lot of abstract parallel file system concepts like locking and striping. 
And since benchmarking is a rabbit hole of arbitrary complexity, ending the tutorial with advanced benchmarking topics turned out to be a nice way to add buffer to the end of an eight-hour stretch of carefully timed presentations. It's very easy to skip over the nuances of analyzing mdtest outputs if attendees have a lot of questions about more important things at the end of the day. The most surprising observation of the tutorial is how many attendees aren't using MPI anymore. We got a lot of questions last year about task-oriented I/O, and this year had some great questions about trying to understand or tune the I/O performed by Python-based analytics frameworks. We decided to add support for Darshan to profile non-MPI applications back in 2019 which is now paying dividends by ensuring it is a relevant tool for these new analytics and AI workloads, and we'll probably have to give more attention to optimizing these workloads' I/O in the future. DAOS User Group Monday morning was cold and rainy--a perfect day to attend the 2022 DAOS User Group which was held off-site at the Fairmont Hotel. Whether you particularly care about DAOS or not, the cross-community HPC I/O brain trust is guaranteed to be in attendance, and this year did not disappoint. In addition to the expected stakeholders from Intel and DOE, representatives from all three big CSPs were in attendance. Google Cloud, Seagate, and HPE/Cray were all on the agenda, painting a diversifying landscape of large HPC companies investing time into DAOS and the strength and willingness of the DAOS team to partner with all comers. Life after Optane The question that opened up the meeting, of course, was "what is the future of DAOS since Intel cancelled Optane?" Kelsey Prantis had the official statement (I'll replace the grainy photo once the DUG slides are online...): The high-level project answer is that DAOS isn't going anywhere. Aurora, by virtue of still having Optane DIMMs, will not be affected, and DAOS will maintain support for Optane until Intel drops its last Optane DIMMs (Crow Pass for Sapphire Rapids) from support life sometime towards the end of this decade. For new customers who aren't going to use Optane, the answer is "Metadata on NVMe," a development being codeveloped by Intel, HPE, and Google to implement a write-ahead log (WAL) and allow DAOS to use volatile DRAM instead of Optane. It will work like a file system journal in that a compact representation of writes will be committed to NVMe immediately after landing in DRAM, and then DAOS will asynchronously write back the properly serialized representation of that transaction after it is acknowledged. Johann Lombardi had a helpful cartoon that showed how this WAL will fit into DAOS: A key benefit of DAOS's implementation of this WAL is that it will be able to still service incoming writes while flushing old writes; although I don't fully grasp how this works, it is something enabled by the sophisticated I/O scheduler already implemented in DAOS. The complete implementation isn't expected to be released until Spring 2024, but it appears to touch only a few components of DAOS and doesn't affect anything above the VOS layer of the DAOS server. There was also mention of developing operability with new CXL-attached memory-semantic SSDs to keep the persistent memory capability of DAOS alive beyond Optane. 
I'm not sure if this would offer a performance benefit over the metadata-on-NVMe feature; early results show that metadata-on-NVMe actually delivers higher IOPS than Optane since the synchronous write path is much simpler without having to account for memory persistence. That said, I didn't really follow the full extent of options on the table for how DAOS metadata may work across different types of memory though. DAOS in the flesh at Argonne Kevin Harms presented an update on Aurora's massive 220 PB DAOS installation and laid out its configuration. There are 1,024 DAOS servers based on the Intel Coyote Pass server design, each sporting 2x Intel Xeon 5320 (Ice Lake) sockets2x DAOS engines (one per socket)16x 32GB DDR4 DIMMs16x 512GB Optane DIMMs (Persistent Memory 200)16x 15.36 TB Samsung PM1733 NVMe SSDs2x 200 Gb/s Slingshot NICs The total configuration is quoted at 220 PB usable, but Kevin pointed out that this assumes that every object is erasure coded at 16+2. Unlike virtually every other storage system out there, though, users can choose the data protection for their individual objects when they create them, meaning this 220 PB capacity is an upper limit to what users can do. Users with very hot, read-only objects may choose to replicate instead of erasure code, while others who are capacity-constrained may choose to erasure code everything at 16+2 at the cost of latency and IOPS. This flexibility is really powerful for users since they can tailor their object layout ("object class" in DAOS parlance) to match the needs of their workload. Argonne will be slicing up this DAOS system by giving each scientific project its own DAOS pool, and each pool will be assigned to only 80% of the available DAOS servers by default. This seems like a nice way of providing most of the storage system performance to every user, but offering more freedom to work around bad hardware, bad users, and other performance problems that plague file systems like Lustre that distribute everything across every single server equally. Finally, I noticed that Aurora will be using Samsung SSDs, not the Intel (now Solidigm) QLC NAND that appeared in all the DAOS slides floating around two years ago. I'm not sure what happened there, but the move from Solidigm QLC to Samsung TLC couldn't have been cheap. New features and contributions DAOS is starting to pick up some truly valuable features that are being developed and contributed by third parties. Of note, croit has contributed a feature which allows DAOS to serve up NVMe over Fabrics targets, and Seagate contributed an S3 gateway for DAOS. Along with the DFS file system interface, DAOS now offers the trifecta of standard object, block, and file services just like Ceph. Unlike Ceph though, performance on DAOS is a first-class citizen. While croit made it clear that the NVMeoF support still has a ways to go to improve the way it does thread pooling and provides resilience, they showed 1.4 million IOPS from a single storage client using TCP over Ethernet with minimal client-side overhead. Intel is also developing multitenant support for DFUSE, allowing a single compute node to share a DAOS mount and let permissions be enforced through UID/GID just like a regular file system. Before this update, the FUSE-based nature of DAOS allowed any unprivileged user to mount their container (good), but only one FUSE agent could be alive on a single node at a time (not good) which prevented multiple users sharing a node from both mounting their own containers. 
DAOS also has some longer-term enhancements that I thought were interesting: expanding the range of POSIX calls supported by DAOS's intercept library to include metadata calls and memory-mapped I/O using userfaultfdimplementing collaborative caching - essentially reimplementing the Linux kernel page cache in userspace so that multiple processes can share cached DAOS pagessupporting a computational storage paradigm by enabling offload of userspace eBPF scripts to DAOS servers DAOS in a larger data center ecosystem Dean Hildebrand from Google Cloud then gave an overview of Google's efforts in bringing DAOS into the cloud. He had some nice performance graphs and I'll link the full presentation here once it's uploaded (it's worth a watch), but the part I found the most insightful was how they are trying to decide where a technology like DAOS fits in the larger cloud storage ecosystem. He outlined two different ways DAOS could work in GCP: Caching: Google Cloud Storage (GCS) is the point of truth and DAOS is a cacheTiering: DAOS is a point of truth, and GCS is an archive He said they were leaning towards the caching model where data only lives ephemerally in DAOS, and personally, I think this is the right move since DAOS in the cloud is not resilient without Optane. However, this choice reflects a much larger tension in cloud storage for HPC: The centerpiece of every cloud's data story is a scalable, low-cost, low-performance object store which is analogous to what on-prem HPC would call campaign, community, or project storage.HPC demands higher performance than what these object stores can generally deliver though. To bridge the gap between these two truths, auxiliary services must bolt on to the object layer and provide higher performance, at a higher cost, for the duration of I/O-intensive HPC jobs. Some choose to provide true tiering from object into a resilient layer of flash (like FSx Lustre and Weka do), while others project the contents of the object through a high-performance caching layer (like HPC Cache and File Cache) and are never meant to persistently hold data. This isn't rocket science, but I never thought deeply about the two models since campaign/community/project storage in on-prem HPC is usually fast enough to avoid needing caches or fine-grained tiering capabilities. John Bent also had a thought-provoking presentation about how Seagate's now-"deprioritized" CORTX object store, which once competed with DAOS as Mero, contains ideas that can complement DAOS: Whereas DAOS delivers high performance using NVMe, CORTX delivers great economics using HDDs, and their strengths are complementary to each other. While I don't fully grasp how a tiered (or caching!) system comprised of DAOS and CORTX could be implemented, John rightly pointed out that the same level of space efficiency can deliver higher data protection if multi-level erasure coding is used to stripe across durable block storage. His specific example was erasure coding at 8+1 across servers and 10+1 within servers to deliver both high efficiency and high durability. This could map to something like running DAOS atop something like CORVAULT, but I don't think all the necessary pieces are in place to realize such a harmonious coexistence yet. Of course, completely tossing Reed-Solomon for something more sophisticated (like VAST does with its locally decodable 150+4 scheme) obviates the need for multilevel erasure entirely. But DAOS has not gone down that route yet. 
And as with every talk John gives, there were lots of other interesting nuggets scattered throughout his presentation. Two of my favorites were: A slide that pointed out that, when you buy something like Ceph as an appliance, you may be spending only 25% of the total cost on storage media and the rest is infrastructure, service, and support. This struck me as a bit on the low end, but some enterprisey NAS and midrange parallel file system appliances can go this low. Spending 60% to 90% on media is a lot nicer for the buyer (and companies like Seagate) if you can buy at scale or eschew the white-glove support, and John suggested that it's up to companies like Seagate to fix the software issues that require customers to pay for white-glove support in the first place.  After all, the less someone spends on support and licenses, the more they can spend on Seagate hard drives.John's final slide pointed out that object stores were originally designed to get around the limitations of POSIX file systems, but as they've evolved over the last decade, they're starting to look a lot like file systems anyway since they require strong consistency, hierarchical namespaces, and familiar file semantics. Has all the work put into developing super-fast object stores like DAOS over the last ten years really just brought us back full circle to parallel file systems?  Companies like VAST and Weka have shown that maybe POSIX isn't as bad as the research community (myself included!) have claimed it to be; it was really just low-performance implementations that nobody wanted. Once John's talk is uploaded to the DUG 2022 website, I'll link it here.  Like Dean Hildebrand's talk, it is well worth watching (but for wildly different reasons!) PDSW 2022 I had to duck out of the DAOS User Group early to run (through the rain) to the 7th International Parallel Data Systems Workshop (PDSW 2022) on Monday afternoon. Much to everyone’s surprise, PDSW was only given a half day this year and everything felt a little compressed as a result. The organizers kept the work-in-progress (WIP) sessions which can often be an interesting peek into what students are pursuing, but little A/V problems and the unforgiving schedule probably did a disservice to the up-and-comers who use the WIP track to lay the groundwork for future full-length papers. Hopefully SC’23 restores PDSW to its original full-day status.&lt;p&gt;&lt;/p&gt; Splinters keynote from Arif Merchant at Google The keynote presentation was given by Arif Merchant from Google about Splinters, the framework that Google Cloud uses to sample I/Os in a scalable way. The challenge they face is that it's impossible to trace and store every single I/O that hits Google's storage servers (D servers), but having an understanding of I/O patterns is essential for characterizing workload I/O behavior and planning for future infrastructure. In fact, this problem is so important that Google isn't the only cloud that's solved it! A lot of what Arif talked about is very similar to how Azure does its I/O tracing under the hood. I suppose it should not be surprise that there are only so many ways to solve the challenge of sampling individual IOPS in a way that fairly represents the aggregate workload of a huge distributed storage system. One really smart thing Splinters does that I liked was sample along two different dimensions: not only do they evenly sample across all IOPS at a fixed rate (the obvious thing), but they also sample across files at a fixed rate. 
In this latter case of per-file sampling, they take a tiny fraction of files and capture every I/O for that file to get a complete picture of how individual files are being accessed. This file sampling fills the huge gap that exists when randomly sampling IOPS alone. Because different I/Os have different "costs" (for example, reading a 1 MiB file using a single 1 MiB read op or 256x 4 KiB read ops are functionally equivalent to an application), randomly sampling ops introduces systematic biases that can be difficult to back out after the data has been sampled, subsampled, aggregated, and reduced. Splinters' approach lets you see the workload from two different angles (and biases) and answer a much larger range of questions about what's really happening across thousands of storage servers. That said, it was interesting to hear Arif describe how Splinters evolved out of a different internal Google project but wound up outliving it. Splinters is also similar to, but slightly different from, their Dapper infrastructure which also does scalable distributed system tracing. And he made overtures to F1, a scalable SQL database that is similar to (but not the same as) the SQL-like query interface that Splinters uses. I got the impression that new technologies come and go pretty quickly at Google, and there's a large appetite for creating new software systems outright rather than shoehorning an existing system into solving a new problem. I can't say one way is better than the other; I was just surprised at the contrast with my own experiences. Practical papers PDSW had a healthy combination of both very-researchy papers and applied research papers this year. I could only stick around for the applied papers, and two left an impression. In the first, Jean Luca Bez presented Drishti, a tool that lives downstream of the Darshan I/O profiling library and finally does what the Darshan community has danced around for years--turning a Darshan log into an actionable set of recommendations on how to improve I/O performance. It does this by cataloguing a bunch of heuristics and using Darshan's new Python integrations to pore through a log and identify known-problematic I/O patterns. Like Jean Luca's DXT Explorer tool, Drishti has a slick user interface and greatly extends the usability and insights that can be pulled out of a Darshan log file. It probably won't win a Turing Award, but this sort of work is probably going to benefit scores of HPC end-users by making Darshan (and troubleshooting I/O problems) much more accessible to mere mortals for years to come. Adrian Jackson also presented a very tidy apples-to-apples comparison of DAOS and Lustre on the same hardware using both a systems-level benchmark and an application-inspired, object-oriented data model benchmark. The specific bake-off of a new curiosity (DAOS) and the decades-old incumbent (Lustre) is probably interesting to storage nerds, but I think the real novelty of the work is in its exploration of some uncomfortable realities that the HPC I/O community will have to face in the coming years: Does "slow memory" (nonvolatile Optane or CXL-attached memory SSDs) give actual benefit to existing file systems (like Lustre), or is rethinking the entire storage stack (like DAOS did) really necessary to unlock the performance of new hardware?Do applications need to rethink their approach to I/O to make use of post-POSIX storage systems like DAOS, or is performing I/O as you would on a file system (Lustre) on a post-POSIX storage system (DAOS) good enough? 
My take from the work is that, for simple I/O patterns like checkpoint/restart, you can get pretty far by just treating something like DAOS the same as you would a parallel file system: Figure from Manubens et al, "Performance Comparison of DAOS and Lustre for Object Data Storage Approaches." But if you want your data at rest to have the same data model as how it's handled within the application, you really ought to use a storage system that supports data models that are more expressive than a stream of bytes (which is what POSIX files are). The authors didn't do a perfect job of giving Lustre its fair shake since they chose to use (abuse) directories and files to represent their application's data model on-disk instead of developing an object-file model that file systems like Lustre handle a little better. But let's be real--HPC is full of applications that do the exact same thing and represent datasets on-disk using complex hierarchies of directories and files simply because that's the easiest way to map the application's representation of data into the standard file system model. In that sense, storage systems that represent rich data models in a high-performance way should be really valuable to naive applications that map in-memory data structures directly to files and directories. Going back to John Bent's closing slide from his DAOS User Group talk, though, does any of this even matter since all answers lead back to parallel file systems? Maybe there's something to be learned about adding better back-door APIs that support more diverse data models than what POSIX file interfaces give us. The SC22 Expo The expo is my favorite part of SC because it's when I get to talk to people one-on-one and learn about corners of the HPC industry that I would've never otherwise sought out. Much to my dismay, though, I had very little time to walk the floor this year--so little that I didn't get any swag. If you want to read up on what interesting technology was being showcased, I strongly recommend reading all the great content that Patrick Kennedy and his team at STH created covering the expo. That said, I did notice some curious trends about the show floor overall. The NVIDIA booth was notably absent this year (though they shared booth space with partners), and many of the usual top vendors had significantly smaller presence on the expo floor. Just for fun, I compiled the top ten(ish) vendors by booth size: Weka.io (3,200 sqft)VAST Data, Department of Energy, Penguin Computing, HPE, and Microsoft (2,500 sqft)AWS (2,000 sqft)Google and TACC (1,600 sqft)Supermicro, AMD, Intel, Dell, NASA, and Indiana University (1,500 sqft) I think it's amazing to see all-flash storage companies at the top of the list alongside all of the Big 3 cloud service providers. I may be reading too much into this, but this may mean that the money behind SC is shifting towards companies playing in the cloud-based AI space instead of traditional big iron for simulation. Or perhaps it's a sign that most of the traditional HPC players are taking a hard look at the return they get on a big booth given the current economic climate and pulled back this year. I did chat with a couple colleagues who completely opted out of a booth this year (for reference, SC'21 had 10% fewer exhibitor booths than SC'19), and the reasoning was consistent: they found more value in having staff meet with customers privately or attend the technical sessions and engage with people organically. 
Combined with a bit of bad taste left over from SC's high cost of hosting pandemic-era "digital booths" despite low return (did anyone visit digital booths at SC'20 or SC'21?), I can see why some vendors may have chosen to skip the expo this year. Whatever the reasons may be, I was a bit sad to see such a small presence from some of my favorites like IBM, Fujitsu, Atos, and NEC. Hopefully the SC Exhibits Committee (and the economy!) can find ways to bring back the pre-pandemic glory of the show floor. The expo wasn't all doom and gloom though! Even though I couldn't make my complete rounds this year, there were a couple of highlights for me. VAST's masterful marketing Perhaps the splashiest vendor at SC was VAST Data who had a brilliant marketing presence. First was the giant Vastronaut mascot that was the centerpiece of their booth: A quick search of Twitter shows just how many people seized the opportunity to take a selfie at their booth. I would love to know how they transported that thing to and from the conference, but whatever the cost, I'll bet it was worth it. At the Grand Opening Gala on Monday, they also gave out delightfully tacky light-up cowboy hats that everyone seemed to be wearing: We were there! #sc22 #sc2022 @VAST_Data pic.twitter.com/fWhuSgBfpL — ntnu-hpc (@ntnuhpc) November 15, 2022 The subtle genius of this was that not only did people wear them during the gala and the Flop Gun-themed Beowulf Bash 2022 party later that night, but they had to wear them on their plane rides home since they were so inconveniently bulky. Proof in point, my wife (who doesn't work in tech) sent me this text message to confirm that she was waiting for me at the right luggage carousel at San Francisco Airport: I wonder how many innocent bystanders, traveling home for Thanksgiving on Thursday or Friday, saw the shiny cowboy hats at airports around the country and wondered what VAST was. The icing on the cake was VAST's CEO, Renen Hallak, parading around in an unmissable Chuck McGill-style space suit all week, clearly not taking himself too seriously and painting VAST as a work hard/play hard kind of company. Now, do flashy space suits and blinking cowboy hats alone mean VAST has a great product? I can't say**. But marketing is an art that I appreciate, and VAST hit some great notes this year. ** (Seriously, I'm not sure I wouldn't get in trouble for opining about another company here.) The Microsoft hardware bar The only booth where I spent any appreciable time this year was my own employer's. I personally love booth duty and accosting strangers on the show floor, especially if there's something interesting at the booth to jumpstart a conversation. When I worked at SDSC it was a Raspberry Pi cluster, and at the Microsoft booth this year it was the "hardware bar." In addition to the customary booth presentations with giveaways, swag desk, seating area, and a fun caricature artist, the physical servers that underpin the HPC nodes in Azure were on display. Microsoft contributes its hardware platform designs to the Open Compute Project so the physical hardware that runs in Azure data centers isn't entirely mysterious. Still, every cloud has its hardware secrets, so I was surprised to see these servers laid bare. 
The newest HPC node type (dubbed HBv4) on display was a node powered by AMD's Genoa processors just announced a few days earlier: This wasn't a display model, either; it had real DDR5 DRAM, a real NDR InfiniBand HCA, real PCIe Gen5, and real big OCP mezzanine card with real big aluminum heat sinks and a big Microsoft sticker on top. A couple visitors commented on the way the heat piping for those Genoa CPUs was done which I guess is unusual; rather than have a giant copper block on top of each socket, heat pipes connect the socket to massive aluminum heat sinks that are closer to the chassis inlets. In retrospect it makes sense; Genoa has a whopping twelve DDR5 DIMMs per socket which leaves little extra room for heat sinks, and these 88+ core sockets have a staggering thermal design power. Another exotic piece of hardware on display was an "ND MI200 v4" server: It's logically similar to Azure's "ND A100 v4" server platform with two CPU sockets, eight SXM4 GPU sockets, eight 200G HDR InfiniBand HCAs, and a bunch of M.2 NVMes. But this specific server has eight MI200 GPUs on a common OAM baseboard and uses Infinity Fabric for GPU-to-GPU communication. I've never seen an OAM-socketed anything in real life before, much less eight of them on a baseboard, so I thought this was pretty great to see in the flesh. The ND A100 v4 platform was also on display and looked very similar-but-different with its eight A100 GPUs and HGX baseboard: And unlike the MI200 variant, the general public can run on these nodes. I'm not sure what more I'm allowed to say, but my colleague Karl made a nice, quick video that runs through the entire Microsoft booth that's worth a watch, and more details can be had by contacting me or your favorite Microsoft account team privately. Of course, the hardware bar was just a way to lure people into the booth so I could achieve my real goal: meeting new folks. As I wrote before, one of my biggest realizations at SC this year is how generally confused people are about what HPC in the cloud really means--both people who come from traditional on-prem HPC and people who come from traditional enterprisey cloud. I found myself surprising many of the people with whom I spoke on the show floor with factoids that I have taken for granted. For example, Linux is the most common OS on these HPC node types. While you probably(?) can run Windows if you want on this stuff, I think only a few niche markets do this.The usage model for an HPC cluster in the cloud can be the same as on-prem. You can have login nodes, Slurm, home directories, parallel file systems, and all that. Jobs don't have to be containerized or turned into a VM image.The InfiniBand coming out of these nodes is real InfiniBand with real OFED that supports real mpich/mvapich/OpenMPI. It's the same stuff as in on-prem supercomputers. And nodes are assembled into full-bisection fat tree InfiniBand clusters just like normal.There's no noisy neighbor problem on compute nodes because HPC node types aren't shared between users. When you run a VM on an HPC node, you get the whole thing. Just like on large supercomputers.There's no horrible loss of performance due to running in a VM. Virtualization extensions, PCIe passthrough, and SR-IOV bypass the hypervisor for most things. Inside your VM, you see real Zen cores and real Mellanox HCAs, not virtualized devices. My takeaway impression is that a lot of traditional HPC folks looked at the cloud five or ten years ago, had a sour experience, and haven't paid attention since. 
My takeaway impression is that a lot of traditional HPC folks looked at the cloud five or ten years ago, had a sour experience, and haven't paid attention since. In those last five years, though, AI has changed the game. Massive demand for the latest CPUs and accelerators, funded by live-fast-die-young venture capital, has given cloud vendors tremendous financial incentive to catch up to on-prem levels of performance efficiency for AI workloads. And it just so happens that infrastructure that's good for AI is also good for traditional modeling and simulation.

SCinet!

One of the unexpected highlights of my SC this year arose from a chance encounter with a former coworker from NERSC, Ron Kumar, who gave me a whirlwind tour of SCinet. I have to confess great ignorance around SCinet in general; I always saw it as a weird technological proof of concept that the strange networking people at work would go off and do in the weeks leading up to the actual conference. I knew they did some impressive wide-area transfer demos (like the petabyte-in-a-day demo at SC'16), but I didn't really get the significance.

So what is SCinet? It's this yellow bundle of cables dangling from the ceiling. The yellow cables are 144-core fiber trunks that bring over a terabit per second of bandwidth into the convention center from the Internet via national research backbones like ESnet and Internet2, and they distribute many terabits per second of capacity throughout the SC conference venue. For comparison, most HPC centers in the US have at best a tenth of SCinet's wide-area bandwidth, since 400G infrastructure is still rolling out.

Most attendees are probably familiar with the row of expensive-looking networking racks behind a glass wall towards the back of the expo, which is where those yellow cables dangling from the ceiling terminate. What I didn't realize is that if you go around to the back of the giant walled area behind this glass display, there's a security checkpoint that gates entry into a massive network operations center (NOC) full of laptops, spools of fiber, meeting rooms, and busily working teams in charge of all the lower layers of the networking stack. Getting into the NOC involves an escort and being tagged in with a tamper-proof wristband, and I learned on the tour that there are millions upon millions of dollars' worth of high-end networking equipment in those racks. If you look closely, you can see a security camera at the end of the aisle that speaks to this; that camera was one of many.

Behind the pretty public-facing side of the SCinet racks is a mess of fiber and cables. I guess if you have to tear all this down after just a few weeks, there's no point in investing days in dressing it all up nicely! I particularly enjoyed the fiber panels in the third rack that appeared to be affixed to the rack post with shoelaces.

This year, SCinet also did a neat proof of concept in which they demonstrated three 400G routers from three vendors (Juniper, Arista, and Cisco?) all talking the same protocol to handle what I assume was the core routing for everything in the convention center. I wish I remembered exactly what was going on here, but I know enough about networking to know that, despite there being standard protocols for coordinating between networking gear, each vendor's implementation rarely interoperates cleanly with the others'. If anyone out there knows the details of this achievement, please let me know so I can explain it a little better!

In addition to networking nerd-level demonstrations, SCinet also serves up all the wifi across the convention center.
That is why there were tripods with access points scattered around the venue, and why astute attendees may have noticed janky-looking networking equipment stashed in odd corners. Again, I get it: for a network infrastructure that's only going to last a week, I don't think it's a good use of anyone's time or money to nicely dress all the networking.

One last factoid I didn't know until this year: exhibitors can request 100 Gb/s network drops into their individual booths for demos (or for downloading the latest version of a PowerPoint presentation really fast). The end result of supporting both a vast wifi network and 100G fiber across the show floor is that there was a lot of fiber going into the single row of SCinet equipment.

Finally, when I posted some of these photos online during the conference, my colleague Bilel was kind enough to post a slide from the SC22 opening presentation with the speeds and feeds of what I had toured:

    Candy Culhane shared Scinet facts #SC22 #HPC
    5.01 Tb/s of WAN capacity
    $70M in HW & SW, & services provided by 29 SCinet contrib.
    175 volunteers from 80 vol. organiz.
    > 450 wireless deployed
    29 network research exhibition proposals
    11.7 miles of fiber
    2384 fiber patch
    https://t.co/JtPhjVHZJd pic.twitter.com/kwGl5Ydqp5
    — Bilel Hadri (@mnoukhiya) November 16, 2022

If you know anyone involved with SCinet, I highly recommend seeing if you can get a tour at the next SC. Even as a relative networking novice, I walked away with a much greater appreciation for the annual achievement of building SCinet. And who knows? Once I get bored of this whole storage thing, maybe I'll try getting into high-performance networking.

Composability panel

This year I was invited to participate in a panel titled "Smackdown! Does HPC Need Composability Now?" moderated by Addison Snell and Dan Olds from Intersect360 Research. This panel was...different. Unlike the traditional SC panel where panelists take turns presenting slides and saying erudite things, this panel had two teams of panelists, and my team only had one slide to present. The ground rules included "personal attacks are allowed," and needless to say, the panel was about equal parts entertainment and technical discourse.

That's not a bad thing, though. Addison and Dan did a phenomenal job of pulling their respective teams together and leading discussion in a format that brought forward the key pros and cons of composability in HPC while poking fun at the thinly veiled, ego-driven personalities that often make up these sorts of panels. Rather than politely dancing around issues like sacrificing memory bandwidth by putting accelerators at the far end of a PCIe bus, or gaining higher utilization by letting users mix and match CPUs, NICs, and GPUs, we panelists were free to shoot straight (or perhaps a bit hyperbolically) and call each other out on our hidden agendas.

I hope it goes without saying that all of us panelists were in on the format and don't actually think people on the other side are dumb. By wrapping technical arguments in snarky comments, we could keep the level of discussion accessible to a wide audience, drive home the key points from both sides, and ensure that we weren't losing audience members who care less about the PhD-level details than about what their peers are thinking in this exciting new space. I got some feedback afterwards that I didn't seem to hold back, so if anyone did take anything I said seriously, I am very sorry!

On a technical level, what was the outcome?
After both sides argued their case, the audience split roughly 60/40 between people who felt composability isn't required yet and those who felt it is. Even among panelists, many of us were a lot less convinced of our respective positions than we let on during the panel itself. I got a chuckle when I realized that I wasn't the only one who, when invited to be on the panel, asked "which side do you want me to argue?" I honestly could have gone either way, because the dust has not yet settled. Dan Stanzione, director of TACC, gave the truest answer to the question of "will composability help HPC" up front: "it depends."

Whether this becomes a growth opportunity or gets a lukewarm reception, composable technologies are hitting the market regardless of whether you think they'll be useful. AMD Genoa supports CXL 1.1 with extensions for memory pooling, Samsung has memory-semantic SSDs, and everyone and their mother is working on photonics to get higher bandwidths and lower latencies over longer distances. This makes it easier for people to dip their toes in the water and see whether composability makes sense for them, and I think that's what a lot of people will wind up doing in the coming years.
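As a rough illustration of how low that toe-dipping barrier already is on Linux: CXL devices are managed by the kernel's CXL subsystem, and CXL-attached memory, once onlined, typically shows up as a CPU-less NUMA node, so ordinary NUMA tooling applies. A sketch, with illustrative device and node numbers:

    # Enumerate CXL devices the kernel knows about (the cxl tool from ndctl).
    cxl list

    # Onlined CXL memory appears as a memory-only NUMA node...
    numactl --hardware

    # ...so existing placement tools can bind an app's memory to it.
    numactl --membind=1 ./my_app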
Customer meetings

Unlike in years past, my SC experience this year was dominated by customer meetings. I've been on the customer side of the table plenty of times, but I was surprised to find that it was actually more fun to be on the vendor side for a change. I'm part salesman at heart, so I found it personally gratifying to end a meeting with people nodding along rather than scratching their heads. I learned as a customer that it's very easy for vendors to go way off the rails and waste everyone's time, so I was grateful to have avoided the awkward confusion that punctuates those kinds of meetings.

I also went into the week worrying that I'd be sitting in the same room, hearing the same pitch and the same jokes, and answering the same questions all week. Thankfully, I work with some great field, business, and product teams who set up interesting conversations rather than rote recitations of boring roadmap slides. Approaching the same topics from different angles also helped me figure out how all the pieces of what I'm working on fit together into a complete picture; there weren't nearly as many opportunities to do this in the DOE world, since the end users of the HPC systems on which I worked aren't told anything until all the design decisions have already been made.

A few personal notes

This SC was significant to me on a variety of levels: it was the first time I'd gotten on an airplane since February 2020, the first time I'd traveled since starting a new job at a new company, and the first time I'd met any of my new coworkers outside the structure of a Teams call. During the pandemic I realized that getting out into the world and talking to people from all corners of HPC was my favorite part of my job. Not being able to go to events like SC and maintain that sense of community involvement dramatically impacted my level of professional satisfaction over the last two years, so I'm glad I was finally able to go this year. Though customer meetings were a lot more fun than I expected them to be, I was still bummed that I could spend so little time walking the expo, talking to folks, and attending all the BOFs normally on my must-attend list.

Compounding this was my personal choice not to dine indoors, which cost me almost every other chance to catch up with old friends and colleagues, and my decision to leave SC a day earlier than usual to reduce my risk of getting sick didn't help either. There's never enough time at SC, but this year was particularly pressed. I say all this not to complain, but to convey how much I appreciated the people who went out of their way to come accost me during the precious few hours I actually had on the exhibit floor. Some I hadn't seen since SC'19, and some I'd never actually met in person since we only started working together mid-pandemic. The conference is busy for everyone, so giving me a slice of your time was very meaningful. That sense of community membership is why I go to SC, it's why I still work in this business, and it's why I try to contribute whatever I can to whoever wants it, whether that's a student, an engineer, a salesperson, or a marketer.

Converged Computing
2022-11-18T08:30:00-07:00
https://hpc.social/personal-blog/2022/converged-computing

For many years there has been a battle between cloud and HPC. The cloud side of the equation says "microservices, cloud native!" and the HPC side says "too expensive!" Conversations often don't progress because both sides are up in arms and focused on why they cannot work together. At best, we might get access to cloud from an HPC center, or a company might present a product as branded for "HPC." But it's not truly collaborative in the way that I'd like.

I'll also step back and comment that folks on the HPC side (myself included) have not, I believe, done enough to sit at the table. For example, we haven't been a voice in the Open Containers Initiative (although I've tried: https://supercontainers.github.io/containers-wg/), nor have we historically been present at conferences that are more focused on cloud native technologies. There is no pointing fingers or fault here; it's just a matter of two different cultures, and it's been challenging figuring out how to talk to one another and how to work together. I've tried my best to be involved, in small ways, on both sides. But I'm only one person. This isn't to say there haven't been small collaborations, but I believe we can do more.

Change is Coming

I think this is going to change. The reason is that both sides of the equation have started to realize we have similar goals, and that it's not about creating hybrid environments (having both pancakes and waffles for breakfast) but rather convergence: recognizing that pancakes and waffles are both kinds of breakfast cakes, and that we can take the features we like from each to create a breakfast cake that makes everyone happy. The idea of "Converged Computing" comes from my amazing team (see Dan's talk at KubeCon: https://www.youtube.com/watch?v=9VwAcSOtph0) and is the idea that technologies from HPC can be integrated into more traditionally cloud-native approaches to produce a solution that solves problems on both sides. Explicitly for these projects, it means testing the Flux Framework scheduler alongside Kubernetes. Do we still want portable workflows that can move from an HPC environment to cloud? Of course.
However, the niche or gradient that I'm interested in is the space that lives between these two worlds.

While I won't go into huge detail (that would be more appropriate for a talk), the lab openly works on Flux Framework (https://github.com/flux-framework), a resource manager that is (in my opinion) one of the coolest projects coming out of our space. I started working with these teams a few months ago, and I'm bringing my excitement and vision for (what I hope to be) a future where we are actively developing alongside other Kubernetes projects, and where our work is well known and established in this space. What does that mean? Let me share some cool work under development. This is all being done publicly on GitHub, so there is no issue with talking about it! For my first year or so at the lab I was hired under a research project, and although I learned a lot, I hadn't felt inspired and driven until starting this work. Let's talk about some of it! 🎉️

The Flux Operator

[Image: the Flux Operator mascot - https://flux-framework.org/flux-operator/_images/the-operator.jpg]

If you aren't familiar with Kubernetes operators, let's step back and talk about a human operator. If you are a sysadmin managing apps with associated services and databases on a cluster, you often have to do maintenance or update tasks like increasing a storage volume or modifying a service to meet a new user need. As this pattern emerged as a common one, the community came up with the concept of a Kubernetes operator: an actual controller you install to your cluster that can automate those tasks. In simple terms, after you install an operator to your cluster, you can hand it a desired state (represented in a yaml configuration file) and the operator will do whatever it takes to reach that state. What does that mean in the context of Flux? The Flux Operator is interested in creating what we are calling a "Mini Cluster," shown in the design image below.

[Image: Mini Cluster design - https://flux-framework.org/flux-operator/_images/design-three-team1.png]

In Kubernetes object terms, a Mini Cluster is an Indexed Job (https://kubernetes.io/docs/tasks/job/indexed-parallel-processing-static/), a few config maps, secrets, and a RESTful API (https://flux-framework.org/flux-restful-api/) and user interface that I designed, exposed as a service. You can read more about our current design at https://flux-framework.org/flux-operator/development/designs.html.

The Mini Cluster is generated from a "custom resource definition" or CRD (the yaml you provide), and it accepts the parameters documented at https://flux-framework.org/flux-operator/getting_started/custom-resource-definition.html (see the sketch below). Conceptually, you as the user own the Mini Cluster and can submit jobs to it (either via the web interface or the API) until you are done; when you are done, you can bring down the cluster.

We are excited about this work because over the coming months (and beyond) we are going to be testing different kinds of workloads running under Flux in this Mini Cluster, but on Kubernetes!
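To make the "hand it a desired state" idea concrete, here is a rough sketch of a Mini Cluster CRD. The apiVersion and kind follow the Flux Operator project, but treat the specific fields, image, and command as illustrative assumptions; the parameters link above is the authoritative reference.

    # Sketch only; see the Flux Operator CRD docs linked above for real fields.
    cat <<'EOF' | kubectl apply -f -
    apiVersion: flux-framework.org/v1alpha1
    kind: MiniCluster
    metadata:
      name: flux-sample
      namespace: flux-operator
    spec:
      size: 4                                  # pods in the Indexed Job
      containers:
        - image: ghcr.io/rse-ops/lammps:latest # hypothetical app container
          command: lmp -in in.reaxc.hns        # hypothetical command run under Flux
    EOF

    # The operator reconciles this into the Indexed Job, config maps, secrets,
    # and RESTful API service described above; deleting the MiniCluster object
    # brings it all back down.
    kubectl get pods -n flux-operator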
I've started a small repository of dummy examples, which I'm extending quickly, at rse-ops/flux-hpc (https://github.com/rse-ops/flux-hpc); please open an issue there if you have a suggestion.

Stay Tuned!

Stay tuned for more work in this space! I've been doing a ton of programming in Go and Python, working across a wide range of technologies (and fairly quickly), and I am very much in my happy place. Please come and join us! ❤️

Vanessasaurus

Ceph OSD CPU Scaling - Part 1
2022-11-08T00:00:00-07:00
https://hpc.social/personal-blog/2022/ceph-osd-cpu-scaling-part-1

Last summer we had a user that hit some performance issues after following a recommendation to use 2 cores per OSD in their systems. I wanted to provide some data for the community, so I wrote up a post (https://ceph.io/en/news/blog/2022/ceph-osd-cpu-scaling/) on the ceph.io website. Please take a look!

Mark Nelson's Blog